/*!
 *  Copyright (c) 2014 by Contributors
 * \file tensor_cpu-inl.h
 * \brief implementation of CPU host code
 * \author Bing Xu, Tianqi Chen
 */
#ifndef MSHADOW_TENSOR_CPU_INL_H_
#define MSHADOW_TENSOR_CPU_INL_H_
#include <cstring>
#include <functional>
#include <utility>
#include <vector>
#include <algorithm>
#include "./base.h"
#include "./tensor.h"
#include "./packet-inl.h"
#include "./dot_engine-inl.h"

namespace mshadow {
template<>
inline void InitTensorEngine<cpu>(int dev_id) {
}
template<>
inline void ShutdownTensorEngine<cpu>(void) {
}
template<>
inline void SetDevice<cpu>(int devid) {
}
template<>
inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle,
                                   bool create_dnn_handle,
                                   int dev_id) {
  return new Stream<cpu>();
}
template<>
inline void DeleteStream<cpu>(Stream<cpu> *stream) {
  delete stream;
}
template<int ndim>
inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) {  // NOLINT(*)
  os << '(';
  for (int i = 0; i < ndim; ++i) {
    if (i != 0) os << ',';
    os << shape[i];
  }
  // python style tuple
  if (ndim == 1) os << ',';
  os << ')';
  return os;
}
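// Usage sketch (illustrative, not part of the original header): the trailing
// comma in the one-dimensional case mirrors Python's single-element tuple
// notation.
//
//   std::cout << Shape2(3, 4) << std::endl;  // prints (3,4)
//   std::cout << Shape1(5) << std::endl;     // prints (5,)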
template<typename xpu>
inline void *AllocHost_(size_t size);
template<typename xpu>
inline void FreeHost_(void *dptr);

#ifdef __CUDACC__
template<>
inline void *AllocHost_<gpu>(size_t size) {
  void *dptr;
  MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable));
  return dptr;
}
template<>
inline void FreeHost_<gpu>(void *dptr) {
  MSHADOW_CUDA_CALL(cudaFreeHost(dptr));
}
#endif  // __CUDACC__

template<>
inline void *AllocHost_<cpu>(size_t size) {
  size_t pitch;
  return packet::AlignedMallocPitch(&pitch, size, 1);
}
template<>
inline void FreeHost_<cpu>(void *dptr) {
  packet::AlignedFree(dptr);
}
template<typename xpu, int dim, typename DType>
inline void AllocHost(Tensor<cpu, dim, DType> *obj) {
  obj->stride_ = obj->size(dim - 1);
  CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost";
  void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType));
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
template<typename xpu, int dim, typename DType>
inline void FreeHost(Tensor<cpu, dim, DType> *obj) {
  if (obj->dptr_ == NULL) {
    LOG(FATAL) << "FreeHost:: double free";
  }
  FreeHost_<xpu>(obj->dptr_);
  obj->dptr_ = NULL;
}
template<int dim, typename DType>
inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) {
  size_t pitch;
  void *dptr;
  if (pad) {
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]);
    obj->stride_ = static_cast<index_t>(pitch / sizeof(DType));
  } else {
    obj->stride_ = obj->size(dim - 1);
    dptr = packet::AlignedMallocPitch
        (&pitch, obj->shape_.Size() * sizeof(DType), 1);
  }
  obj->dptr_ = reinterpret_cast<DType*>(dptr);
}
template<typename Device, typename DType, int dim>
inline Tensor<Device, dim, DType> NewTensor(const Shape<dim> &shape,
                                            DType initv,
                                            bool pad,
                                            Stream<Device> *stream_) {
  Tensor<Device, dim, DType> obj(shape);
  obj.stream_ = stream_;
  AllocSpace(&obj, pad);
  MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv));
  return obj;
}
template<int dim, typename DType>
inline void FreeSpace(Tensor<cpu, dim, DType> *obj) {
  packet::AlignedFree(obj->dptr_);
  obj->dptr_ = NULL;
}
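// Usage sketch (illustrative): the typical allocate / initialize / release
// cycle. NewTensor combines AllocSpace with a scalar-initializing MapExp.
//
//   Tensor<cpu, 2, float> t = NewTensor<cpu>(Shape2(4, 8), 0.0f);
//   // ... use t ...
//   FreeSpace(&t);  // releases the buffer and resets t.dptr_ to NULL
//
// When pad is true, AllocSpace may round stride_ up beyond size(1) for packet
// alignment, so raw-pointer code must honor stride_ rather than size(1).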
template<int dim, typename DType>
inline void Copy(Tensor<cpu, dim, DType> _dst,
                 const Tensor<cpu, dim, DType> &_src,
                 Stream<cpu> *stream) {
  CHECK_EQ(_dst.shape_, _src.shape_)
      << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_;
  if (_dst.CheckContiguous() && _src.CheckContiguous()) {
    memcpy(_dst.dptr_, _src.dptr_, sizeof(DType) * _dst.shape_.Size());
  } else {
    Tensor<cpu, 2, DType> dst = _dst.FlatTo2D();
    Tensor<cpu, 2, DType> src = _src.FlatTo2D();
    for (index_t y = 0; y < dst.size(0); ++y) {
      memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1));
    }
  }
}
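// Note (illustrative): Copy demands identical shapes; it uses a single memcpy
// when both tensors are contiguous and otherwise falls back to one memcpy per
// row of the flattened 2-D view, which skips the padded portion of the stride.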
template<typename Saver, typename R, int dim,
         typename DType, typename E>
inline void MapPlan(TRValue<R, cpu, dim, DType> *dst,
                    const expr::Plan<E, DType> &plan) {
  Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D();
  expr::Plan<R, DType> dplan = expr::MakePlan(dst->self());
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t y = 0; y < shape[0]; ++y) {
    for (index_t x = 0; x < shape[1]; ++x) {
      // trust your compiler! they will optimize it
      Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x));
    }
  }
}
// code to handle packet (SSE) optimization
template<bool pass_check, typename Saver,
         typename R, int dim,
         typename DType, typename E, int etype>
struct MapExpCPUEngine {
  inline static void Map(TRValue<R, cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    MapPlan<Saver>(dst, MakePlan(exp.self()));
  }
};
template<typename SV, int dim, typename DType, typename E, int etype>
struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>,
                       dim, DType, E, etype> {
  inline static void Map(Tensor<cpu, dim, DType> *dst,
                         const expr::Exp<E, DType, etype> &exp) {
    if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) &&
        expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>,
                               MSHADOW_DEFAULT_PACKET>::Check(*dst)) {
      expr::MapPacketPlan<SV>(dst->self(),
                              expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self()));
    } else {
      MapPlan<SV>(dst, MakePlan(exp.self()));
    }
  }
};
template<typename Saver, typename R, int dim,
         typename DType, typename E, int etype>
inline void MapExp(TRValue<R, cpu, dim, DType> *dst,
                   const expr::Exp<E, DType, etype> &exp) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass>
      ::Error_All_Tensor_in_Exp_Must_Have_Same_Type();
  Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self());
  Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self());
  CHECK(eshape[0] == 0 || eshape == dshape)
      << "Assignment: Shape of Tensors are not consistent with target, "
      << "eshape: " << eshape << " dshape:" << dshape;
  MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass,
                  Saver, R, dim, DType, E, etype>
  ::Map(dst->ptrself(), exp);
}
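// Note (illustrative): MapExp is the sink for element-wise expression
// assignments; e.g. "c = a + b * 2.0f" lowers to
// MapExp<sv::saveto>(&c, a + b * 2.0f), and the engine picks the packet
// (vectorized) path only when PacketCheck and the alignment checks pass.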
template<typename Saver, typename Reducer,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst,
                                const expr::Exp<E, DType, etype> &exp,
                                DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self()).FlatTo2D();
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match";
  CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor";
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t x = 0; x < eshape[1]; ++x) {
    DType res = splan.Eval(0, x);
    for (index_t y = 1; y < eshape[0]; ++y) {
      Reducer::Reduce(res, splan.Eval(y, x));
    }
    Saver::template Save<DType>(dplan.REval(0, x), res * scale);
  }
}
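// Usage sketch (illustrative): reduce a 2-D tensor over its rows (dimension 0)
// into a 1-D tensor of column sums.
//
//   Tensor<cpu, 2, float> mat = NewTensor<cpu>(Shape2(3, 4), 1.0f);
//   Tensor<cpu, 1, float> vec = NewTensor<cpu>(Shape1(4), 0.0f);
//   MapReduceKeepLowest<sv::saveto, red::sum>(&vec, mat, 1.0f);
//   // vec[x] == 3.0f: each column summed over the 3 rows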
template<typename Saver, typename Reducer, int dimkeep,
         typename R, typename DType, typename E, int etype>
inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst,
                                 const expr::Exp<E, DType, etype> &exp,
                                 DType scale) {
  expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass>
      ::Error_TypeCheck_Not_Pass_For_Reduce_Exp();
  typedef Shape<expr::ExpInfo<E>::kDim> EShape;
  EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E>
      ::Check(exp.self());
  Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self());
  CHECK_EQ(eshape[dimkeep], dshape[0])
      << "MapReduceKeepHighDim::reduction dimension do not match";
  // use equivalent 4-D form
  Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep),
                           eshape[dimkeep],
                           eshape.ProdShape(dimkeep + 1, EShape::kSubdim),
                           eshape[EShape::kSubdim]);
  // execution
  expr::Plan<R, DType> dplan = MakePlan(dst->self());
  expr::Plan<E, DType> splan = MakePlan(exp.self());
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t c = 0; c < pshape[1]; ++c) {
    DType res; Reducer::SetInitValue(res);
    for (index_t n = 0; n < pshape[0]; ++n) {
      DType tres; Reducer::SetInitValue(tres);
      for (index_t y = 0; y < pshape[2]; ++y) {
        for (index_t x = 0; x < pshape[3]; ++x) {
          Reducer::Reduce(tres,
                          splan.Eval((n * pshape[1] + c) * pshape[2] + y, x));
        }
      }
      Reducer::Reduce(res, tres);
    }
    Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale));
  }
}
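// Note (illustrative): the Shape4 view factors the source shape as
// (product of dims before dimkeep, shape[dimkeep], product of dims between
// dimkeep and the last, last dim), so the kept axis c can be swept with a
// single flattened 2-D plan regardless of the expression's true rank.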
template<typename DType>
inline void Softmax(Tensor<cpu, 1, DType> dst,
                    const Tensor<cpu, 1, DType> &energy) {
  DType mmax = energy[0];
  for (index_t x = 1; x < dst.size(0); ++x) {
    if (mmax < energy[x]) mmax = energy[x];
  }
  DType sum = DType(0.0f);
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] = std::exp(energy[x] - mmax);
    sum += dst[x];
  }
  for (index_t x = 0; x < dst.size(0); ++x) {
    dst[x] /= sum;
  }
}
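// Numerical-stability note (illustrative): subtracting the maximum first keeps
// the largest exponent at exp(0) = 1, and since
//   exp(e[x] - mmax) / sum_x exp(e[x] - mmax) = exp(e[x]) / sum_x exp(e[x]),
// the result equals the mathematical softmax without overflow on large logits.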
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label) {
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        dst[y][k] = src[y][k] - 1.0f;
      } else {
        dst[y][x] = src[y][x];
      }
    }
  }
}
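// Derivation note (illustrative): for cross-entropy loss L = -log(p[label]),
// dL/d(logit[x]) = p[x] - [x == label], so the gradient is the softmax output
// src with 1 subtracted at the labeled class; src must already hold
// probabilities produced by Softmax.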
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
                              const Tensor<cpu, 2, DType> &src,
                              const Tensor<cpu, 1, DType> &label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const index_t k = static_cast<int>(label[y]);
    for (index_t x = 0; x < dst.size(1); ++x) {
      if (x == k) {
        dst[y][k] = src[y][k] - 1.0f + alpha;
      } else {
        dst[y][x] = src[y][x] - smooth_grad;
      }
    }
  }
}
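// Label-smoothing note (illustrative): with smoothing weight alpha the target
// distribution is (1 - alpha) at the label plus alpha spread uniformly over
// the other size(1) - 1 classes, which yields the +alpha correction at k and
// the constant smooth_grad = alpha / (size(1) - 1) subtracted elsewhere.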
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 2, DType> &src,
                        const Tensor<cpu, 1, DType> &label,
                        const DType &ignore_label) {
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const int k = static_cast<int>(label[y]);
    for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
      if (static_cast<int>(ignore_label) == k) {
        dst[y][x] = 0.0f;
      } else {
        if (x == k) {
          dst[y][k] = src[y][k] - 1.0f;
        } else {
          dst[y][x] = src[y][x];
        }
      }
    }
  }
}
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst,
                              const Tensor<cpu, 2, DType> &src,
                              const Tensor<cpu, 1, DType> &label,
                              const DType &ignore_label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    const int k = static_cast<int>(label[y]);
    for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
      if (static_cast<int>(ignore_label) == k) {
        dst[y][x] = 0.0f;
      } else {
        if (x == k) {
          dst[y][k] = src[y][k] - 1.0f + alpha;
        } else {
          dst[y][x] = src[y][x] - smooth_grad;
        }
      }
    }
  }
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label) {
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
        if (x == k) {
          dst[y][k][n] = src[y][k][n] - 1.0f;
        } else {
          dst[y][x][n] = src[y][x][n];
        }
      }
    }
  }
}
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
                              const Tensor<cpu, 3, DType> &src,
                              const Tensor<cpu, 2, DType> &label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
        if (x == k) {
          dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
        } else {
          dst[y][x][n] = src[y][x][n] - smooth_grad;
        }
      }
    }
  }
}
template<typename DType>
inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst,
                        const Tensor<cpu, 3, DType> &src,
                        const Tensor<cpu, 2, DType> &label,
                        const DType &ignore_label) {
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      if (k == static_cast<int>(ignore_label)) {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          dst[y][x][n] = DType(0.0f);
        }
      } else {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          if (x == k) {
            dst[y][k][n] = src[y][k][n] - 1.0f;
          } else {
            dst[y][x][n] = src[y][x][n];
          }
        }
      }
    }
  }
}
template<typename DType>
inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst,
                              const Tensor<cpu, 3, DType> &src,
                              const Tensor<cpu, 2, DType> &label,
                              const DType &ignore_label,
                              const float alpha) {
  const float smooth_grad = (alpha / (dst.size(1) - 1));
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t n = 0; n < dst.size(2); ++n) {
    for (index_t y = 0; y < dst.size(0); ++y) {
      const int k = static_cast<int>(label[y][n]);
      if (k == static_cast<int>(ignore_label)) {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          dst[y][x][n] = DType(0.0f);
        }
      } else {
        for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) {
          if (x == k) {
            dst[y][k][n] = src[y][k][n] - 1.0f + alpha;
          } else {
            dst[y][x][n] = src[y][x][n] - smooth_grad;
          }
        }
      }
    }
  }
}
template<typename DType>
inline void Softmax(Tensor<cpu, 2, DType> dst,
                    const Tensor<cpu, 2, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    Softmax(dst[y], energy[y]);
  }
}

template<typename DType>
inline void Softmax(Tensor<cpu, 3, DType> dst,
                    const Tensor<cpu, 3, DType> &energy) {
  CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch";
#ifndef __CUDACC__
  #pragma omp parallel for
#endif
  for (openmp_index_t y = 0; y < dst.size(0); ++y) {
    for (index_t n = 0; n < dst.size(2); ++n) {
      DType mmax = energy[y][0][n];
      for (index_t x = 1; x < dst.size(1); ++x) {
        if (mmax < energy[y][x][n]) mmax = energy[y][x][n];
      }
      DType sum = DType(0.0f);
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] = std::exp(energy[y][x][n] - mmax);
        sum += dst[y][x][n];
      }
      for (index_t x = 0; x < dst.size(1); ++x) {
        dst[y][x][n] /= sum;
      }
    }
  }
}
template<bool clip, typename IndexType, typename DType>
inline void AddTakeGrad(Tensor<cpu, 2, DType> dst,
                        const Tensor<cpu, 1, IndexType> &index,
                        const Tensor<cpu, 2, DType> &src) {
  const int K = dst.shape_[0];
  for (index_t y = 0; y < index.size(0); ++y) {
    int j = index[y];
    if (clip) {
      if (j <= 0) j = 0;
      else if (j >= K) j = K - 1;
    } else {
      j %= K;
      if (j < 0) j += K;
    }
    dst[j] += src[y];
  }
}
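// Usage sketch (illustrative): embedding-gradient accumulation,
// dst[index[i]] += src[i]. With clip set, out-of-range token ids are clamped
// into [0, K); otherwise they wrap modulo K (negative ids wrap up into range).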
template<typename IndexType, typename DType>
inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst,
                                  const Tensor<cpu, 1, IndexType> &sorted,
                                  const Tensor<cpu, 1, IndexType> &index,
                                  const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < sorted.size(0); ++y) {
    dst[sorted[y]] += src[index[y]];
  }
}
template<typename IndexType, typename DType>
inline void IndexFill(Tensor<cpu, 2, DType> dst,
                      const Tensor<cpu, 1, IndexType> &index,
                      const Tensor<cpu, 2, DType> &src) {
  for (index_t y = 0; y < index.size(0); ++y) {
    for (index_t j = 0; j < src.size(1); j++) {
      dst[index[y]][j] = src[y][j];
    }
  }
}
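// Note (illustrative): IndexFill scatters whole rows, dst[index[y]] = src[y];
// if index contains duplicates, later rows simply overwrite earlier ones.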
template<typename KDType, typename VDType>
inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values,
                      bool is_ascend) {
  CHECK_EQ(keys.CheckContiguous(), true);
  CHECK_EQ(values.CheckContiguous(), true);
  CHECK_EQ(keys.size(0), values.size(0))
      << "The sizes of key/value are not equal! keys_size: " << keys.size(0)
      << "values_size: " << values.size(0);
  std::vector<size_t> idx(keys.size(0));
  std::vector<KDType> keys_vec(keys.size(0));
  std::vector<VDType> values_vec(values.size(0));
  for (int i = 0; i < keys.size(0); i++) {
    idx[i] = i;
    keys_vec[i] = keys[i];
    values_vec[i] = values[i];
  }
  if (is_ascend) {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                       { return keys_vec[i1] < keys_vec[i2]; });
  } else {
    std::stable_sort(idx.begin(), idx.end(),
                     [&keys_vec](size_t i1, size_t i2)
                       { return keys_vec[i1] > keys_vec[i2]; });
  }
  for (index_t i = 0; i < values.size(0); i++) {
    keys[i] = keys_vec[idx[i]];
    values[i] = values_vec[idx[i]];
  }
}
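// Usage sketch (illustrative): stable co-sort of parallel key/value arrays.
//
//   keys   = {3, 1, 2}, values = {10, 11, 12}
//   SortByKey(keys, values);  // ascending by default
//   keys   = {1, 2, 3}, values = {11, 12, 10}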
template<typename Device, typename VDType, typename SDType>
inline void VectorizedSort(Tensor<Device, 1, VDType> values,
                           Tensor<Device, 1, SDType> segments) {
  // sort each segment via two stable sorts
  SortByKey(values, segments, true);
  SortByKey(segments, values, true);
}
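// Design note (illustrative): two stable sorts give a segmented sort. The
// first pass orders values globally; the second pass stably reorders by
// segment id, preserving the value order among equal segment keys, so values
// end up sorted within each segment.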
template<typename Device, typename DType>
inline void VectorDot(Tensor<Device, 1, DType> dst,
                      const Tensor<Device, 1, DType> &lhs,
                      const Tensor<Device, 1, DType> &rhs) {
  CHECK_EQ(lhs.size(0), rhs.size(0))
      << "VectorDot: Shape mismatch";
  CHECK_EQ(dst.size(0), 1U)
      << "VectorDot: expect dst to be scalar";
  expr::BLASEngine<Device, DType>::SetStream(lhs.stream_);
  mshadow::expr::BLASEngine<Device, DType>::dot(
      lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_);
}
template<bool transpose_left, bool transpose_right, typename Device, typename DType>
inline void BatchGEMM(Tensor<Device, 3, DType> dst,
                      const Tensor<Device, 3, DType> &lhs,
                      const Tensor<Device, 3, DType> &rhs,
                      DType alpha,
                      DType beta,
                      Tensor<Device, 1, DType*> workspace) {
  int batch_size = dst.shape_[0];
  expr::BLASEngine<Device, DType>::SetStream(dst.stream_);
  Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1])
      : lhs.shape_;
  Shape<3> sright = transpose_right ? Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1])
      : rhs.shape_;
  CHECK_EQ(dst.CheckContiguous(), true);
  CHECK_EQ(lhs.CheckContiguous(), true);
  CHECK_EQ(rhs.CheckContiguous(), true);
  CHECK(sleft[0] == batch_size && sright[0] == batch_size)
      << "BatchGEMM: batchsize must be equal."
      << "dst: " << dst.shape_ << "\n"
      << "lhs: " << sleft << "\n"
      << "rhs: " << sright << "\n";
  CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1])
      << "BatchGEMM: matrix shape mismatch"
      << "dst: " << dst.shape_ << "\n"
      << "lhs: " << sleft << "\n"
      << "rhs: " << sright << "\n";
  CHECK(workspace.size(0) >= 3 * batch_size)
      << "Workspace Size must be bigger than " << 3 * batch_size;
  CHECK_EQ(workspace.CheckContiguous(), true);
  // use column-major arguments to be compatible with most BLAS
  expr::BLASEngine<Device, DType>::batched_gemm
      (dst.stream_,
       transpose_right, transpose_left,
       transpose_right ? rhs.size(1) : rhs.size(2),
       transpose_left ? lhs.size(2) : lhs.size(1),
       transpose_right ? rhs.size(2) : rhs.size(1),
       alpha,
       rhs.dptr_, rhs.stride_,
       lhs.dptr_, lhs.stride_,
       beta,
       dst.dptr_, dst.stride_, batch_size,
       workspace.dptr_);
}
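// Note (illustrative): dst[i] = alpha * op(lhs[i]) * op(rhs[i]) + beta * dst[i]
// per batch element. The operands and transpose flags are swapped in the
// underlying call because BLAS is column-major while mshadow tensors are
// row-major: computing op(rhs)^T * op(lhs)^T in column-major storage is
// exactly op(lhs) * op(rhs) read back in row-major order.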
}  // namespace mshadow
#endif  // MSHADOW_TENSOR_CPU_INL_H_