26 #ifndef MXNET_COMMON_TENSOR_INSPECTOR_H_
27 #define MXNET_COMMON_TENSOR_INSPECTOR_H_
34 #include "../../3rdparty/mshadow/mshadow/base.h"
// Lazily-constructed, process-wide singleton state for InspectorManager:
// a mutex guarding first construction plus the owning unique_ptr.
// NOTE(review): the enclosing function's signature is not visible in this
// view — presumably a static get()-style accessor; confirm against full file.
44 static std::mutex mtx;
45 static std::unique_ptr<InspectorManager> im =
nullptr;
// Serialize construction so concurrent first calls cannot double-create.
47 std::unique_lock<std::mutex> lk(mtx);
49 im = std::make_unique<InspectorManager>();
// Appends a one-line summary of this tensor — "<dtype Tensor d0xd1x...>" —
// to *os, using the human-readable dtype name and tb_'s shape.
110 template <
typename DType,
typename StreamType>
111 void tensor_info_to_string(StreamType* os) {
112 const int dimension = tb_.
ndim();
// Header: "<" + readable dtype name + " Tensor ".
113 *os <<
"<" << infer_type_string(
typeid(DType)) <<
" Tensor ";
// Dimensions 1..n-1 are joined with 'x'; the line printing dimension 0
// is elided in this view.
115 for (
int i = 1; i < dimension; ++i) {
116 *os <<
'x' << tb_.
shape_[i];
118 *os <<
">" << std::endl;
// Overload of tensor_info_to_string() that describes an explicit `shape`
// (e.g. a sub-tensor's shape) instead of tb_'s own shape.
128 template <
typename DType,
typename StreamType>
129 void tensor_info_to_string(StreamType* os,
const std::vector<index_t>& shape) {
130 const int dimension = shape.size();
131 *os <<
"<" << infer_type_string(
typeid(DType)) <<
" Tensor ";
// Dimensions 1..n-1 joined with 'x'; dimension 0's print is elided here.
133 for (
int i = 1; i < dimension; ++i) {
134 *os <<
'x' << shape[i];
136 *os <<
">" << std::endl;
// Streams the entire tensor to *os in nested "[[...], [...]]" form, then
// appends the tensor info line.
145 template <
typename DType,
typename StreamType>
146 void to_string_helper(StreamType* os) {
// GPU path (guarded by MXNET_USE_CUDA; the #if line is elided in this
// view): copy the tensor to CPU and recurse on the copy.
149 TensorInspector(test::CAccessAsCPU(ctx_, tb_,
false)(), ctx_).to_string_helper<DType>(os);
152 #endif // MXNET_USE_CUDA
153 const int dimension = tb_.
ndim();
// offsets accumulates products of trailing dimension sizes; used to decide
// how many brackets to close/reopen at each element boundary.
154 std::vector<index_t> offsets;
156 for (
int i = dimension - 1; i >= 0; --i) {
157 multiple *= tb_.
shape_[i];
158 offsets.push_back(multiple);
// Open all brackets, then print element 0.
160 *os << std::string(dimension,
'[');
161 *os << tb_.
dptr<DType>()[0];
// Per-element loop (partially elided in this view): at every boundary
// where an offset divides the index, close and reopen n brackets.
164 for (
auto off : offsets) {
168 *os << std::string(n,
']') <<
", " << std::string(n,
'[');
172 *os << tb_.
dptr<DType>()[i];
// Close all brackets and append the "<dtype Tensor ...>" summary line.
174 *os << std::string(dimension,
']') << std::endl;
175 tensor_info_to_string<DType>(os);
// Streams the single value at *dptr to *os, followed by its C++ type name.
185 template <
typename DType,
typename StreamType>
186 void to_string_helper(StreamType* os,
const DType* dptr) {
// GPU path (the #if / CPU-copy lines are partially elided in this view):
// recurse on a CPU-accessible copy.
190 .to_string_helper<DType>(os, dptr);
193 #endif // MXNET_USE_CUDA
194 *os << *dptr << std::endl;
// typeid().name() yields the implementation-defined (often mangled) name.
195 *os <<
"<" <<
typeid(*dptr).name() <<
">" << std::endl;
// Streams the sub-tensor of shape `sub_shape` starting at flat `offset`
// to *os; an empty sub_shape means a single scalar value.
206 template <
typename DType,
typename StreamType>
207 void to_string_helper(StreamType* os,
const std::vector<index_t>& sub_shape,
index_t offset) {
// GPU path (partially elided in this view): recurse on a CPU copy.
211 .to_string_helper<DType>(os, sub_shape, offset);
214 #endif // MXNET_USE_CUDA
215 DType* dptr = tb_.
dptr<DType>() + offset;
// Scalar case: delegate to the single-value overload.
216 if (sub_shape.size() == 0) {
217 to_string_helper<DType>(os, dptr);
220 const int dimension = sub_shape.size();
// Same bracket bookkeeping as the full-tensor printer, but over sub_shape.
221 std::vector<index_t> offsets;
223 for (
int i = dimension - 1; i >= 0; --i) {
224 multiple *= sub_shape[i];
225 offsets.push_back(multiple);
227 std::stringstream ss;
228 *os << std::string(dimension,
'[');
// Per-element loop (body partially elided in this view).
230 for (
index_t i = 1; i < multiple; ++i) {
232 for (
auto off : offsets) {
236 *os << std::string(n,
']') <<
", " << std::string(n,
'[');
// Close all brackets and append the sub-tensor info line.
242 *os << std::string(dimension,
']') << std::endl;
243 tensor_info_to_string<DType>(os, sub_shape);
// Given leading coordinates `pos`, computes (a) the shape of the remaining
// trailing dimensions into *sub_shape and (b) the flat offset where that
// sub-tensor starts into *offset.
253 void print_locator(
const std::vector<index_t>& pos,
254 std::vector<index_t>* sub_shape,
256 const int dimension = tb_.
ndim();
// Trailing (unfixed) dimensions form the sub-shape; `multiple` accumulates
// their total element count.
257 const int sub_dim = dimension - pos.size();
258 sub_shape->resize(sub_dim);
260 for (
size_t i = pos.size(), j = 0; i <
static_cast<size_t>(dimension); ++i, ++j) {
261 (*sub_shape)[j] = tb_.
shape_[i];
262 multiple *= tb_.
shape_[i];
// Row-major flatten of the fixed coordinates (loop body partially elided).
// NOTE(review): `index_t i = pos.size() - 1; i >= 0` relies on index_t
// being signed and pos being non-empty (parse_position enforces the
// latter) — confirm index_t's signedness.
266 for (
index_t i = pos.size() - 1; i >= 0; --i) {
270 *offset = sum * multiple;
// Parses a comma-separated coordinate list from `str` into *pos and bounds-
// checks every coordinate against the tensor shape. Returns true iff at
// least one coordinate was parsed and all checks passed (the early-return
// lines are elided in this view).
278 bool parse_position(std::vector<index_t>* pos,
const std::string& str) {
279 const int dimension = tb_.
ndim();
280 std::istringstream ss(str);
// Consume ',' separators between numbers (read loop partially elided).
284 if (ss.peek() ==
',') {
// Reject positions with more coordinates than the tensor has dimensions.
288 if (pos->size() >
static_cast<size_t>(dimension)) {
// Reject coordinates outside [0, shape_[i] - 1].
291 for (
size_t i = 0; i < pos->size(); ++i) {
292 if ((*pos)[i] > (tb_.
shape_[i] - 1) || (*pos)[i] < 0) {
296 return !pos->empty();
// Interactive console loop for inspecting this tensor: prints a banner,
// the tag, and the tensor summary, then reads commands — a coordinate list
// to print a sub-tensor, "e" for the entire tensor, "d" to dump to a .npy
// file, "b" to break, "s" to skip all. (The command-read loop and several
// branches are partially elided in this view.)
// Fix: "seperated" -> "separated" in the user-facing prompt.
304 template <
typename DType>
305 void interactive_print_helper(std::string tag) {
// GPU path (partially elided): recurse on a CPU-accessible copy.
309 .interactive_print_helper<DType>(tag);
312 #endif // MXNET_USE_CUDA
316 std::cout <<
"----------Interactive Print----------" << std::endl;
318 std::cout <<
"Tag: " << tag
322 tensor_info_to_string<DType>(&std::cout);
323 std::cout <<
"To print a part of the tensor, "
324 <<
"please specify a position, separated by \",\"" << std::endl;
325 std::cout <<
"\"e\" for the entire tensor, "
326 <<
"\"d\" to dump value to file, "
327 <<
"\"b\" to break, "
328 <<
"\"s\" to skip all: ";
// "e": print the whole tensor.
333 }
else if (str ==
"e") {
334 to_string_helper<DType>(&std::cout);
336 }
else if (str ==
"s") {
339 }
else if (str ==
"d") {
// "d": prompt for a tag (no spaces allowed) and dump to a .npy file.
341 std::cout <<
"Please enter a tag: ";
343 if (str.find(
' ') != std::string::npos) {
344 std::cout <<
"Invalid tag name. No space allowed.";
347 dump_to_file_helper<DType>(str);
// Otherwise try to parse the input as coordinates and print the located
// sub-tensor.
352 std::vector<index_t> pos;
353 if (parse_position(&pos, str)) {
354 std::vector<index_t> sub_shape;
356 print_locator(pos, &sub_shape, &offset);
357 to_string_helper<DType>(&std::cout, sub_shape, offset);
359 std::cout <<
"invalid command/indices" << std::endl;
// Maps a CheckerType enum to a predicate lambda over single values.
// Float-only checkers (NaN/Inf variants) warn and fall through to an
// always-false lambda for non-float DTypes, as the warning strings state.
// (Case labels and some returns are elided in this view.)
369 template <
typename DType>
370 std::function<bool(DType)> get_checker(
CheckerType ct) {
// Negative / positive / zero checks.
373 return [](DType x) {
return x < 0; };
375 return [](DType x) {
return x > 0; };
377 return [](DType x) {
return x == 0; };
// NaN check: x != x is true only for NaN.
379 if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
380 std::is_same<DType, mshadow::half::half_t>::value) {
381 return [](DType x) {
return x != x; };
383 LOG(WARNING) <<
"NaNChecker only applies to float types. "
384 <<
"Lambda will always return false.";
// Inf check: equal to +inf or -inf, built via 1.0/0.0f.
388 if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
389 std::is_same<DType, mshadow::half::half_t>::value) {
390 return [](DType x) {
return x == (DType)1.0 / 0.0f || x == -(DType)1.0 / 0.0f; };
392 LOG(WARNING) <<
"InfChecker only applies to float types. "
393 <<
"Lambda will always return false.";
// +Inf only.
397 if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
398 std::is_same<DType, mshadow::half::half_t>::value) {
399 return [](DType x) {
return x == (DType)1.0 / 0.0f; };
401 LOG(WARNING) <<
"PositiveInfChecker only applies to float types. "
402 <<
"Lambda will always return false.";
// -Inf only.
406 if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
407 std::is_same<DType, mshadow::half::half_t>::value) {
408 return [](DType x) {
return x == -(DType)1.0 / 0.0f; };
410 LOG(WARNING) <<
"NegativeInfChecker only applies to float types. "
411 <<
"Lambda will always return false.";
// Finite: not +/-inf. NOTE(review): this lambda does not exclude NaN
// (NaN != inf holds); NormalChecker below adds the x == x condition.
415 if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
416 std::is_same<DType, mshadow::half::half_t>::value) {
417 return [](DType x) {
return x != (DType)1.0 / 0.0f && x != -(DType)1.0 / 0.0f; };
419 LOG(WARNING) <<
"FiniteChecker only applies to float types. "
420 <<
"Lambda will always return false.";
// Normal: finite and not NaN.
424 if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
425 std::is_same<DType, mshadow::half::half_t>::value) {
427 [](DType x) {
return x != (DType)1.0 / 0.0f && x != -(DType)1.0 / 0.0f && x == x; };
429 LOG(WARNING) <<
"NormalChecker only applies to float types. "
430 <<
"Lambda will always return false.";
// Abnormal: +/-inf or NaN.
434 if (std::is_same<DType, float>::value || std::is_same<DType, double>::value ||
435 std::is_same<DType, mshadow::half::half_t>::value) {
437 [](DType x) {
return x == (DType)1.0 / 0.0f || x == -(DType)1.0 / 0.0f || x != x; };
439 LOG(WARNING) <<
"AbnormalChecker only applies to float types. "
440 <<
"Lambda will always return false.";
// Fallbacks: match nothing.
444 return [](DType x) {
return false; };
446 return [](DType x) {
return false; };
// Converts a flat (row-major) element index into per-dimension coordinates
// by taking modulo over trailing dimension sizes (the matching `idx /=`
// division line is elided in this view), then reversing into natural
// dimension order.
453 std::vector<index_t> index_to_coordinates(
index_t idx) {
454 const int dimension = tb_.
ndim();
455 std::vector<index_t> ret;
456 for (
int i = dimension - 1; i >= 0; --i) {
457 ret.push_back(idx % tb_.
shape_[i]);
460 std::reverse(ret.begin(), ret.end());
// Scans every element of the tensor with `checker`, collecting coordinates
// of matches into *ret and a printable "(c0, c1, ...)" list into ss; in
// interactive mode, then offers the same command loop as interactive print
// ("e" entire tensor, "p" print found coordinates, "b" break, "s" skip,
// or a coordinate list). Several lines (the scan loop header, count, the
// command-read loop) are elided in this view.
// Fix: "seperated" -> "separated" in the user-facing prompt.
473 template <
typename DType>
474 void check_value_helper(std::vector<std::vector<index_t>>* ret,
475 const std::function<
bool(DType)>& checker,
// GPU path (partially elided): recurse on a CPU-accessible copy.
481 .check_value_helper<DType>(ret, checker, interactive, tag);
483 #endif // MXNET_USE_CUDA
485 std::stringstream ss;
487 bool first_pass =
true;
// Element scan: record each value the checker flags.
489 if (checker(tb_.
dptr<DType>()[i])) {
495 std::vector<index_t> coords = index_to_coordinates(i);
// NOTE(review): the inner `size_t i` shadows the outer scan index `i`;
// harmless here but worth renaming in a full cleanup.
496 ss <<
"(" << coords[0];
497 for (
size_t i = 1; i < coords.size(); ++i) {
498 ss <<
", " << coords[i];
501 ret->push_back(coords);
504 ss <<
"]" << std::endl;
// Interactive report and command loop.
509 std::cout <<
"----------Value Check----------" << std::endl;
510 tensor_info_to_string<DType>(&std::cout);
512 std::cout <<
"Tag: " << tag
516 std::cout << count <<
" value(s) found." << std::endl;
517 std::cout <<
"To print a part of the tensor,"
518 <<
" please specify a position, separated by \",\"" << std::endl;
519 std::cout <<
"\"e\" for the entire tensor, "
520 <<
"\"p\" to print the coordinates of the values found, "
521 <<
"\"b\" to break, "
522 <<
"\"s\" to skip all: ";
527 }
else if (str ==
"e") {
528 to_string_helper<DType>(&std::cout);
530 }
else if (str ==
"p") {
531 std::cout << ss.str() << std::endl;
533 }
else if (str ==
"s") {
// Otherwise parse coordinates and print the located sub-tensor.
537 std::vector<index_t> pos;
538 if (parse_position(&pos, str)) {
539 std::vector<index_t> sub_shape;
541 print_locator(pos, &sub_shape, &offset);
542 to_string_helper<DType>(&std::cout, sub_shape, offset);
544 std::cout <<
"invalid command/indices" << std::endl;
// Maps a type_info to a single dtype character code; get_header() appends
// this after the endianness char inside the .npy 'descr' field. The
// individual `return` lines are elided in this view — only the comparison
// chain is visible.
554 inline char infer_type(
const std::type_info& ti) {
555 if (ti ==
typeid(
float))
557 else if (ti ==
typeid(
double))
559 else if (ti ==
typeid(mshadow::half::half_t))
561 else if (ti ==
typeid(uint8_t))
563 else if (ti ==
typeid(int32_t))
565 else if (ti ==
typeid(int64_t))
// Returns a human-readable element-type name, used by tensor_info_to_string()
// for the "<dtype Tensor ...>" summary. The `return` lines for
// float/double/uint8_t/int32_t/int64_t are elided in this view; only the
// comparison chain is visible.
// Fixes: "mshasow::half::half_t" -> "mshadow::half::half_t" and
// "unknown tyoe" -> "unknown type" (display-string typos).
575 inline std::string infer_type_string(
const std::type_info& ti) {
576 if (ti ==
typeid(
float))
578 else if (ti ==
typeid(
double))
580 else if (ti ==
typeid(mshadow::half::half_t))
581 return "mshadow::half::half_t";
582 else if (ti ==
typeid(uint8_t))
584 else if (ti ==
typeid(int32_t))
586 else if (ti ==
typeid(int64_t))
// Fallback for dtypes not covered above.
589 return "unknown type";
// Returns the NumPy byte-order character for this machine by inspecting
// the lowest byte of an integer (whose declaration is elided in this
// view): '<' for little-endian, '>' for big-endian.
595 inline char endian_test() {
597 return (
reinterpret_cast<char*
>(&x)[0]) ?
'<' :
'>';
// Builds the .npy v1.0 header for this tensor: magic bytes, version, a
// little-endian uint16 dict length, and the Python-dict metadata
// ('descr', 'fortran_order', 'shape'), space-padded so the total header
// size is a multiple of 64 bytes.
604 template <
typename DType>
605 std::string get_header() {
606 const int dimension = tb_.
ndim();
// 'descr' = endianness char + dtype char code.
608 dict +=
"{'descr':'";
609 dict += endian_test();
610 dict += infer_type(
typeid(DType));
612 dict +=
"','fortran_order':False,'shape':(";
// Append dimensions 1..n-1 (dimension 0's append is elided in this view).
614 for (
int i = 1; i < dimension; ++i) {
// 1-D shapes need the trailing comma of a one-element Python tuple: (n,).
618 if (dimension == 1) {
// Pad so 10 bytes (magic + version + length) + dict is 64-aligned.
622 int padding_size = 64 - ((10 + dict.size()) % 64);
623 dict += std::string(padding_size,
' ');
// Magic byte 0x93 (followed by "NUMPY", elided here), version 1.0, then
// the dict size as a little-endian uint16.
626 header +=
static_cast<char>(0x93);
628 header +=
static_cast<char>(0x01);
629 header +=
static_cast<char>(0x00);
630 header +=
static_cast<char>((uint16_t)dict.size() & 0x00ff);
631 header +=
static_cast<char>(((uint16_t)dict.size() >> 8) & 0x00ff);
// Writes `header` followed by the tensor's raw element bytes to `filename`
// (a .npy file). Stream errors are converted to exceptions and reported
// to stderr.
642 template <
typename DType>
643 void write_npy(
const std::string& header,
const std::string& filename) {
// failbit/badbit -> exceptions, so any open/write error reaches the catch.
645 file.exceptions(std::ofstream::failbit | std::ofstream::badbit);
647 file.open(filename, std::ios::out | std::ios::binary);
648 file.write(header.c_str(), header.size());
// Raw dump: element size * total element count.
649 file.write(
reinterpret_cast<char*
>(tb_.
dptr<DType>()),
sizeof(DType) * tb_.
shape_.
Size());
651 std::cout <<
"Tensor dumped to file: " << filename << std::endl;
652 }
// NOTE(review): catches by value; `catch (const std::ofstream::failure&)`
// would avoid a copy and is the conventional form.
catch (std::ofstream::failure e) {
653 std::cerr <<
"Exception opening/writing/closing file " << filename << std::endl;
// Dumps this tensor to "<tag>_<visit>.npy" using get_header()/write_npy().
// `visit` is declared outside this view — presumably a per-tag visit
// counter from InspectorManager; confirm against full file.
663 template <
typename DType>
664 void dump_to_file_helper(
const std::string& tag) {
// GPU path (guarded by MXNET_USE_CUDA): copy to CPU and recurse.
667 TensorInspector(test::CAccessAsCPU(ctx_, tb_,
false)(), ctx_).dump_to_file_helper<DType>(tag);
670 #endif // MXNET_USE_CUDA
671 std::string header = get_header<DType>();
674 std::string filename = tag +
"_" +
std::to_string(visit) +
".npy";
675 write_npy<DType>(header, filename);
// Validates that the tensor has at least one dimension and that no
// dimension has size zero; aborts via CHECK otherwise.
// Fix: "unknow" -> "unknown" in the CHECK failure message.
681 inline void validate_shape() {
682 const int dimension = tb_.
ndim();
683 CHECK(dimension > 0) <<
"Tensor Inspector does not support empty tensors "
684 <<
"or tensors of unknown shape.";
// Every dimension must be non-zero.
685 for (
int i = 0; i < dimension; ++i) {
686 CHECK(tb_.
shape_[i] != 0) <<
"Invalid tensor shape: shape_[" << i <<
"] is 0";
// Constructor fragment: initializes the inspected TBlob (tb_) and run
// context (ctx_). The parameter list and body are elided in this view;
// the later `std::stringstream ss;` (original line 739) belongs to a
// different, non-visible member function — confirm against full file.
704 template <
typename Device,
int dimension,
typename DType>
706 : tb_(ts), ctx_(ctx) {
739 std::stringstream ss;
// Scans the tensor with an arbitrary `checker` predicate and returns the
// coordinates of every matching value; `interactive` enables the console
// report loop, `tag` labels the output. (The dtype-dispatch wrapper around
// the helper call and the return are elided in this view.)
// Fix: the helper call passed a stray extra `ret` argument
// (&ret, checker, ret, interactive, tag), which does not match
// check_value_helper's signature (ret*, checker, interactive, tag) as used
// by both its own recursive call and the CheckerType overload below.
760 template <
typename ValueChecker>
761 std::vector<std::vector<index_t>>
check_value(
const ValueChecker& checker,
762 bool interactive =
false,
763 std::string tag =
"") {
764 std::vector<std::vector<index_t>> ret;
766 check_value_helper<DType>(&ret, checker, interactive, tag);
// Overload of check_value taking a CheckerType enum (its first signature
// line is elided in this view); builds the predicate via get_checker()
// and delegates to check_value_helper.
779 bool interactive =
false,
780 std::string tag =
"") {
781 std::vector<std::vector<index_t>> ret;
783 check_value_helper<DType>(&ret, get_checker<DType>(ct), interactive, tag);
799 #endif // MXNET_COMMON_TENSOR_INSPECTOR_H_