4 changes: 2 additions & 2 deletions README.md
@@ -134,8 +134,8 @@ $ docker build -f dev/Dockerfile -t tfio-dev .
$ docker run -it --rm --net=host -v ${PWD}:/v -w /v tfio-dev
$ # In Docker, configure will install TensorFlow or use existing install
$ ./configure.sh
-$ # Build TensorFlow I/O C++
-$ bazel build -s --verbose_failures //tensorflow_io/...
+$ # Build TensorFlow I/O C++. For compilation optimization flags, the default (-march=native) optimizes the generated code for your machine's CPU type ([see here](https://www.tensorflow.org/install/source#configuration_options))
+$ bazel build -c opt --copt=-march=native -s --verbose_failures //tensorflow_io/...
$ # Run tests with PyTest, note: some tests require launching additional containers to run (see below)
$ pytest tests/
$ # Build the TensorFlow I/O package
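A hedged aside on the hunk above (not part of this change): `-march=native` ties the generated code to the CPU of the build machine. If the resulting binaries need to run on other machines, a more conservative target can be substituted; the exact value depends on the oldest CPU you need to support, for example:

$ # Hypothetical portable build, targeting the generic x86-64 baseline
$ bazel build -c opt --copt=-march=x86-64 -s --verbose_failures //tensorflow_io/...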
6 changes: 5 additions & 1 deletion tensorflow_io/hdf5/kernels/hdf5_input.cc
@@ -105,7 +105,11 @@ class HDF5InputStream{
Tensor tensor(ctx->allocator({}), DT_INT32, shape);
dataset_[i].read(tensor.flat<int32>().data(), H5::PredType::NATIVE_INT, memoryspace, dataspace_[i]);
out_tensors->emplace_back(std::move(tensor));
-} else if (H5Tequal(native_type, H5T_NATIVE_LONG)) {
+} else if (H5Tequal(native_type, H5T_NATIVE_UINT32)) {
+Tensor tensor(ctx->allocator({}), DT_UINT32, shape);
+dataset_[i].read(tensor.flat<uint32>().data(), H5::PredType::NATIVE_UINT32, memoryspace, dataspace_[i]);
+out_tensors->emplace_back(std::move(tensor));
+} else if (H5Tequal(native_type, H5T_NATIVE_LONG)) {
Tensor tensor(ctx->allocator({}), DT_INT64, shape);
dataset_[i].read(tensor.flat<int64>().data(), H5::PredType::NATIVE_LONG, memoryspace, dataspace_[i]);
out_tensors->emplace_back(std::move(tensor));