51 changes: 51 additions & 0 deletions tensorflow_io/core/python/ops/csv_io_tensor_ops.py
@@ -0,0 +1,51 @@
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CSVIOTensor"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import uuid

import tensorflow as tf
from tensorflow_io.core.python.ops import io_tensor_ops
from tensorflow_io.core.python.ops import core_ops

class CSVIOTensor(io_tensor_ops._TableIOTensor):  # pylint: disable=protected-access
  """CSVIOTensor"""

  #=============================================================================
  # Constructor (private)
  #=============================================================================
  def __init__(self,
               filename,
               internal=False):
    with tf.name_scope("CSVIOTensor") as scope:
      resource, columns = core_ops.csv_indexable_init(
          filename,
          container=scope,
          shared_name="%s/%s" % (filename, uuid.uuid4().hex))
      columns = [column.decode() for column in columns.numpy().tolist()]
      spec = []
      for column in columns:
        shape, dtype = core_ops.csv_indexable_spec(resource, column)
        shape = tf.TensorShape(shape)
        dtype = tf.as_dtype(dtype.numpy())
        spec.append(tf.TensorSpec(shape, dtype, column))
      spec = tuple(spec)
      super(CSVIOTensor, self).__init__(
          spec, columns,
          resource, core_ops.csv_indexable_get_item,
          internal=internal)
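
For reference, a minimal sketch of how the two ops used by this constructor behave in eager mode, assuming the generated wrappers in `core_ops` keep the argument order of the op definitions in `text_ops.cc`; the file name `example.csv` is hypothetical.

```python
# Hypothetical sketch: resolve the column list and per-column spec by hand,
# mirroring what CSVIOTensor.__init__ does above.
import tensorflow as tf
from tensorflow_io.core.python.ops import core_ops

resource, columns = core_ops.csv_indexable_init(
    "example.csv",
    container="CSVIOTensor",
    shared_name="example.csv/demo")
for column in columns.numpy().tolist():
    shape, dtype = core_ops.csv_indexable_spec(resource, column.decode())
    print(column.decode(), list(shape.numpy()), tf.as_dtype(dtype.numpy()))
```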
18 changes: 18 additions & 0 deletions tensorflow_io/core/python/ops/io_tensor.py
@@ -26,6 +26,7 @@
from tensorflow_io.core.python.ops import lmdb_io_tensor_ops
from tensorflow_io.core.python.ops import prometheus_io_tensor_ops
from tensorflow_io.core.python.ops import feather_io_tensor_ops
from tensorflow_io.core.python.ops import csv_io_tensor_ops

class IOTensor(io_tensor_ops._IOTensor):  # pylint: disable=protected-access
  """IOTensor
@@ -364,3 +365,20 @@ def from_hdf5(cls,
"""
with tf.name_scope(kwargs.get("name", "IOFromHDF5")):
return hdf5_io_tensor_ops.HDF5IOTensor(filename, internal=True)

  @classmethod
  def from_csv(cls,
               filename,
               **kwargs):
    """Creates an `IOTensor` from a CSV file.

    Args:
      filename: A string, the filename of a CSV file.
      name: A name prefix for the IOTensor (optional).

    Returns:
      An `IOTensor`.

    """
    with tf.name_scope(kwargs.get("name", "IOFromCSV")):
      return csv_io_tensor_ops.CSVIOTensor(filename, internal=True)
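
A hedged usage sketch for the new entry point; `example.csv` and the column name `price` are hypothetical, `tfio.IOTensor` is assumed to be re-exported at the package top level, and the column selection/slicing behaviour is assumed to come from the existing `_TableIOTensor` base class rather than anything added in this change.

```python
# Hypothetical usage sketch for IOTensor.from_csv.
import tensorflow_io as tfio

csv = tfio.IOTensor.from_csv("example.csv")
print(csv.spec)        # one tf.TensorSpec per CSV column
price = csv("price")   # select a single column
print(price[0:10])     # slicing is served by CSVIndexableGetItem
```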
2 changes: 2 additions & 0 deletions tensorflow_io/text/BUILD
@@ -10,6 +10,7 @@ load(
cc_library(
    name = "text_ops",
    srcs = [
        "kernels/csv_kernels.cc",
        "kernels/csv_output.cc",
        "kernels/text_kernels.cc",
        "kernels/text_output.cc",
@@ -23,6 +24,7 @@
    ],
    linkstatic = True,
    deps = [
        "//tensorflow_io/arrow:arrow_ops",
        "//tensorflow_io/core:dataset_ops",
        "//tensorflow_io/core:output_ops",
        "//tensorflow_io/core:sequence_ops",
225 changes: 225 additions & 0 deletions tensorflow_io/text/kernels/csv_kernels.cc
@@ -0,0 +1,225 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow_io/core/kernels/stream.h"
#include "tensorflow/core/lib/io/buffered_inputstream.h"
#include "tensorflow_io/core/kernels/io_interface.h"
#include "tensorflow_io/core/kernels/stream.h"
#include "arrow/memory_pool.h"
#include "arrow/csv/reader.h"
#include "arrow/table.h"
#include "tensorflow_io/arrow/kernels/arrow_kernels.h"

namespace tensorflow {
namespace data {

class CSVIndexable : public IOIndexableInterface {
 public:
  CSVIndexable(Env* env)
  : env_(env) {}

  ~CSVIndexable() {}
  Status Init(const std::vector<string>& input, const std::vector<string>& metadata, const void* memory_data, const int64 memory_size) override {
    if (input.size() > 1) {
      return errors::InvalidArgument("more than 1 filename is not supported");
    }
    const string& filename = input[0];
    file_.reset(new SizedRandomAccessFile(env_, filename, memory_data, memory_size));
    TF_RETURN_IF_ERROR(file_->GetFileSize(&file_size_));

    csv_file_.reset(new ArrowRandomAccessFile(file_.get(), file_size_));

    ::arrow::Status status;

    status = ::arrow::csv::TableReader::Make(::arrow::default_memory_pool(), csv_file_, ::arrow::csv::ReadOptions::Defaults(), ::arrow::csv::ParseOptions::Defaults(), ::arrow::csv::ConvertOptions::Defaults(), &reader_);
    if (!status.ok()) {
      return errors::InvalidArgument("unable to make a TableReader: ", status);
    }
    status = reader_->Read(&table_);
    if (!status.ok()) {
      return errors::InvalidArgument("unable to read table: ", status);
    }

    for (int i = 0; i < table_->num_columns(); i++) {
      ::tensorflow::DataType dtype;
      switch (table_->column(i)->type()->id()) {
      case ::arrow::Type::BOOL:
        dtype = ::tensorflow::DT_BOOL;
        break;
      case ::arrow::Type::UINT8:
        dtype = ::tensorflow::DT_UINT8;
        break;
      case ::arrow::Type::INT8:
        dtype = ::tensorflow::DT_INT8;
        break;
      case ::arrow::Type::UINT16:
        dtype = ::tensorflow::DT_UINT16;
        break;
      case ::arrow::Type::INT16:
        dtype = ::tensorflow::DT_INT16;
        break;
      case ::arrow::Type::UINT32:
        dtype = ::tensorflow::DT_UINT32;
        break;
      case ::arrow::Type::INT32:
        dtype = ::tensorflow::DT_INT32;
        break;
      case ::arrow::Type::UINT64:
        dtype = ::tensorflow::DT_UINT64;
        break;
      case ::arrow::Type::INT64:
        dtype = ::tensorflow::DT_INT64;
        break;
      case ::arrow::Type::HALF_FLOAT:
        dtype = ::tensorflow::DT_HALF;
        break;
      case ::arrow::Type::FLOAT:
        dtype = ::tensorflow::DT_FLOAT;
        break;
      case ::arrow::Type::DOUBLE:
        dtype = ::tensorflow::DT_DOUBLE;
        break;
      case ::arrow::Type::STRING:
      case ::arrow::Type::BINARY:
      case ::arrow::Type::FIXED_SIZE_BINARY:
      case ::arrow::Type::DATE32:
      case ::arrow::Type::DATE64:
      case ::arrow::Type::TIMESTAMP:
      case ::arrow::Type::TIME32:
      case ::arrow::Type::TIME64:
      case ::arrow::Type::INTERVAL:
      case ::arrow::Type::DECIMAL:
      case ::arrow::Type::LIST:
      case ::arrow::Type::STRUCT:
      case ::arrow::Type::UNION:
      case ::arrow::Type::DICTIONARY:
      case ::arrow::Type::MAP:
      default:
        return errors::InvalidArgument("arrow data type is not supported: ", table_->column(i)->type()->ToString());
      }
      shapes_.push_back(TensorShape({static_cast<int64>(table_->num_rows())}));
      dtypes_.push_back(dtype);
      columns_.push_back(table_->column(i)->name());
      columns_index_[table_->column(i)->name()] = i;
    }

    return Status::OK();
  }
  Status Component(Tensor* component) override {
    *component = Tensor(DT_STRING, TensorShape({static_cast<int64>(columns_.size())}));
    for (size_t i = 0; i < columns_.size(); i++) {
      component->flat<string>()(i) = columns_[i];
    }
    return Status::OK();
  }
  Status Spec(const Tensor& component, PartialTensorShape* shape, DataType* dtype) override {
    if (columns_index_.find(component.scalar<string>()()) == columns_index_.end()) {
      return errors::InvalidArgument("component ", component.scalar<string>()(), " is invalid");
    }
    int64 column_index = columns_index_[component.scalar<string>()()];
    *shape = shapes_[column_index];
    *dtype = dtypes_[column_index];
    return Status::OK();
  }

  Status GetItem(const int64 start, const int64 stop, const int64 step, const Tensor& component, Tensor* tensor) override {
    if (step != 1) {
      return errors::InvalidArgument("step ", step, " is not supported");
    }
    if (columns_index_.find(component.scalar<string>()()) == columns_index_.end()) {
      return errors::InvalidArgument("component ", component.scalar<string>()(), " is invalid");
    }
    int64 column_index = columns_index_[component.scalar<string>()()];

    std::shared_ptr<::arrow::Column> slice = table_->column(column_index)->Slice(start, stop);

#define PROCESS_TYPE(TTYPE,ATYPE) { \
      int64 curr_index = 0; \
      for (auto chunk : slice->data()->chunks()) { \
        for (int64_t item = 0; item < chunk->length(); item++) { \
          tensor->flat<TTYPE>()(curr_index) = (dynamic_cast<ATYPE *>(chunk.get()))->Value(item); \
          curr_index++; \
        } \
      } \
    }
    switch (tensor->dtype()) {
    case DT_BOOL:
      PROCESS_TYPE(bool, ::arrow::BooleanArray);
      break;
    case DT_INT8:
      PROCESS_TYPE(int8, ::arrow::NumericArray<::arrow::Int8Type>);
      break;
    case DT_UINT8:
      PROCESS_TYPE(uint8, ::arrow::NumericArray<::arrow::UInt8Type>);
      break;
    case DT_INT16:
      PROCESS_TYPE(int16, ::arrow::NumericArray<::arrow::Int16Type>);
      break;
    case DT_UINT16:
      PROCESS_TYPE(uint16, ::arrow::NumericArray<::arrow::UInt16Type>);
      break;
    case DT_INT32:
      PROCESS_TYPE(int32, ::arrow::NumericArray<::arrow::Int32Type>);
      break;
    case DT_UINT32:
      PROCESS_TYPE(uint32, ::arrow::NumericArray<::arrow::UInt32Type>);
      break;
    case DT_INT64:
      PROCESS_TYPE(int64, ::arrow::NumericArray<::arrow::Int64Type>);
      break;
    case DT_UINT64:
      PROCESS_TYPE(uint64, ::arrow::NumericArray<::arrow::UInt64Type>);
      break;
    case DT_FLOAT:
      PROCESS_TYPE(float, ::arrow::NumericArray<::arrow::FloatType>);
      break;
    case DT_DOUBLE:
      PROCESS_TYPE(double, ::arrow::NumericArray<::arrow::DoubleType>);
      break;
    default:
      return errors::InvalidArgument("data type is not supported: ", DataTypeString(tensor->dtype()));
    }

    return Status::OK();
  }

  string DebugString() const override {
    mutex_lock l(mu_);
    return strings::StrCat("CSVIndexable");
  }
 private:
  mutable mutex mu_;
  Env* env_ GUARDED_BY(mu_);
  std::unique_ptr<SizedRandomAccessFile> file_ GUARDED_BY(mu_);
  uint64 file_size_ GUARDED_BY(mu_);
  std::shared_ptr<ArrowRandomAccessFile> csv_file_;
  std::shared_ptr<::arrow::csv::TableReader> reader_;
  std::shared_ptr<::arrow::Table> table_;

  std::vector<DataType> dtypes_;
  std::vector<TensorShape> shapes_;
  std::vector<string> columns_;
  std::unordered_map<string, int64> columns_index_;
};

REGISTER_KERNEL_BUILDER(Name("CSVIndexableInit").Device(DEVICE_CPU),
IOInterfaceInitOp<CSVIndexable>);
REGISTER_KERNEL_BUILDER(Name("CSVIndexableSpec").Device(DEVICE_CPU),
IOInterfaceSpecOp<CSVIndexable>);
REGISTER_KERNEL_BUILDER(Name("CSVIndexableGetItem").Device(DEVICE_CPU),
IOIndexableGetItemOp<CSVIndexable>);
} // namespace data
} // namespace tensorflow
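
Since the kernel delegates all parsing to Arrow, the pyarrow snippet below illustrates the data `Init` ends up holding: a fully materialised table whose schema drives each column's dtype and whose row count becomes each column's shape. The snippet is only an illustration and `example.csv` is hypothetical.

```python
# Hypothetical illustration with pyarrow of what CSVIndexable::Init sees.
import pyarrow.csv as pacsv

table = pacsv.read_csv("example.csv")
print(table.num_rows)    # length of every column tensor
for field in table.schema:
    # Only bool, integer, and floating-point Arrow types are mapped to a
    # TensorFlow dtype in Init; anything else is rejected with InvalidArgument.
    print(field.name, field.type)
```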
41 changes: 41 additions & 0 deletions tensorflow_io/text/ops/text_ops.cc
@@ -135,4 +135,45 @@ REGISTER_OP("TextOutputSequenceSetItem")
    .SetIsStateful()
    .SetShapeFn(shape_inference::ScalarShape);

REGISTER_OP("CSVIndexableInit")
.Input("input: string")
.Output("output: resource")
.Output("component: string")
.Attr("container: string = ''")
.Attr("shared_name: string = ''")
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->Scalar());
c->set_output(1, c->MakeShape({}));
return Status::OK();
});

REGISTER_OP("CSVIndexableSpec")
.Input("input: resource")
.Input("component: string")
.Output("shape: int64")
.Output("dtype: int64")
.SetShapeFn([](shape_inference::InferenceContext* c) {
c->set_output(0, c->MakeShape({c->UnknownDim()}));
c->set_output(1, c->MakeShape({}));
return Status::OK();
});

REGISTER_OP("CSVIndexableGetItem")
.Input("input: resource")
.Input("start: int64")
.Input("stop: int64")
.Input("step: int64")
.Input("component: string")
.Output("output: dtype")
.Attr("shape: shape")
.Attr("dtype: type")
.SetShapeFn([](shape_inference::InferenceContext* c) {
PartialTensorShape shape;
TF_RETURN_IF_ERROR(c->GetAttr("shape", &shape));
shape_inference::ShapeHandle entry;
TF_RETURN_IF_ERROR(c->MakeShapeFromPartialTensorShape(shape, &entry));
c->set_output(0, entry);
return Status::OK();
});

} // namespace tensorflow
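
To make the `CSVIndexableGetItem` contract concrete, here is a hedged sketch of calling the generated wrapper directly; `example.csv`, the column `price`, its `float32` dtype, and the row range are all hypothetical, and the `shape`/`dtype` attrs are assumed to be exposed as keyword arguments on the wrapper.

```python
# Hypothetical sketch: fetch rows [0, 10) of one column. The output shape is
# whatever the "shape" attr declares (see the shape function above), and the
# kernel only accepts step == 1.
import tensorflow as tf
from tensorflow_io.core.python.ops import core_ops

resource, _ = core_ops.csv_indexable_init(
    "example.csv", container="CSVIOTensor", shared_name="example.csv/demo")
values = core_ops.csv_indexable_get_item(
    resource, 0, 10, 1, "price",
    shape=tf.TensorShape([10]), dtype=tf.float32)
print(values)
```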