Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -47,3 +47,5 @@ compile_commands.json
CTestTestfile.cmake
install_manifest.txt
Makefile

*.png
6 changes: 6 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,12 @@ ELSE()
ADD_DEFINITIONS(-DUSE_FAKE_CUDA_RUNTIME)
ENDIF()

# Pick the std::filesystem strategy per platform (consumed by
# tests/include/ttl/filesystem via the HAVE_STD_CPP_FS macro).
IF(NOT APPLE)
    # GCC/libstdc++ keeps <experimental/filesystem> in a separate archive.
    LINK_LIBRARIES(stdc++fs)
ELSE()
    ADD_DEFINITIONS(-DHAVE_STD_CPP_FS)
ENDIF()

IF(BUILD_TESTS)
ENABLE_TESTING()
INCLUDE(cmake/tests.cmake)
Expand Down
8 changes: 8 additions & 0 deletions CODE_OF_CONDUCT
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
"All things were made of numbers."
Pythagoras (c. 570 BC ~ c. 495 BC)

"Beauty is the first test: there is no permanent place in the world for ugly mathematics."
G. H. Hardy (1877 - 1947)

"I, in any case, am convinced that He does not play dice."
Albert Einstein (1879 - 1955)
11 changes: 5 additions & 6 deletions cmake/tests.cmake
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
INCLUDE(${CMAKE_SOURCE_DIR}/cmake/gtest.cmake)

FUNCTION(ADD_GTEST target)
FUNCTION(ADD_UNIT_TEST target)
ADD_EXECUTABLE(${target} ${ARGN} tests/main.cpp)
TARGET_USE_GTEST(${target})
TARGET_INCLUDE_DIRECTORIES(${target}
PRIVATE ${CMAKE_SOURCE_DIR}/tests/include)
TARGET_LINK_LIBRARIES(${target} stdtensor)
IF(HAVE_CUDA)
TARGET_LINK_LIBRARIES(${target} cudart)
Expand All @@ -13,9 +15,6 @@ ENDFUNCTION()
FILE(GLOB tests tests/test_*.cpp)
FOREACH(t ${tests})
GET_FILENAME_COMPONENT(name ${t} NAME_WE)
STRING(REPLACE "_"
"-"
name
${name})
ADD_GTEST(${name} ${t})
STRING(REPLACE "_" "-" name ${name})
ADD_UNIT_TEST(${name} ${t})
ENDFOREACH()
6 changes: 4 additions & 2 deletions examples/example_opencv.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,10 @@ using bmp_t = ttl::matrix<pixel_t>;

void save_bmp(const bmp_t &bmp)
{
const cv::Mat img(cv::Size(bmp.shape().dims()[1], bmp.shape().dims()[0]),
CV_8UC(3), (void *)bmp.data());
uint32_t h, w;
std::tie(h, w) = bmp.dims();
// const auto [h, w] = bmp.dims(); // c++17
const cv::Mat img(cv::Size(w, h), CV_8UC(3), bmp.data());
cv::imwrite("i.png", img);
}

Expand Down
3 changes: 0 additions & 3 deletions include/stdtensor

This file was deleted.

1 change: 1 addition & 0 deletions include/ttl/algorithm
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ namespace ttl
{
using ttl::internal::argmax;
using ttl::internal::cast;
using ttl::internal::chebyshev_distenace;
using ttl::internal::fill;
using ttl::internal::hamming_distance;
using ttl::internal::max;
Expand Down
6 changes: 6 additions & 0 deletions include/ttl/bits/flat_tensor_mixin.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,12 @@ class flat_tensor_mixin
using shape_type = S;
using device_type = D;

// Rank (number of dimensions) reported by the underlying shape.
rank_t rank() const { return shape_.rank(); }

// Total element count reported by the underlying shape.
Dim size() const { return shape_.size(); }

// The shape's dimension list; reference into shape_, valid while this
// tensor is alive.
const auto &dims() const { return shape_.dims(); }

size_t data_size() const { return shape_.size() * sizeof(R); }

const S &shape() const { return shape_; }
Expand Down
9 changes: 9 additions & 0 deletions include/ttl/bits/raw_tensor_mixin.hpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
#pragma once
#include <stdexcept>
#include <ttl/bits/raw_shape.hpp>
#include <ttl/bits/std_access_traits.hpp>
#include <ttl/bits/std_tensor_fwd.hpp>
#include <ttl/bits/std_tensor_traits.hpp>

Expand Down Expand Up @@ -86,6 +87,14 @@ class raw_tensor_mixin
return reinterpret_cast<ptr_type>(data_.get());
}

// Reinterprets this type-erased tensor as a flat (rank-erased) tensor of
// element type R.  The result is non-owning and shares this tensor's
// buffer and shape; its access tag is derived via basic_access_traits,
// so an owner or readwrite tensor yields a readwrite alias and a
// readonly tensor yields a readonly alias.
// NOTE(review): presumably data<R>() validates R against the encoded
// value type at runtime -- confirm against its definition.
template <typename R>
auto typed() const
{
    using Access = typename basic_access_traits<A>::type;
    using T = basic_tensor<R, basic_raw_shape<Dim>, D, Access>;
    return T(data<R>(), shape_);
}

template <typename R, rank_t r, typename A1 = A>
basic_tensor<R, basic_shape<r, Dim>, D, A1> ranked_as() const
{
Expand Down
29 changes: 29 additions & 0 deletions include/ttl/bits/std_access_traits.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
#pragma once

namespace ttl
{
namespace internal
{
// Access tags: a tensor either owns its buffer, aliases it mutably, or
// aliases it read-only.  Declared here so the traits below can be used
// without pulling in the full tensor-traits header.
struct owner;
struct readwrite;
struct readonly;

// Maps a tensor's access tag to the access tag of a non-owning tensor
// derived from it.  Only the three tags above are supported; any other
// argument hits the undefined primary template and fails to compile.
template <typename A>
struct basic_access_traits;

// A readonly alias stays readonly.
template <>
struct basic_access_traits<readonly> {
    using type = readonly;
};

// A readwrite alias stays readwrite.
template <>
struct basic_access_traits<readwrite> {
    using type = readwrite;
};

// Borrowing from an owner yields a mutable (readwrite) alias.
template <>
struct basic_access_traits<owner> {
    using type = readwrite;
};
}  // namespace internal
}  // namespace ttl
13 changes: 13 additions & 0 deletions include/ttl/bits/std_host_tensor_algo.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,19 @@ Dim hamming_distance(const basic_host_tensor_view<R, r, Dim> &x,
std::not_equal_to<R>());
}

// Chebyshev (L-infinity) distance: max_i |x[i] - y[i]|.
// NOTE: the name is misspelled ("distenace") but is kept as-is because
// callers (include/ttl/algorithm, tests) use this exact spelling.
template <typename R, rank_t r, typename Dim>
R chebyshev_distenace(const basic_host_tensor_view<R, r, Dim> &x,
                      const basic_host_tensor_view<R, r, Dim> &y)
{
    R best = static_cast<R>(0);
    const R *q = y.data();
    for (const R *p = x.data(); p != x.data_end(); ++p, ++q) {
        // Branchy absolute difference; works for unsigned R as well.
        // FIXME: make sure it is commutative for floats
        const R diff = *p > *q ? *p - *q : *q - *p;
        if (best < diff) { best = diff; }
    }
    return best;
}

template <typename R, rank_t r, typename Dim>
R max(const basic_host_tensor_view<R, r, Dim> &t)
{
Expand Down
6 changes: 6 additions & 0 deletions include/ttl/bits/std_tensor_mixin.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,12 @@ class basic_scalar_mixin
data_ptr data_end() const { return data_.get() + 1; }

S shape() const { return S(); }

// Access to the single element of this scalar (rank-0) tensor; the
// buffer holds exactly one value (data_end() is data() + 1).
data_ref at() const
{ // FIXME: support other devices
    static_assert(std::is_same<D, host_memory>::value, "");
    return data_.get()[0];
}
};

template <typename R, typename S, typename D, typename A>
Expand Down
7 changes: 2 additions & 5 deletions include/ttl/bits/std_tensor_traits.hpp
Original file line number Diff line number Diff line change
@@ -1,15 +1,12 @@
#pragma once
#include <memory>
#include <ttl/bits/std_access_traits.hpp>
#include <ttl/bits/std_tensor_fwd.hpp>

namespace ttl
{
namespace internal
{
struct owner;
struct readwrite;
struct readonly;

template <typename R, typename D>
using own_ptr = std::unique_ptr<R[], basic_deallocator<R, D>>;

Expand Down Expand Up @@ -45,7 +42,7 @@ struct basic_tensor_traits<R, owner, D> {
using ref_type = R &;

using Data = own_ptr<R, D>;
using Access = readwrite;
using Access = readwrite; // FIXME: use basic_access_traits
};

template <typename R, typename D>
Expand Down
21 changes: 21 additions & 0 deletions tests/include/ttl/filesystem
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// -*- mode: c++ -*-
#pragma once

// Portability shim so test code can use std::filesystem regardless of
// whether the toolchain exposes <filesystem> or only
// <experimental/filesystem>.  HAVE_STD_CPP_FS is defined by CMake on
// APPLE (see CMakeLists.txt).
//
// NOTE(review): adding declarations to namespace std is undefined
// behavior per [namespace.std]; a project-local alias namespace (e.g.
// ttl::fs) would be safer -- consider migrating.
#ifdef HAVE_STD_CPP_FS
#include <filesystem>

// NOTE(review): std::__fs::filesystem is a libc++ implementation
// detail; presumably this targets an AppleClang where std::filesystem
// is not yet exposed directly -- verify against the minimum supported
// Xcode version.
namespace std
{
namespace filesystem = std::__fs::filesystem;
}

#else

#include <experimental/filesystem>

namespace std
{
namespace filesystem = std::experimental::filesystem;
}

#endif
47 changes: 31 additions & 16 deletions tests/test_algo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ TEST(tensor_algo_test, test_argmax)
using R = float;
ttl::tensor<R, 1> t(10);
std::iota(t.data(), t.data_end(), 1);
ASSERT_EQ(static_cast<uint32_t>(9), ttl::argmax(view(t)));
ASSERT_EQ(static_cast<uint32_t>(9), ttl::argmax(ttl::view(t)));
}

TEST(tensor_algo_test, test_cast)
Expand All @@ -23,22 +23,22 @@ TEST(tensor_algo_test, test_cast)
});

ttl::tensor<int, 1> y(n);
ttl::cast(view(x), ref(y));
ttl::cast(ttl::view(x), ttl::ref(y));

ASSERT_EQ(5, ttl::sum(view(y)));
ASSERT_EQ(5, ttl::sum(ttl::view(y)));
}

TEST(tensor_algo_test, test_fill)
{
{
using R = int;
ttl::tensor<R, 1> t(10);
ttl::fill(ref(t), 1);
ttl::fill(ttl::ref(t), 1);
}
{
using R = float;
ttl::tensor<R, 1> t(10);
ttl::fill(ref(t), static_cast<R>(1.1));
ttl::fill(ttl::ref(t), static_cast<R>(1.1));
}
}

Expand All @@ -47,11 +47,26 @@ TEST(tensor_algo_test, test_hamming_distance)
using R = int;
int n = 0xffff;
ttl::tensor<R, 1> x(n);
ttl::fill(ref(x), -1);
ttl::fill(ttl::ref(x), -1);
ttl::tensor<R, 1> y(n);
ttl::fill(ref(y), 1);
ttl::fill(ttl::ref(y), 1);
ASSERT_EQ(static_cast<uint32_t>(n),
ttl::hamming_distance(view(x), view(y)));
ttl::hamming_distance(ttl::view(x), ttl::view(y)));
}

// chebyshev_distenace ("sic" -- the spelling matches the library) is the
// L-infinity distance: max_i |x_i - y_i|.
TEST(tensor_algo_test, chebyshev_distenace)
{
    using R = int;
    int n = 0xffff;
    ttl::tensor<R, 1> x(n);
    ttl::tensor<R, 1> y(n);
    // x = y = [1, 2, ..., n] -> every pointwise difference is 0.
    std::iota(x.data(), x.data_end(), 1);
    std::iota(y.data(), y.data_end(), 1);
    ASSERT_EQ(static_cast<R>(0),
              ttl::chebyshev_distenace(ttl::view(x), ttl::view(y)));
    // After reversing, y_i = n + 1 - x_i, so |x_i - y_i| = |2i - (n + 1)|,
    // which is maximized at either endpoint with value n - 1.
    std::reverse(y.data(), y.data_end());
    ASSERT_EQ(static_cast<R>(n - 1),
              ttl::chebyshev_distenace(ttl::view(x), ttl::view(y)));
}

TEST(tensor_algo_test, test_summaries_int)
Expand All @@ -60,10 +75,10 @@ TEST(tensor_algo_test, test_summaries_int)
const int n = 10;
ttl::tensor<R, 1> x(n);
std::iota(x.data(), x.data_end(), -5);
ASSERT_EQ(-5, ttl::min(view(x)));
ASSERT_EQ(4, ttl::max(view(x)));
ASSERT_EQ(-5, ttl::sum(view(x)));
ASSERT_EQ(0, ttl::mean(view(x)));
ASSERT_EQ(-5, ttl::min(ttl::view(x)));
ASSERT_EQ(4, ttl::max(ttl::view(x)));
ASSERT_EQ(-5, ttl::sum(ttl::view(x)));
ASSERT_EQ(0, ttl::mean(ttl::view(x)));
}

TEST(tensor_algo_test, test_summaries_float)
Expand All @@ -72,8 +87,8 @@ TEST(tensor_algo_test, test_summaries_float)
const int n = 10;
ttl::tensor<R, 1> x(n);
std::iota(x.data(), x.data_end(), -5);
ASSERT_EQ(-5, ttl::min(view(x)));
ASSERT_EQ(4, ttl::max(view(x)));
ASSERT_EQ(-5, ttl::sum(view(x)));
ASSERT_EQ(-0.5, ttl::mean(view(x)));
ASSERT_EQ(-5, ttl::min(ttl::view(x)));
ASSERT_EQ(4, ttl::max(ttl::view(x)));
ASSERT_EQ(-5, ttl::sum(ttl::view(x)));
ASSERT_EQ(-0.5, ttl::mean(ttl::view(x)));
}
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#include "testing.hpp"

#include <stdtensor>
#include <ttl/tensor>

using ttl::tensor;
using ttl::tensor_ref;
Expand Down
3 changes: 1 addition & 2 deletions tests/_test_loc.cpp → tests/test_loc.cpp
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
#include <cstdlib>
#include <filesystem>
#include <iostream>
#include <ttl/filesystem>

#include "testing.hpp"

Expand All @@ -20,7 +20,6 @@ int loc(const char *filename)

TEST(test_loc, test1)
{
std::string path = "/path/to/directory";
int tot = 0;
int n = 0;
for (const auto &entry : fs::directory_iterator("include/ttl/bits")) {
Expand Down
38 changes: 37 additions & 1 deletion tests/test_raw_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,8 @@ TEST(raw_tensor_test, test_convert)
}
}

template <typename R, typename T> void test_raw_accessors(const T &t)
template <typename R, typename T>
void test_raw_accessors(const T &t)
{
t.shape();
t.value_type();
Expand Down Expand Up @@ -124,3 +125,38 @@ TEST(raw_tensor_test, test_data)
test_raw_accessors<R>(rr);
test_raw_accessors<R>(rv);
}

#include <ttl/experimental/flat_tensor>

// Checks that raw_tensor::typed<R>() produces the expected flat tensor
// types: a mutable ref from an owner or a ref, and a view from a view.
TEST(raw_tensor_test, test_convert_to_flat)
{
    using ttl::experimental::raw_tensor;
    using ttl::experimental::raw_tensor_ref;
    using ttl::experimental::raw_tensor_view;
    using encoder = raw_tensor::encoder_type;
    using raw_shape = raw_tensor::shape_type;

    // A float tensor of shape (1, 2, 3), i.e. 6 elements.
    raw_tensor rt(encoder::value<float>(), 1, 2, 3);
    {
        // owner -> flat readwrite ref
        static_assert(
            std::is_same<decltype(rt.typed<float>()),
                         ttl::experimental::flat_tensor_ref<float>>::value,
            "");
        ttl::experimental::flat_tensor_ref<float> ft = rt.typed<float>();
        ASSERT_EQ(ft.size(), static_cast<raw_shape::dimension_type>(6));
    }
    {
        // readwrite ref -> flat readwrite ref
        const raw_tensor_ref rtr(rt);
        static_assert(
            std::is_same<decltype(rtr.typed<float>()),
                         ttl::experimental::flat_tensor_ref<float>>::value,
            "");
    }
    {
        // readonly view -> flat readonly view
        const raw_tensor_view rtv(rt);
        static_assert(
            std::is_same<decltype(rtv.typed<float>()),
                         ttl::experimental::flat_tensor_view<float>>::value,
            "");
    }
}
Loading