
Commit 4d9d253

Author: Will Feng
Commit message: [WIP]
1 parent: 234bcff

3 files changed: +274 −0 lines


cpp/autograd/CMakeLists.txt (21 additions, 0 deletions)
```cmake
cmake_minimum_required(VERSION 2.8)

project(autograd)
set(CMAKE_CXX_STANDARD 14)

find_package(Torch REQUIRED)

add_executable(${PROJECT_NAME} "autograd.cpp")
target_link_libraries(${PROJECT_NAME} "${TORCH_LIBRARIES}")

# The following code block is suggested to be used on Windows.
# According to https://github.com/pytorch/pytorch/issues/25457,
# the DLLs need to be copied to avoid memory errors.
if (MSVC)
  file(GLOB TORCH_DLLS "${TORCH_INSTALL_PREFIX}/lib/*.dll")
  add_custom_command(TARGET ${PROJECT_NAME}
                     POST_BUILD
                     COMMAND ${CMAKE_COMMAND} -E copy_if_different
                     ${TORCH_DLLS}
                     $<TARGET_FILE_DIR:${PROJECT_NAME}>)
endif (MSVC)
```

cpp/autograd/README.md (78 additions, 0 deletions)
# C++ autograd example

`autograd.cpp` contains several examples of doing autograd in the PyTorch C++ frontend.
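All of the examples reduce to the same basic pattern: create tensors that track gradients, run operations on them, call `backward()`, and read the accumulated gradients. As a quick orientation, here is a minimal sketch of that pattern (illustrative only, not part of `autograd.cpp`; it assumes LibTorch is on the include and link path):

```cpp
// Minimal autograd sketch (illustrative, not part of this commit).
#include <torch/torch.h>
#include <iostream>

int main() {
  // Track operations on x so gradients can flow back to it.
  auto x = torch::ones({2, 2}, torch::requires_grad());

  // y is a scalar, so backward() needs no explicit gradient argument.
  auto y = (x * x).sum();
  y.backward();

  // d(sum(x^2))/dx = 2x, i.e. a 2x2 tensor of 2s here.
  std::cout << x.grad() << std::endl;
  return 0;
}
```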

To build the code, run the following commands from your terminal:

```shell
$ cd autograd
$ mkdir build
$ cd build
$ cmake -DCMAKE_PREFIX_PATH=/path/to/libtorch ..
$ make
```

where `/path/to/libtorch` should be the path to the unzipped *LibTorch*
distribution, which you can get from the [PyTorch
homepage](https://pytorch.org/get-started/locally/).

Execute the compiled binary to run the examples:

```shell
$ ./autograd
====== Running: "Basic autograd operations" ======
 1  1
 1  1
[ CPUFloatType{2,2} ]
 3  3
 3  3
[ CPUFloatType{2,2} ]
AddBackward1
 27  27
 27  27
[ CPUFloatType{2,2} ]
MulBackward1
27
[ CPUFloatType{} ]
MeanBackward0
false
true
SumBackward0
 4.5000  4.5000
 4.5000  4.5000
[ CPUFloatType{2,2} ]
 -731.0470
  963.0721
 1236.4192
[ CPUFloatType{3} ]
MulBackward1
  102.4000
 1024.0000
    0.1024
[ CPUFloatType{3} ]
true
true
false
true
false
true

====== Running "Computing higher-order gradients in C++" ======
-0.0384  0.1510 -0.0288  0.0872
-0.0105 -0.0936 -0.0553 -0.0222
 0.0589 -0.0848 -0.0730  0.0070
[ CPUFloatType{3,4} ]

====== Running "Using custom autograd function in C++" ======
-0.6962 -1.7728  1.4167
-0.6962 -1.7728  1.4167
[ CPUFloatType{2,3} ]
 1.5162  1.6421  1.2691
 1.5162  1.6421  1.2691
 1.5162  1.6421  1.2691
 1.5162  1.6421  1.2691
[ CPUFloatType{4,3} ]
 5.5000
 5.5000
[ CPUFloatType{2} ]
```

cpp/autograd/autograd.cpp (175 additions, 0 deletions)
```cpp
#include <torch/torch.h>
#include <iostream>

using namespace torch::autograd;

void basic_autograd_operations_example() {
  std::cout << "====== Running: \"Basic autograd operations\" ======" << std::endl;

  // Create a tensor and set torch::requires_grad() so that autograd records operations on it.
  auto x = torch::ones({2, 2}, torch::requires_grad());
  std::cout << x << std::endl;

  auto y = x + 2;
  std::cout << y << std::endl;

  // y was created as a result of an operation, so it has a grad_fn.
  std::cout << y.grad_fn()->name() << std::endl;

  auto z = y * y * 3;
  auto out = z.mean();

  std::cout << z << std::endl;
  std::cout << z.grad_fn()->name() << std::endl;
  std::cout << out << std::endl;
  std::cout << out.grad_fn()->name() << std::endl;

  auto a = torch::randn({2, 2});
  a = ((a * 3) / (a - 1));
  std::cout << a.requires_grad() << std::endl;

  // .requires_grad_(...) changes an existing tensor's requires_grad flag in-place.
  a.requires_grad_(true);
  std::cout << a.requires_grad() << std::endl;

  auto b = (a * a).sum();
  std::cout << b.grad_fn()->name() << std::endl;

  // out is a scalar, so out.backward() is equivalent to out.backward(torch::tensor(1.)).
  out.backward();
  std::cout << x.grad() << std::endl;

  x = torch::randn(3, torch::requires_grad());

  y = x * 2;
  while (y.norm().item<double>() < 1000) {
    y = y * 2;
  }

  std::cout << y << std::endl;
  std::cout << y.grad_fn()->name() << std::endl;

  // y is no longer a scalar, so pass a vector to backward() to compute the vector-Jacobian product.
  auto v = torch::tensor({0.1, 1.0, 0.0001}, torch::kFloat);
  y.backward(v);

  std::cout << x.grad() << std::endl;

  std::cout << x.requires_grad() << std::endl;
  std::cout << x.pow(2).requires_grad() << std::endl;

  // Stop autograd from tracking history inside this scope.
  {
    torch::NoGradGuard no_grad;
    std::cout << x.pow(2).requires_grad() << std::endl;
  }

  std::cout << x.requires_grad() << std::endl;
  // detach() returns a tensor that shares data with x but does not track gradients.
  y = x.detach();
  std::cout << y.requires_grad() << std::endl;
  std::cout << x.eq(y).all().item<bool>() << std::endl;
}

void compute_higher_order_gradients_example() {
  std::cout << "====== Running \"Computing higher-order gradients in C++\" ======" << std::endl;

  auto model = torch::nn::Linear(4, 3);

  auto input = torch::randn({3, 4}).requires_grad_(true);
  auto output = model(input);

  // Calculate loss
  auto target = torch::randn({3, 3});
  auto loss = torch::nn::MSELoss()(output, target);

  // Use norm of gradients as penalty
  // create_graph=true builds a graph of the derivative itself, so the penalty can be backpropagated.
  auto grad_output = torch::ones_like(output);
  auto gradient = torch::autograd::grad({output}, {input}, /*grad_outputs=*/{grad_output},
                                        /*retain_graph=*/true, /*create_graph=*/true,
                                        /*allow_unused=*/true)[0];
  gradient = gradient.view({-1, 1});
  auto gradient_penalty = torch::pow((gradient.norm(2, /*dim=*/1) - 1), 2).mean();

  // Add gradient penalty to loss
  auto combined_loss = loss + gradient_penalty;
  combined_loss.backward();

  std::cout << input.grad() << std::endl;
}

// Inherit from Function
class LinearFunction : public Function<LinearFunction> {
 public:
  // Note that both forward and backward are static functions

  // bias is an optional argument
  static Variable forward(AutogradContext *ctx, Variable input, Variable weight, Variable bias = Variable()) {
    ctx->save_for_backward({input, weight, bias});
    auto output = input.mm(weight.t());
    if (bias.defined()) {
      output += bias.unsqueeze(0).expand_as(output);
    }
    return output;
  }

  static variable_list backward(AutogradContext *ctx, variable_list grad_outputs) {
    auto saved = ctx->get_saved_variables();
    auto input = saved[0];
    auto weight = saved[1];
    auto bias = saved[2];

    auto grad_output = grad_outputs[0];
    auto grad_input = grad_output.mm(weight);
    auto grad_weight = grad_output.t().mm(input);
    auto grad_bias = Variable();
    if (bias.defined()) {
      grad_bias = grad_output.sum(0);
    }

    return {grad_input, grad_weight, grad_bias};
  }
};

class MulConstant : public Function<MulConstant> {
 public:
  static Variable forward(AutogradContext *ctx, Variable variable, double constant) {
    // ctx is a context object that can be used to stash information
    // for backward computation
    ctx->saved_data["constant"] = constant;
    return variable * constant;
  }

  static variable_list backward(AutogradContext *ctx, variable_list grad_outputs) {
    // We return as many input gradients as there were arguments.
    // Gradients of non-Tensor arguments to forward must be `Variable()`.
    return {grad_outputs[0] * ctx->saved_data["constant"].toDouble(), Variable()};
  }
};

void custom_autograd_function_example() {
  std::cout << "====== Running \"Using custom autograd function in C++\" ======" << std::endl;
  {
    auto x = torch::randn({2, 3}).requires_grad_();
    auto weight = torch::randn({4, 3}).requires_grad_();
    auto y = LinearFunction::apply(x, weight);
    y.sum().backward();

    std::cout << x.grad() << std::endl;
    std::cout << weight.grad() << std::endl;
  }
  {
    auto x = torch::randn({2}).requires_grad_();
    auto y = MulConstant::apply(x, 5.5);
    y.sum().backward();

    std::cout << x.grad() << std::endl;
  }
}

int main() {
  std::cout << std::boolalpha;

  basic_autograd_operations_example();

  std::cout << "\n";

  compute_higher_order_gradients_example();

  std::cout << "\n";

  custom_autograd_function_example();
}
```
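One part of `autograd.cpp` that the demo above does not exercise is the optional `bias` argument of `LinearFunction`. Here is a hedged sketch of how it could be called (hypothetical, not part of this commit; it reuses the `LinearFunction` class defined above):

```cpp
// Hypothetical usage sketch (not part of this commit): calling LinearFunction
// with its optional bias argument so that grad_bias is populated in backward().
void custom_autograd_function_with_bias_example() {
  auto x = torch::randn({2, 3}).requires_grad_();
  auto weight = torch::randn({4, 3}).requires_grad_();
  auto bias = torch::randn({4}).requires_grad_();

  auto y = LinearFunction::apply(x, weight, bias);  // y has shape {2, 4}
  y.sum().backward();

  // For y.sum(), each bias entry receives a gradient equal to the batch size (2).
  std::cout << bias.grad() << std::endl;
}
```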
