Skip to content

Commit d0cb064

Browse files
committed
implement copy task
1 parent 8289472 commit d0cb064

File tree

6 files changed

+62
-24
lines changed

6 files changed

+62
-24
lines changed

Makefile

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,30 @@
1+
export CXX_FLAGS := $(CXXFLAGS) -std=c++17 -g3 -O0 -pthread -D_GLIBCXX_USE_CXX11_ABI=0 -Wall -Wextra -Wno-unused-function -D_LIBCPP_DEBUG # -D_GLIBCXX_DEBUG
2+
# -ftrapv
3+
4+
export INCPATH := $(shell python -c "import torch.utils.cpp_extension as C; print('-isystem' + str.join(' -isystem', C.include_paths()))")
5+
6+
export LIBPATH := $(shell python -c "import torch.utils.cpp_extension as C; print(C.include_paths()[0] + '/../')")
7+
8+
export USE_CUDA := $(shell python -c "import torch; print(torch.cuda.is_available())")
9+
10+
ifeq ($(USE_CUDA),True)
11+
export TORCH_LIBS=-ltorch -lcaffe2 -lcaffe2_gpu -lc10 -lc10_cuda -lcuda -lnvrtc -lnvToolsExt # -lnccl -lmkldnn -lmkl_rt
12+
else
13+
export TORCH_LIBS=-ltorch -lcaffe2 -lc10
14+
endif
15+
16+
117
.PHONY: test clean install-conda install-latest
218

319
test:
420
$(MAKE) --directory test
521

22+
example-mnist:
23+
$(MAKE) --directory example/mnist
24+
25+
example-copy:
26+
$(MAKE) --directory example/copy
27+
628
clean:
729
find . -name "*.o" -exec rm -v {} \;
830
find . -name "*.out" -exec rm -v {} \;

example/copy/Makefile

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
# Build and run the copy-task example.
# CXX_FLAGS, INCPATH, LIBPATH and TORCH_LIBS are exported by the top-level Makefile,
# so this file must be invoked through `make example-copy` (or with those set).
.PHONY: run

run: main.out
	./main.out

main.out: main.cpp
	$(CXX) $(CXX_FLAGS) $^ -o $@ -Wl,-rpath,$(LIBPATH) -Wl,-rpath,$(CONDA_PREFIX)/lib $(TORCH_LIBS) $(KALDI_FLAGS) -L$(CONDA_PREFIX)/lib -L$(LIBPATH) $(INCPATH) -I../../include

example/copy/main.cpp

Lines changed: 29 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,41 @@
1+
/**
2+
toy task introduced in https://github.com/harvardnlp/annotated-transformer
3+
TODO: cuda backend, greedy/beam decoding
4+
*/
15
#include <torch/torch.h>
6+
#include <thxx/net.hpp>
27

38
constexpr std::int64_t idim = 11;
49
constexpr std::int64_t odim = 11;
510

6-
auto gen_data(std::int64_t batch_size) {
7-
auto x = torch::randint(0, idim - 1, {batch_size, 10});
8-
auto xlen = torch::empty({batch_size});
11+
auto gen_data(std::int64_t batch_size = 128) {
12+
auto x = torch::randint(1, idim - 1, {batch_size, 10}).to(at::kLong);
13+
std::vector<std::int64_t> xlen(batch_size, 10);
914
return std::make_tuple(x, xlen);
1015
}
1116

1217
int main() {
13-
for (int i = 0; i < 100; ++i) {
18+
thxx::net::transformer::Config config;
19+
config.heads = 2;
20+
config.d_model = 16;
21+
config.d_ff = 16;
22+
config.elayers = 1;
23+
config.dlayers = 1;
24+
25+
using InputLayer = thxx::net::transformer::PositonalEmbedding;
26+
thxx::net::Transformer<InputLayer> model(idim, odim, config);
27+
torch::optim::Adam optimizer(model.parameters(), 0.01);
28+
29+
for (int i = 1; i <= 1000; ++i) {
1430
auto [x, xlen] = gen_data();
31+
optimizer.zero_grad();
32+
auto [loss, acc] = model.forward(x, xlen, x, xlen);
33+
loss.backward();
34+
optimizer.step();
35+
if (i % 100 == 0) {
36+
std::cout << "step: " << i
37+
<< ", loss: " << loss.template item<double>()
38+
<< ", acc: " << acc << std::endl;
39+
}
1540
}
1641
}

include/thxx/net.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -528,7 +528,8 @@ namespace thxx {
528528
auto target = tgt_out.view({-1});
529529
auto loss = label_smoothing_kl_div(pred.view({target.size(0), -1}), target,
530530
this->config.label_smoothing, this->ignore_index);
531-
return loss;
531+
auto acc = accuracy(pred, tgt_out, this->ignore_index);
532+
return std::make_tuple(loss, acc);
532533
}
533534
};
534535
} // namespace net

test/Makefile

Lines changed: 0 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,3 @@
1-
# INSTALL: conda install pytorch-cpu=1.0.0 -c pytorch
2-
3-
CXX_FLAGS := $(CXXFLAGS) -std=c++17 -g3 -O0 -pthread -D_GLIBCXX_USE_CXX11_ABI=0 -Wall -Wextra -Wno-unused-function -D_LIBCPP_DEBUG # -D_GLIBCXX_DEBUG
4-
# -ftrapv
5-
6-
INCPATH := $(shell python -c "import torch.utils.cpp_extension as C; print('-isystem' + str.join(' -isystem', C.include_paths()))")
7-
8-
LIBPATH := $(shell python -c "import torch.utils.cpp_extension as C; print(C.include_paths()[0] + '/../')")
9-
10-
USE_CUDA := $(shell python -c "import torch; print(torch.cuda.is_available())")
11-
12-
ifeq ($(USE_CUDA),True)
13-
TORCH_LIBS=-ltorch -lcaffe2 -lcaffe2_gpu -lc10 -lc10_cuda -lcuda -lnvrtc -lnvToolsExt # -lnccl -lmkldnn -lmkl_rt
14-
else
15-
TORCH_LIBS=-ltorch -lcaffe2 -lc10
16-
endif
17-
181
.PHONY: all
192

203
all: test.out

test/test_net.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -161,13 +161,13 @@ TEST_CASE( "transformer", "[net]" ) {
161161
}
162162
{
163163
Transformer<T::Conv2dSubsampling> model(n_input, n_output, conf);
164-
auto loss = model.forward(x, xlen, t, ylen);
164+
auto [loss, acc] = model.forward(x, xlen, t, ylen);
165165
loss.backward();
166166
CHECK_THAT( model, testing::HasGrad(true) );
167167
}
168168
{
169169
Transformer<T::PositonalEmbedding> model(n_input, n_output, conf);
170-
auto loss = model.forward(t, ylen, t, ylen);
170+
auto [loss, acc] = model.forward(t, ylen, t, ylen);
171171
loss.backward();
172172
CHECK_THAT( model, testing::HasGrad(true) );
173173
}

0 commit comments

Comments
 (0)