
Commit 70c0bde

wconstab authored and facebook-github-bot committed
Replace all CHECK_ and DCHECK_ with TORCH_* macros (#82032)

Summary:
Avoid exposing defines that conflict with google logging, since this blocks external usage of libtorch in certain cases.

All the 'interesting' changes should be in these two files, and the rest should just be mechanical changes via sed:
c10/util/logging_is_not_google_glog.h
c10/util/logging_is_google_glog.h

Fixes #81415

cc miladm malfet

Pull Request resolved: #82032
Approved by: https://github.com/soumith, https://github.com/miladm

Test Plan: contbuild & OSS CI, see https://hud.pytorch.org/commit/pytorch/pytorch/4f34cd6d1e91dcd82ee30c3ea39bdb8a0fa93e8b

Original Phabricator Test Plan: Imported from GitHub, without a `Test Plan:` line.

Reviewed By: osalpekar

Differential Revision: D38180841

Pulled By: wconstab

fbshipit-source-id: b2f6c3fef609ec6ad616929a16d342e525ef0155
1 parent 5796522 commit 70c0bde
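
Why the rename matters: glog defines the unprefixed macros (CHECK_EQ, CHECK_NOTNULL, DCHECK_EQ, and friends) in <glog/logging.h>, so a downstream build that includes both glog and libtorch headers can hit macro redefinitions. Below is a minimal sketch of the prefixed-macro idea; SKETCH_CHECK_EQ is a hypothetical name for illustration, not the actual c10 definition:

#include <sstream>
#include <stdexcept>

// A project-prefixed check macro occupies its own name, so it cannot clash
// with glog's CHECK_EQ even when both headers are in one translation unit.
#define SKETCH_CHECK_EQ(a, b)                        \
  do {                                               \
    if ((a) != (b)) {                                \
      std::ostringstream oss;                        \
      oss << "Check failed: " #a " == " #b;          \
      throw std::runtime_error(oss.str());           \
    }                                                \
  } while (0)

int main() {
  SKETCH_CHECK_EQ(2 + 2, 4); // passes; a mismatch would throw with a message
  return 0;
}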

File tree

150 files changed: +680 additions, -612 deletions


aten/src/ATen/native/cpu/layer_norm_kernel.cpp

Lines changed: 5 additions & 5 deletions
@@ -35,7 +35,7 @@ void LayerNormKernelImplInternal(
     Tensor* rstd) {
   using T_ACC = vec::vec_scalar_t<T>;
   using Vec = vec::Vectorized<T_ACC>;
-  DCHECK_EQ(X.numel(), M * N);
+  TORCH_DCHECK_EQ(X.numel(), M * N);
   DCHECK(!gamma.defined() || gamma.numel() == N);
   DCHECK(!beta.defined() || beta.numel() == N);
   const T* X_data = X.data_ptr<T>();
@@ -117,10 +117,10 @@ void LayerNormBackwardKernelImplInternal(
     Tensor* dbeta) {
   using T_ACC = vec::vec_scalar_t<T>;
   using Vec = vec::Vectorized<T_ACC>;
-  DCHECK_EQ(dY.numel(), M * N);
-  DCHECK_EQ(X.numel(), M * N);
-  DCHECK_EQ(mean.numel(), M);
-  DCHECK_EQ(rstd.numel(), M);
+  TORCH_DCHECK_EQ(dY.numel(), M * N);
+  TORCH_DCHECK_EQ(X.numel(), M * N);
+  TORCH_DCHECK_EQ(mean.numel(), M);
+  TORCH_DCHECK_EQ(rstd.numel(), M);
   DCHECK(!gamma.defined() || gamma.numel() == N);
   const T* dY_data = dY.template data_ptr<T>();
   const T* X_data = X.template data_ptr<T>();
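
A note on the D-prefixed variants above: like glog's DCHECK_EQ, the TORCH_DCHECK_* family is a debug-only assertion, so these hot layer-norm kernels pay nothing for the checks in release builds. A hedged sketch of the usual pattern (hypothetical SKETCH_ name, not the actual c10 macro body):

#include <cassert>

#ifndef NDEBUG
#define SKETCH_DCHECK_EQ(a, b) assert((a) == (b)) // checked in debug builds
#else
#define SKETCH_DCHECK_EQ(a, b) ((void)0) // compiled out under NDEBUG
#endif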

aten/src/ATen/native/quantized/cpu/qsoftmax.cpp

Lines changed: 1 addition & 1 deletion
@@ -94,7 +94,7 @@ Tensor qsoftmax_qnnpack(const Tensor& qx, const int64_t dim) {
   TORCH_CHECK(
       status == pytorch_qnnp_status_success,
       "failed to create QNNPACK Softmax operator");
-  CHECK_NOTNULL(softargmax);
+  TORCH_CHECK_NOTNULL(softargmax);

   status = pytorch_qnnp_setup_softargmax_nc_q8(
       softargmax, batch_size, input, input_stride, output, output_stride);

benchmarks/static_runtime/deep_wide_pt_bench.cc

Lines changed: 2 additions & 2 deletions
@@ -47,7 +47,7 @@ static void BM_deep_wide_jit_graph_executor(benchmark::State& state) {

   std::vector<IValue> inputs({ad_emb_packed, user_emb, wide});

-  CHECK_EQ(setenv("TORCH_JIT_DISABLE_NEW_EXECUTOR", "1", 1), 0);
+  TORCH_CHECK_EQ(setenv("TORCH_JIT_DISABLE_NEW_EXECUTOR", "1", 1), 0);

   mod.forward(inputs);
   for (auto _ : state) {
@@ -65,7 +65,7 @@ static void BM_deep_wide_jit_profiling_executor(benchmark::State& state) {

   std::vector<IValue> inputs({ad_emb_packed, user_emb, wide});

-  CHECK_EQ(unsetenv("TORCH_JIT_DISABLE_NEW_EXECUTOR"), 0);
+  TORCH_CHECK_EQ(unsetenv("TORCH_JIT_DISABLE_NEW_EXECUTOR"), 0);

   mod.forward(inputs);
   for (auto _ : state) {

binaries/benchmark_helper.cc

Lines changed: 5 additions & 5 deletions
@@ -173,7 +173,7 @@ int loadInput(
       LOG(INFO) << "Running on GPU.";
 #ifdef __CUDA_ARCH__
       caffe2::TensorCUDA* tensor = blob->GetMutable<caffe2::TensorCUDA>();
-      CHECK_NOTNULL(tensor);
+      TORCH_CHECK_NOTNULL(tensor);
       tensor->Resize(input_dims);
       if (input_type_list[i] == "uint8_t") {
         tensor->mutable_data<uint8_t>();
@@ -189,17 +189,17 @@ int loadInput(
       if (input_type_list[i] == "uint8_t") {
         caffe2::int8::Int8TensorCPU* tensor =
             blob->GetMutable<caffe2::int8::Int8TensorCPU>();
-        CHECK_NOTNULL(tensor);
+        TORCH_CHECK_NOTNULL(tensor);
         tensor->t.Resize(input_dims);
         tensor->t.mutable_data<uint8_t>();
       } else if (input_type_list[i] == "float") {
         caffe2::TensorCPU* tensor = BlobGetMutableTensor(blob, caffe2::CPU);
-        CHECK_NOTNULL(tensor);
+        TORCH_CHECK_NOTNULL(tensor);
         tensor->Resize(input_dims);
         tensor->mutable_data<float>();
       } else if (input_type_list[i] == "int") {
         caffe2::TensorCPU* tensor = BlobGetMutableTensor(blob, caffe2::CPU);
-        CHECK_NOTNULL(tensor);
+        TORCH_CHECK_NOTNULL(tensor);
         tensor->Resize(input_dims);
         tensor->mutable_data<int>();
       } else {
@@ -495,7 +495,7 @@ int benchmark(
     net_def.set_name("benchmark");
   }
   caffe2::NetBase* net = workspace->CreateNet(net_def);
-  CHECK_NOTNULL(net);
+  TORCH_CHECK_NOTNULL(net);
   runNetwork(
       workspace,
       net,

binaries/convert_and_benchmark.cc

Lines changed: 1 addition & 1 deletion
@@ -591,7 +591,7 @@ void runNetwork(
   }

   caffe2::NetBase* net = workspace->CreateNet(net_def);
-  CHECK_NOTNULL(net);
+  TORCH_CHECK_NOTNULL(net);

   LOG(INFO) << "Starting benchmark.";
   caffe2::ObserverConfig::initSampleRate(1, 1, 1, run_individual, warmup);

binaries/make_image_db.cc

Lines changed: 1 addition & 1 deletion
@@ -251,7 +251,7 @@ void ConvertImageDataset(
     // Synthesize key for this entry
     auto key_len = snprintf(
         key_cstr, sizeof(key_cstr), "%08d_%s", i, lines[i].first.c_str());
-    DCHECK_LE(key_len, sizeof(key_cstr));
+    TORCH_DCHECK_LE(key_len, sizeof(key_cstr));

     // Put in db
     transaction->Put(string(key_cstr), std::move(value));

binaries/speed_benchmark.cc

Lines changed: 3 additions & 3 deletions
@@ -136,12 +136,12 @@ int main(int argc, char** argv) {
       if (input_type_list[i] == "uint8_t") {
         caffe2::int8::Int8TensorCPU* tensor =
             blob->GetMutable<caffe2::int8::Int8TensorCPU>();
-        CHECK_NOTNULL(tensor);
+        TORCH_CHECK_NOTNULL(tensor);
         tensor->t.Resize(input_dims);
         tensor->t.mutable_data<uint8_t>();
       } else if (input_type_list[i] == "float") {
         caffe2::TensorCPU* tensor = BlobGetMutableTensor(blob, caffe2::CPU);
-        CHECK_NOTNULL(tensor);
+        TORCH_CHECK_NOTNULL(tensor);
         tensor->Resize(input_dims);
         tensor->mutable_data<float>();
       } else {
@@ -184,7 +184,7 @@ int main(int argc, char** argv) {
   }

   caffe2::NetBase* net = workspace->CreateNet(net_def);
-  CHECK_NOTNULL(net);
+  TORCH_CHECK_NOTNULL(net);
   CAFFE_ENFORCE(net->Run());
   net->TEST_Benchmark(FLAGS_warmup, FLAGS_iter, FLAGS_run_individual);

c10/test/util/logging_test.cpp

Lines changed: 1 addition & 1 deletion
@@ -141,7 +141,7 @@ TEST(LoggingTest, Join) {

 TEST(LoggingTest, TestDanglingElse) {
   if (true)
-    DCHECK_EQ(1, 1);
+    TORCH_DCHECK_EQ(1, 1);
   else
     GTEST_FAIL();
 }

c10/util/Logging.h

Lines changed: 1 addition & 1 deletion
@@ -180,7 +180,7 @@ using EnforceNotMet = ::c10::Error;
  * With further usages like `CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))`
  *
  * Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided
- * too. Please use them instead of CHECK_EQ and friends for failures in
+ * too. Please use them instead of TORCH_CHECK_EQ and friends for failures in
  * user-provided input.
  */
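
The comment above draws a line worth illustrating: CAFFE_ENFORCE_EQ raises a catchable c10::Error (EnforceNotMet) with a readable message, which suits validating user-provided input, while the TORCH_CHECK_EQ family is aimed at internal invariants. A small usage sketch (hypothetical function, assuming the documented CAFFE_ENFORCE_EQ(x, y, msg...) form):

#include <c10/util/Logging.h>

void set_batch_size(int64_t requested, int64_t supported) {
  // Throws c10::Error carrying both values plus the extra message.
  CAFFE_ENFORCE_EQ(requested, supported, "unsupported batch size");
}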

c10/util/Registry.h

Lines changed: 3 additions & 3 deletions
@@ -64,10 +64,10 @@ class Registry {
       const RegistryPriority priority = REGISTRY_DEFAULT) {
     std::lock_guard<std::mutex> lock(register_mutex_);
     // The if statement below is essentially the same as the following line:
-    // CHECK_EQ(registry_.count(key), 0) << "Key " << key
+    // TORCH_CHECK_EQ(registry_.count(key), 0) << "Key " << key
     //   << " registered twice.";
-    // However, CHECK_EQ depends on google logging, and since registration is
-    // carried out at static initialization time, we do not want to have an
+    // However, TORCH_CHECK_EQ depends on google logging, and since registration
+    // is carried out at static initialization time, we do not want to have an
     // explicit dependency on glog's initialization function.
     if (registry_.count(key) != 0) {
       auto cur_priority = priority_[key];
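
The comment in this hunk states the constraint: Register() runs from static initializers, before main() and before any logging framework is set up, so it must not expand a glog-backed check. A minimal sketch of that registration pattern (hypothetical names, not the real c10::Registry):

#include <cstdio>
#include <cstdlib>
#include <string>
#include <unordered_map>

struct SketchRegistry {
  std::unordered_map<std::string, int> entries;
  void Register(const std::string& key, int value) {
    // Plain control flow instead of a CHECK_EQ-style macro: glog may not be
    // initialized yet when this runs during static initialization.
    if (entries.count(key) != 0) {
      std::fprintf(stderr, "Key %s registered twice.\n", key.c_str());
      std::abort();
    }
    entries.emplace(key, value);
  }
};

static SketchRegistry& GlobalSketchRegistry() {
  static SketchRegistry r; // initialized on first use
  return r;
}

// Runs before main(), which is exactly why Register() must stay glog-free.
static const int kRegistered =
    (GlobalSketchRegistry().Register("my_kernel", 42), 0);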
