Skip to content

Commit 636858a

Browse files
authored
Merge branch 'master' into master
2 parents a2f65c1 + a65bbe2 commit 636858a

File tree

3 files changed

+6
-6
lines changed

3 files changed

+6
-6
lines changed

.jenkins/build.sh

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ if [[ "${JOB_BASE_NAME}" == *worker_* ]]; then
4040
# Step 1: Remove runnable code from tutorials that are not supposed to be run
4141
python $DIR/remove_runnable_code.py beginner_source/aws_distributed_training_tutorial.py beginner_source/aws_distributed_training_tutorial.py || true
4242
# TODO: Fix bugs in these tutorials to make them runnable again
43-
python $DIR/remove_runnable_code.py beginner_source/audio_classifier_tutorial.py beginner_source/audio_classifier_tutorial.py || true
43+
# python $DIR/remove_runnable_code.py beginner_source/audio_classifier_tutorial.py beginner_source/audio_classifier_tutorial.py || true
4444

4545
# Step 2: Keep certain tutorials based on file count, and remove runnable code in all other tutorials
4646
# IMPORTANT NOTE: We assume that each tutorial has a UNIQUE filename.

advanced_source/dynamic_quantization_tutorial.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -178,7 +178,7 @@ def tokenize(self, path):
178178

179179
word = corpus.dictionary.idx2word[word_idx]
180180

181-
outf.write(str(word) + ('\n' if i % 20 == 19 else ' '))
181+
outf.write(str(word.encode('utf-8')) + ('\n' if i % 20 == 19 else ' '))
182182

183183
if i % 100 == 0:
184184
print('| Generated {}/{} words'.format(i, 1000))

beginner_source/blitz/neural_networks_tutorial.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -90,9 +90,9 @@ def num_flat_features(self, x):
9090
print(params[0].size()) # conv1's .weight
9191

9292
########################################################################
93-
# Let try a random 32x32 input.
93+
# Let's try a random 32x32 input.
9494
# Note: expected input size of this net (LeNet) is 32x32. To use this net on
95-
# MNIST dataset, please resize the images from the dataset to 32x32.
95+
# the MNIST dataset, please resize the images from the dataset to 32x32.
9696

9797
input = torch.randn(1, 1, 32, 32)
9898
out = net(input)
@@ -227,7 +227,7 @@ def num_flat_features(self, x):
227227
#
228228
# ``weight = weight - learning_rate * gradient``
229229
#
230-
# We can implement this using simple python code:
230+
# We can implement this using simple Python code:
231231
#
232232
# .. code:: python
233233
#
@@ -258,4 +258,4 @@ def num_flat_features(self, x):
258258
#
259259
# Observe how gradient buffers had to be manually set to zero using
260260
# ``optimizer.zero_grad()``. This is because gradients are accumulated
261-
# as explained in `Backprop`_ section.
261+
# as explained in the `Backprop`_ section.

0 commit comments

Comments (0)