Commit a47667c

Use f-strings
1 parent 22cda00 commit a47667c

File tree: 1 file changed (+8, -10 lines)


distributed/rpc/parameter_server/rpc_param_server.py

Lines changed: 8 additions & 10 deletions
@@ -19,19 +19,19 @@
 class Net(nn.Module):
     def __init__(self, num_gpus=0):
         super(Net, self).__init__()
-        print("Using {} GPUs to train".format(num_gpus))
+        print(f"Using {num_gpus} GPUs to train")
         self.num_gpus = num_gpus
         device = torch.device(
             "cuda:0" if torch.cuda.is_available() and self.num_gpus > 0 else "cpu")
-        print("Putting first 2 convs on {}".format(str(device)))
+        print(f"Putting first 2 convs on {str(device)}")
         # Put conv layers on the first cuda device
         self.conv1 = nn.Conv2d(1, 32, 3, 1).to(device)
         self.conv2 = nn.Conv2d(32, 64, 3, 1).to(device)
         # Put rest of the network on the 2nd cuda device, if there is one
         if "cuda" in str(device) and num_gpus > 1:
             device = torch.device("cuda:1")

-        print("Putting rest of layers on {}".format(str(device)))
+        print(f"Putting rest of layers on {str(device)}")
         self.dropout1 = nn.Dropout2d(0.25).to(device)
         self.dropout2 = nn.Dropout2d(0.5).to(device)
         self.fc1 = nn.Linear(9216, 128).to(device)
@@ -179,9 +179,7 @@ def run_training_loop(rank, num_gpus, train_loader, test_loader):
             target = target.to(model_output.device)
             loss = F.nll_loss(model_output, target)
             if i % 5 == 0:
-                print(
-                    "Rank {} training batch {} loss {}".format(
-                        rank, i, loss.item()))
+                print(f"Rank {rank} training batch {i} loss {loss.item()}")
             dist_autograd.backward(cid, [loss])
             # Ensure that dist autograd ran successfully and gradients were
             # returned.
@@ -209,18 +207,18 @@ def get_accuracy(test_loader, model):
             correct = pred.eq(target.view_as(pred)).sum().item()
             correct_sum += correct

-    print("Accuracy {}".format(correct_sum / len(test_loader.dataset)))
+    print(f"Accuracy {correct_sum / len(test_loader.dataset)}")


 # Main loop for trainers.
 def run_worker(rank, world_size, num_gpus, train_loader, test_loader):
-    print("Worker rank {} initializing RPC".format(rank))
+    print(f"Worker rank {rank} initializing RPC")
     rpc.init_rpc(
-        name="trainer_{}".format(rank),
+        name=f"trainer_{rank}",
         rank=rank,
         world_size=world_size)

-    print("Worker {} done initializing RPC".format(rank))
+    print(f"Worker {rank} done initializing RPC")

     run_training_loop(rank, num_gpus, train_loader, test_loader)
     rpc.shutdown()
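
For reference, the rewrite is purely syntactic: an f-string evaluates the embedded expressions at the call site and renders the same text as the equivalent str.format() call, so the training output is unchanged. A minimal standalone sketch (the rank, batch index, and loss value below are made up for illustration, not taken from a real run):

# Compare the old str.format() call with the f-string form used in this commit.
rank, i, loss_value = 0, 5, 0.4321

old_style = "Rank {} training batch {} loss {}".format(rank, i, loss_value)
new_style = f"Rank {rank} training batch {i} loss {loss_value}"

assert old_style == new_style  # both render identically; only the syntax differs
print(new_style)  # Rank 0 training batch 5 loss 0.4321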
