
Commit 5e8c5ab

fix default arg (#1927)
* fix default
* formatting errors
* update
* flake8
1 parent ca81569 commit 5e8c5ab

7 files changed: +30 -24 lines changed


pytorch_lightning/core/saving.py

Lines changed: 2 additions & 2 deletions
@@ -154,6 +154,6 @@ def save_hparams_to_yaml(config_yaml, hparams: Union[dict, Namespace]) -> None:
 def convert(val: str) -> Union[int, float, bool, str]:
     try:
         return ast.literal_eval(val)
-    except (ValueError, SyntaxError) as e:
-        log.debug(e)
+    except (ValueError, SyntaxError) as err:
+        log.debug(err)
     return val
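For reference, this hunk only renames the exception variable; the helper's behaviour is unchanged. A minimal standalone sketch of the pattern it touches, with the logger set up locally so it runs on its own:

import ast
import logging
from typing import Union

log = logging.getLogger(__name__)


def convert(val: str) -> Union[int, float, bool, str]:
    # Interpret the string as a Python literal where possible (int, float, bool, ...).
    try:
        return ast.literal_eval(val)
    except (ValueError, SyntaxError) as err:
        # Not a literal (e.g. a plain path or name): log and fall through,
        # returning the raw string unchanged.
        log.debug(err)
    return val


print(convert("42"), convert("1e-3"), convert("True"), convert("some/path"))
# 42 0.001 True some/path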

pytorch_lightning/loggers/comet.py

Lines changed: 1 addition & 1 deletion
@@ -135,7 +135,7 @@ def __init__(self,
         if experiment_name:
             try:
                 self.name = experiment_name
-            except TypeError as e:
+            except TypeError:
                 log.exception("Failed to set experiment name for comet.ml logger")
         self._kwargs = kwargs
 
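This hunk, and the similar ones in trainer/distrib_data_parallel.py, trainer/training_io.py, and tests/trainer/test_trainer.py below, drop an exception variable that was bound but never read (the saving.py and data_parallel.py hunks merely rename bindings that are still used). The commit message mentions flake8, so these were presumably flagged as F841, "local variable is assigned to but never used". A sketch of the before/after shape, with hypothetical names (logger_obj is a stand-in, not the Comet API):

import logging

log = logging.getLogger(__name__)


def set_experiment_name(logger_obj, experiment_name):
    # Before: `except TypeError as e:` -- flake8/pyflakes reports the unused
    # binding as F841. After: drop the binding; log.exception() already records
    # the active exception and its traceback, so nothing is lost.
    try:
        logger_obj.name = experiment_name
    except TypeError:
        log.exception("Failed to set experiment name for comet.ml logger")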

pytorch_lightning/overrides/data_parallel.py

Lines changed: 2 additions & 2 deletions
@@ -177,9 +177,9 @@ def _worker(i, module, input, kwargs, device=None):
 
             with lock:
                 results[i] = output
-        except Exception as e:
+        except Exception as ex:
             with lock:
-                results[i] = e
+                results[i] = ex
 
     # TODO: fix hack (maybe not a hack)
     # make sure each module knows what training state it's in...

pytorch_lightning/trainer/distrib_data_parallel.py

Lines changed: 1 addition & 1 deletion
@@ -277,7 +277,7 @@ def configure_slurm_ddp(self, num_gpu_nodes):
             should_fake = int(os.environ['FAKE_SLURM_MANAGING_TASKS'])
             if should_fake:
                 self.is_slurm_managing_tasks = True
-        except Exception as e:
+        except Exception:
             pass
 
         # notify user the that slurm is managing tasks

pytorch_lightning/trainer/distrib_parts.py

Lines changed: 22 additions & 15 deletions
@@ -343,7 +343,7 @@
 import time
 import random
 import torch
-from typing import Union
+from typing import Union, Callable
 
 from pytorch_lightning import _logger as log
 from pytorch_lightning.loggers import LightningLoggerBase
@@ -748,26 +748,33 @@ def determine_root_gpu_device(gpus):
     return root_gpu
 
 
-def retry_jittered_backoff(f, num_retries=5):
-    # Based on:
-    # https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
-    cap = 1.0  # max sleep time is 1s
-    base = 0.01  # initial sleep time is 10ms
-    sleep = base  # initial sleep time is 10ms
+def retry_jittered_backoff(func: Callable, num_retries: int = 5, cap_delay: float = 1.0, base_delay: float = 0.01):
+    """Retry jittered backoff.
+
+    Based on:
+    https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
+
+    Args:
+        func: tested function
+        num_retries: number of tries
+        cap_delay: max sleep time
+        base_delay: initial sleep time is 10ms
+    """
+    sleep_delay = base_delay  # initial sleep time is 10ms
 
     for i in range(num_retries):
         try:
-            return f()
-        except RuntimeError as e:
+            return func()
+        except RuntimeError as err:
             if i == num_retries - 1:
-                raise e
+                raise err
             else:
                 continue
-        time.sleep(sleep)
-        sleep = min(cap, random.uniform(base, sleep * 3))
+        time.sleep(sleep_delay)
+        sleep_delay = min(cap_delay, random.uniform(base_delay, sleep_delay * 3))
 
 
-def pick_single_gpu(exclude_gpus=[]):
+def pick_single_gpu(exclude_gpus: list):
     for i in range(torch.cuda.device_count()):
         if i in exclude_gpus:
             continue
@@ -781,9 +788,9 @@ def pick_single_gpu(exclude_gpus=[]):
     raise RuntimeError("No GPUs available.")
 
 
-def pick_multiple_gpus(n):
+def pick_multiple_gpus(nb):
     picked = []
-    for _ in range(n):
+    for _ in range(nb):
         picked.append(pick_single_gpu(exclude_gpus=picked))
 
     return picked
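The signature change on pick_single_gpu is the commit's headline fix: `exclude_gpus=[]` used a mutable default, which Python evaluates once at definition time and then shares across every call that omits the argument. In the lines shown here the function only reads the list, so the old default was a lint-flagged hazard rather than an observed bug, but the general pitfall is easy to demonstrate with a toy function (not library code):

def remember(item, seen=[]):            # mutable default: one shared list for all calls
    seen.append(item)
    return seen


print(remember("a"))                    # ['a']
print(remember("b"))                    # ['a', 'b']  <- state leaked from the previous call


def remember_fixed(item, seen=None):    # conventional fix: default to None, build inside
    seen = [] if seen is None else seen
    seen.append(item)
    return seen


print(remember_fixed("a"))              # ['a']
print(remember_fixed("b"))              # ['b']

The commit goes a step further and drops the default entirely (`exclude_gpus: list`), which works here because the only call site visible in this diff, pick_multiple_gpus(), always passes `exclude_gpus=picked`.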

pytorch_lightning/trainer/training_io.py

Lines changed: 1 addition & 2 deletions
@@ -84,7 +84,6 @@
 """
 
 import os
-import pickle
 import re
 import signal
 from abc import ABC
@@ -211,7 +210,7 @@ def register_slurm_signal_handlers(self):
             job_name = os.environ['SLURM_JOB_NAME']
             if job_name != 'bash':
                 on_slurm = True
-        except Exception as e:
+        except Exception:
             pass
 
         if on_slurm:

tests/trainer/test_trainer.py

Lines changed: 1 addition & 1 deletion
@@ -823,5 +823,5 @@ def __init__(self, **kwargs):
     assert trainer.fast_dev_run
 
     # when we pass in an unknown arg, the base class should complain
-    with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'abcdefg'") as e:
+    with pytest.raises(TypeError, match=r"__init__\(\) got an unexpected keyword argument 'abcdefg'"):
        TrainerSubclass(abcdefg='unknown_arg')
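The test change follows the same cleanup: `pytest.raises(..., match=...)` already asserts on the error message, so the ExceptionInfo binding (`as e`) was never read. A self-contained sketch of the pattern (hypothetical class, not the repository's TrainerSubclass fixture):

import pytest


class Base:
    def __init__(self):
        pass


def test_unknown_kwarg_is_rejected():
    # `match` is applied to the exception text with re.search, so no manual
    # inspection of the ExceptionInfo object is needed.
    with pytest.raises(TypeError, match=r"unexpected keyword argument 'abcdefg'"):
        Base(abcdefg='unknown_arg')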
