Commit 1e29ee5

Fix errors
Signed-off-by: Yuanyuan Chen <[email protected]>
1 parent d81aa3a commit 1e29ee5

18 files changed: +26 -37 lines changed
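
Most of the hunks below make the same simplification: a membership test followed by indexing is collapsed into a single dict.get call. A minimal sketch of the idiom, for reference only and not part of the diff (the node payload is made up, loosely mirroring the first hunk):

    # Illustrative sketch of the idiom applied throughout this commit; not part of the diff.
    node = {"bodyText": "LGTM"}  # hypothetical payload with no "createdAt" key

    # Before: explicit membership test plus indexing (two lookups)
    created_at = node["createdAt"] if "createdAt" in node else ""

    # After: dict.get with a default (one lookup, same result for a plain dict)
    created_at = node.get("createdAt", "")

    assert created_at == ""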

.github/scripts/trymerge.py

Lines changed: 1 addition & 1 deletion
@@ -1092,7 +1092,7 @@ def _comment_from_node(node: Any) -> GitHubComment:
     editor = node["editor"]
     return GitHubComment(
         body_text=node["bodyText"],
-        created_at=node["createdAt"] if "createdAt" in node else "",
+        created_at=node.get("createdAt", ""),
         author_login=node["author"]["login"],
         author_url=node["author"].get("url", None),
         author_association=node["authorAssociation"],

test/ao/sparsity/test_activation_sparsifier.py

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ def _check_constructor(self, activation_sparsifier, model, defaults, sparse_conf
 
         for key, config in sparsifier_defaults.items():
             # all the keys in combined_defaults should be present in sparsifier defaults
-            assert config == combined_defaults.get(key, None)
+            assert config == combined_defaults.get(key)
 
     def _check_register_layer(
         self, activation_sparsifier, defaults, sparse_config, layer_args_list
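
This hunk drops the explicit None, while other files (for example test_checkpoint.py below) keep .get(name, None); the two spellings are interchangeable because None is already dict.get's default. A throwaway check, illustrative only (the key names are made up):

    # Illustrative only: .get(key) and .get(key, None) behave identically.
    combined_defaults = {"sparsity_level": 0.5}
    assert combined_defaults.get("missing") is None
    assert combined_defaults.get("missing", None) is None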

test/distributed/checkpoint/test_checkpoint.py

Lines changed: 1 addition & 1 deletion
@@ -152,7 +152,7 @@ def __init__(self, fail_conf):
         self.rank = 0 if not dist.is_initialized() else dist.get_rank()
 
     def _get_ranks(self, name):
-        return self.fail_conf[name] if name in self.fail_conf else None
+        return self.fail_conf.get(name, None)
 
     def _fail_rank(self, name):
         ranks = self._get_ranks(name)

test/distributed/pipelining/test_schedule.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ def __init__(self, *args, **kwargs):
         self.num_stages = kwargs.get("num_stages", 1)
         self.group_size = kwargs.get("group_size", 1)
         self.group_rank = kwargs.get("group_rank", 0)
-        self.group = kwargs.get("group", None)
+        self.group = kwargs.get("group")
 
     def _create_grad_recv_info(self, *args, **kwargs):
         return None

test/distributed/test_c10d_nccl.py

Lines changed: 1 addition & 5 deletions
@@ -2770,11 +2770,7 @@ def hook(work_info: torch._C._distributed_c10d.WorkInfo):
         # from rank0 to other ranks. However, this is DDP's internal implementation,
         # which is subject to change in future versions.
         self.assertTrue(num_hook_fired[OpType.BROADCAST] > 0)
-        ctor_allreduce = (
-            num_hook_fired[OpType.ALLREDUCE]
-            if OpType.ALLREDUCE in num_hook_fired
-            else 0
-        )
+        ctor_allreduce = num_hook_fired.get(OpType.ALLREDUCE, 0)
 
         x = torch.zeros(2, 1000).cuda(self.rank)
         ddp(x).sum().backward()

test/dynamo/test_python_autograd.py

Lines changed: 1 addition & 1 deletion
@@ -82,7 +82,7 @@ def grad(L, desired_results: list[Variable]) -> list[Variable]:
     # look up dL_dentries. If a variable is never used to compute the loss,
    # we consider its gradient None, see the note below about zeros for more information.
     def gather_grad(entries: list[str]):
-        return [dL_d[entry] if entry in dL_d else None for entry in entries]
+        return [dL_d.get(entry) for entry in entries]
 
     # propagate the gradient information backward
     for entry in reversed(gradient_tape):

test/dynamo/test_subclasses.py

Lines changed: 1 addition & 1 deletion
@@ -286,7 +286,7 @@ def __tensor_flatten__(self):
     def __tensor_unflatten__(inner_tensors, metadata, outer_size, outer_stride):
         return OptionalScaledTensor(
             inner_tensors["_data"],
-            inner_tensors["_scale"] if "_scale" in inner_tensors else None,
+            inner_tensors.get("_scale", None),
             constant=metadata["_constant"],
         )
 
test/fx/test_fx_traceback.py

Lines changed: 1 addition & 3 deletions
@@ -177,9 +177,7 @@ def forward(self, x):
         for node_name_2 in node_name_to_from_node:
             if node_name_2 in {
                 node_name_1,
-                same_ancestor_nodes[node_name_1]
-                if node_name_1 in same_ancestor_nodes
-                else None,
+                same_ancestor_nodes.get(node_name_1),
             }:
                 self.assertEqual(
                     node_name_to_from_node[node_name_1],

test/inductor/test_cudagraph_trees.py

Lines changed: 1 addition & 1 deletion
@@ -177,7 +177,7 @@ def tearDown(self):
 
     def get_manager(self, device_index=None):
         return torch._inductor.cudagraph_trees.get_container(
-            self.device_idx if not device_index else device_index
+            device_index if device_index else self.device_idx
         ).tree_manager
 
     def get_roots(self):
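
The get_manager change above is the one hunk that is not a dict.get conversion: it reorders a conditional expression so the override is named first, which reads more naturally but returns the same value for every input. A quick equivalence sketch, illustrative only (DEFAULT_IDX and override are made-up names, not from the test file):

    # Illustrative equivalence check; names are made up.
    DEFAULT_IDX = 0
    for override in (None, 1, 2):
        old = DEFAULT_IDX if not override else override
        new = override if override else DEFAULT_IDX
        assert old == new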

test/mobile/model_test/update_production_ops.py

Lines changed: 3 additions & 7 deletions
@@ -17,17 +17,13 @@
 for info in model_infos:
     for op in info["root_operators"]:
         # aggregate occurance per op
-        root_operators[op] = 1 + (root_operators[op] if op in root_operators else 0)
+        root_operators[op] = 1 + (root_operators.get(op, 0))
     for op in info["traced_operators"]:
         # aggregate occurance per op
-        traced_operators[op] = 1 + (
-            traced_operators[op] if op in traced_operators else 0
-        )
+        traced_operators[op] = 1 + (traced_operators.get(op, 0))
     # merge dtypes for each kernel
     for kernal, dtypes in info["kernel_metadata"].items():
-        new_dtypes = dtypes + (
-            kernel_metadata[kernal] if kernal in kernel_metadata else []
-        )
+        new_dtypes = dtypes + (kernel_metadata.get(kernal, []))
         kernel_metadata[kernal] = list(set(new_dtypes))
 
 
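The update_production_ops.py hunk uses the same .get idiom as a running tally, defaulting to 0 the first time an operator is seen. A self-contained sketch of that counting pattern, illustrative only (the operator names are made up):

    # Illustrative counting sketch (not part of the diff); operator names are made up.
    root_operators: dict[str, int] = {}
    for op in ["aten::add", "aten::mul", "aten::add"]:
        # aggregate occurrence per op, defaulting to 0 on first sight
        root_operators[op] = 1 + root_operators.get(op, 0)

    assert root_operators == {"aten::add": 2, "aten::mul": 1}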