[CodeStyle][CINN] fix Ruff lint errors (pyupgrade rules) (#54988)
gouzil authored Jul 3, 2023
1 parent 1899505 commit 402e277
Showing 25 changed files with 55 additions and 71 deletions.
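For context, the pyupgrade (UP*) rules fixed in this commit rewrite Python 2-era idioms into their modern Python 3 forms. A minimal sketch of the recurring before/after patterns (illustrative names, not taken from the diff):

```python
# UP004: classes should not explicitly inherit from `object` (implicit in Python 3).
class Model:  # was: class Model(object):
    pass


# UP008: `super()` needs no arguments inside a method.
class SubModel(Model):
    def __init__(self):
        super().__init__()  # was: super(SubModel, self).__init__()


# UP032: prefer f-strings over str.format().
name = "cinn"
msg = f"hello {name}"  # was: "hello {}".format(name)
```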
2 changes: 1 addition & 1 deletion paddle/cinn/backends/llvm/generate_runtime_llvm_ir.py
@@ -35,7 +35,7 @@ def main():
srcs.append(')ROC"')
srcs.append(');\n')

cmd = "{} --version".format(llvm_config)
cmd = f"{llvm_config} --version"
version = (
subprocess.check_output(cmd, shell=True)
.decode('utf-8')
12 changes: 0 additions & 12 deletions pyproject.toml
@@ -101,7 +101,6 @@ ignore = [
"python/cinn/**" = [
"F401",
"F403",
"UP004",
]
"test/cinn/**" = [
"F401",
@@ -112,23 +111,12 @@ ignore = [
"F901",
"C408",
"C417",
"UP004",
"UP008",
"UP027",
"UP032",
"UP034",
"PLR0402",
"PLC0414",
"PLE1205",
]
"paddle/cinn/**" = [
"UP032",
]
"tools/cinn/**" = [
"F401",
"C416",
"UP004",
"UP031",
"UP032",
"PLR0402",
]
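The config change only deletes ignore entries: every suppressed pyupgrade rule (UP004, UP008, UP027, UP031, UP032, UP034) is removed from the per-file ignores, so Ruff enforces them again on the CINN trees (reproducible locally with something like `ruff check --select UP --fix`). Of these, only UP031 (printf-style `%` formatting) is not illustrated in the hunks shown below; a hedged sketch of what it flags (hypothetical values):

```python
count = 25
# was: msg = "showing %d files" % count   # UP031: printf-style formatting
msg = f"showing {count} files"  # the autofix target, consistent with UP032
```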
2 changes: 1 addition & 1 deletion python/cinn/auto_schedule/cost_model/cost_model.py
@@ -21,7 +21,7 @@ class CostModelType(enum.Enum):
XGB = 1


- class CostModel(object):
+ class CostModel:
"""
A base class to call different cost model algorithm.
"""
2 changes: 1 addition & 1 deletion python/cinn/auto_schedule/cost_model/xgb_cost_model.py
@@ -16,7 +16,7 @@
import xgboost as xgb


- class XgbCostModel(object):
+ class XgbCostModel:
"""
A cost model implemented by XgbCostModel
"""
2 changes: 1 addition & 1 deletion test/cinn/conv2d_utils.py
@@ -42,7 +42,7 @@ def conv2d_native(inputs_data, input_shape, filter_size, attrs, is_depthwise):
elif key == "data_format":
data_format = attrs.get_attr("data_format")
else:
raise ValueError("attr_store {} is not supported".format(key))
raise ValueError(f"attr_store {key} is not supported")

img = static.data(name='img', shape=input_shape[1:], dtype='float32')
if is_depthwise:
6 changes: 2 additions & 4 deletions test/cinn/fusion/fusion_test.py
@@ -22,7 +22,7 @@

class FusionTest(PassTest):
def __init__(self, *args, **kwargs):
- super(FusionTest, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)

def init_input_data(self):
"""Set feed data"""
@@ -44,9 +44,7 @@ def check_fusion_outputs(
fusion_passes = ["OpFusionPass", "FusionMergePass"]

real_group_size = self.get_pass_size(base_passes + fusion_passes)
- logger.debug(
- "The model has been fused into {} groups".format(real_group_size)
- )
+ logger.debug(f"The model has been fused into {real_group_size} groups")
self.assertEqual(
real_group_size,
group_size,
2 changes: 1 addition & 1 deletion test/cinn/op_mappers/op_mapper_test.py
@@ -38,7 +38,7 @@

class OpMapperTest(OpTest):
def __init__(self, *args, **kwargs):
- super(OpMapperTest, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self._init_place()
self.init_input_data()

4 changes: 2 additions & 2 deletions test/cinn/ops/op_test.py
@@ -58,7 +58,7 @@ def convert_uint16_to_float(data):

class OpTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
- super(OpTest, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self._init_target()
self._init_results()
self._init_seed()
@@ -304,7 +304,7 @@ def _check_error_message(output_id, expect, actual):

error_message = "[Check " + name + "] " + error_message

logger.debug("{} {}".format(is_allclose, error_message))
logger.debug(f"{is_allclose} {error_message}")
self.assertTrue(is_allclose, msg=error_message)

@staticmethod
2 changes: 1 addition & 1 deletion test/cinn/ops/test_bitcast_convert_op.py
@@ -70,7 +70,7 @@ def init_case(self):
packed = pack(data.size * 'h', *data.flatten())
self.inputs = {"x": data}
self.outputs = {
"y": np.array(unpack('4i', packed), dtype='int32').reshape((4)),
"y": np.array(unpack('4i', packed), dtype='int32').reshape(4),
"output_type": "int32",
}

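This hunk is UP034 (extraneous parentheses): `(4)` is just the integer 4, not a 1-tuple, so `reshape((4))` and `reshape(4)` are the same call. A quick standalone check of that equivalence (plain NumPy, illustrative only):

```python
import numpy as np

a = np.arange(4)
# (4) evaluates to the int 4; a real 1-tuple would be (4,).
assert a.reshape(4).shape == a.reshape((4)).shape == (4,)
```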
4 changes: 2 additions & 2 deletions test/cinn/ops/test_gather_nd_op.py
@@ -55,7 +55,7 @@ def build_paddle_program(self, target):
x = paddle.to_tensor(x, stop_gradient=False)
index = paddle.to_tensor(index, stop_gradient=False)
out = paddle.gather_nd(x, index)
logger.debug(" -- The output of Paddle:\n{}".format(out))
logger.debug(f" -- The output of Paddle:\n{out}")
self.paddle_outputs.append(out)

def build_cinn_program(self, target):
@@ -70,7 +70,7 @@ def build_cinn_program(self, target):
res = self.get_cinn_output(
prog, target, [x, index], self.data[i], [out]
)
logger.debug(" -- The output of CINN:\n{}".format(res))
logger.debug(f" -- The output of CINN:\n{res}")
self.cinn_outputs.extend(res)

def test_check_results(self):
4 changes: 2 additions & 2 deletions test/cinn/ops/test_gather_op.py
@@ -54,7 +54,7 @@ def build_paddle_program(self, target):
x = paddle.to_tensor(x, stop_gradient=False)
index = paddle.to_tensor(index, stop_gradient=False)
out = paddle.gather(x, index, axis)
logger.debug(" -- The output of Paddle:\n{}".format(out))
logger.debug(f" -- The output of Paddle:\n{out}")
self.paddle_outputs.append(out)

def build_cinn_program(self, target):
@@ -67,7 +67,7 @@ def build_cinn_program(self, target):
out = builder.gather(x, index, axis=axis)
prog = builder.build()
res = self.get_cinn_output(prog, target, [x, index], self.data, [out])
logger.debug(" -- The output of CINN:\n{}".format(res))
logger.debug(f" -- The output of CINN:\n{res}")
self.cinn_outputs.extend(res)

def test_check_results(self):
2 changes: 1 addition & 1 deletion test/cinn/ops/test_scatter_add.py
@@ -79,7 +79,7 @@ def build_paddle_program(self, target):
[i, j, k, self.inputs["index"][l]]
)
else:
self.assertTrue(False, "Axis {} No Implement".format(pos_axis))
self.assertTrue(False, f"Axis {pos_axis} No Implement")

index = paddle.to_tensor(index_nd, stop_gradient=True)
res = paddle.scatter_nd_add(x, index, y)
2 changes: 1 addition & 1 deletion test/cinn/ops/test_scatter_assign_op.py
@@ -74,7 +74,7 @@ def build_paddle_program(self, target):
l
]
else:
self.assertTrue(False, "Axis {} No Implement".format(self.axis))
self.assertTrue(False, f"Axis {self.axis} No Implement")

pd_out = paddle.to_tensor(out, stop_gradient=True)
self.paddle_outputs = [pd_out]
2 changes: 1 addition & 1 deletion test/cinn/ops/test_zero_dim_tensor.py
@@ -195,7 +195,7 @@ def paddle_func(self, *args):
def cinn_func(self, builder, *args):
return eval(fn_cinn)(*args)

cls_name = "{}_{}".format(parent.__name__, test_name)
cls_name = f"{parent.__name__}_{test_name}"
TestClass.__name__ = cls_name
globals()[cls_name] = TestClass

10 changes: 4 additions & 6 deletions test/cinn/passes/pass_test.py
@@ -29,7 +29,7 @@

class PassTest(OpTest):
def __init__(self, *args, **kwargs):
- super(PassTest, self).__init__(*args, **kwargs)
+ super().__init__(*args, **kwargs)
self.init_input_data()

def init_input_data(self) -> dict:
@@ -82,9 +82,9 @@ def get_pass_outputs(self, passes):
def get_pass_size(self, passes):
pass_prog, _, outputs = self.run_program()
fetch_ids = {str(out) for out in outputs}
logger.debug("Before pass {}:\n{}".format(passes, str(pass_prog)))
logger.debug(f"Before pass {passes}:\n{str(pass_prog)}")
op_num = pass_prog.apply_pass(fetch_ids, self.target, passes)
logger.debug("After pass {}:\n{}".format(passes, str(pass_prog)))
logger.debug(f"After pass {passes}:\n{str(pass_prog)}")
return op_num

def check_pass_outputs(
@@ -97,9 +97,7 @@ def check_pass_outputs(
equal_nan=False,
):
base_pass_size = self.get_pass_size(base_passes)
- logger.debug(
- "Pass after base pass optimize has {} ops".format(base_pass_size)
- )
+ logger.debug(f"Pass after base pass optimize has {base_pass_size} ops")
test_pass_size = self.get_pass_size(base_passes + test_passes)
logger.debug(
"Pass after base and test pass optimize has {} ops".format(
18 changes: 9 additions & 9 deletions test/cinn/pool_utils.py
@@ -41,7 +41,7 @@ def pool2d(np_data, attrs, dtype="float32"):
elif key == "data_format":
data_format = attrs.get_attr("data_format")
else:
raise ValueError("attr_store {} is not supported".format(key))
raise ValueError(f"attr_store {key} is not supported")

if data_format == "NCHW":
in_n, in_c, in_h, in_w = in_shape = np_data.shape
@@ -52,7 +52,7 @@ def pool2d(np_data, attrs, dtype="float32"):
height_axis = 1
width_axis = 2
else:
raise ValueError("data_format {} is not supported".format(data_format))
raise ValueError(f"data_format {data_format} is not supported")

if isinstance(kernel_size, int):
k_h = k_w = kernel_size
@@ -205,7 +205,7 @@ def pool2d(np_data, attrs, dtype="float32"):
axis=(height_axis, width_axis),
)
else:
raise ValueError("pool type {} is not supported".format(pool_type))
raise ValueError(f"pool type {pool_type} is not supported")

ret_np = np.maximum(ret_np, fill_value)
return ret_np, [out_shape]
@@ -232,7 +232,7 @@ def pool3d(np_data, attrs, dtype="float32"):
elif key == "data_format":
data_format = attrs.get_attr("data_format")
else:
raise ValueError("attr_store {} is not supported".format(key))
raise ValueError(f"attr_store {key} is not supported")

if data_format == "NCDHW":
in_n, in_c, in_d, in_h, in_w = in_shape = np_data.shape
@@ -245,7 +245,7 @@ def pool3d(np_data, attrs, dtype="float32"):
height_axis = 2
width_axis = 3
else:
raise ValueError("data_format {} is not supported".format(data_format))
raise ValueError(f"data_format {data_format} is not supported")

if isinstance(kernel_size, int):
k_d = k_h = k_w = kernel_size
@@ -416,7 +416,7 @@ def pool3d(np_data, attrs, dtype="float32"):
axis=(depth_axis, height_axis, width_axis),
)
else:
raise ValueError("pool type {} is not supported".format(pool_type))
raise ValueError(f"pool type {pool_type} is not supported")

ret_np = np.maximum(ret_np, fill_value)
return ret_np, [out_shape]
@@ -443,7 +443,7 @@ def pool1d(np_data, attrs, dtype="float32"):
elif key == "data_format":
data_format = attrs.get_attr("data_format")
else:
raise ValueError("attr_store {} is not supported".format(key))
raise ValueError(f"attr_store {key} is not supported")

if data_format == "NCW":
in_n, in_c, in_w = in_shape = np_data.shape
@@ -452,7 +452,7 @@ def pool1d(np_data, attrs, dtype="float32"):
in_n, in_w, in_c = in_shape = np_data.shape
width_axis = 1
else:
raise ValueError("data_format {} is not supported".format(data_format))
raise ValueError(f"data_format {data_format} is not supported")

if isinstance(kernel_size, int):
k_w = kernel_size
@@ -539,7 +539,7 @@ def pool1d(np_data, attrs, dtype="float32"):
pad_np[:, k * s_w : k * s_w + k_w, :], axis=width_axis
)
else:
raise ValueError("pool type {} is not supported".format(pool_type))
raise ValueError(f"pool type {pool_type} is not supported")

ret_np = np.maximum(ret_np, fill_value)
return ret_np, [out_shape]
4 changes: 2 additions & 2 deletions test/cinn/test_matmul.py
@@ -63,7 +63,7 @@ def test_matmul_tile(self):


def create_matmul_basic(target, m, n, k):
- m, n, k = [ir.Expr(_) for _ in (m, n, k)]
+ m, n, k = (ir.Expr(_) for _ in (m, n, k))

a = lang.Placeholder("float32", "A", [m, k])
b = lang.Placeholder("float32", "B", [k, n])
@@ -90,7 +90,7 @@ def create_matmul_basic(target, m, n, k):


def create_matmul_tile(target, m, n, k):
- m, n, k = [ir.Expr(_) for _ in [m, n, k]]
+ m, n, k = (ir.Expr(_) for _ in [m, n, k])
a = lang.Placeholder("float32", "A", [m, k])
b = lang.Placeholder("float32", "B", [k, n])

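Both hunks in this file apply UP027 (unpacked list comprehension): when a comprehension is immediately tuple-unpacked, a generator expression avoids materializing an intermediate list. A minimal standalone sketch (illustrative values, not the `ir.Expr` calls above):

```python
# was: m, n, k = [int(v) for v in ("4", "8", "16")]
m, n, k = (int(v) for v in ("4", "8", "16"))  # generator; no temporary list
assert (m, n, k) == (4, 8, 16)
```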
2 changes: 1 addition & 1 deletion test/cinn/test_packed_func.py
@@ -43,7 +43,7 @@ def mul(x, y):
self.assertEqual(mul(4, 5), 20)

def test_callable_object(self):
- class Accumulator(object):
+ class Accumulator:
def __init__(self, init):
self.init = init

14 changes: 6 additions & 8 deletions test/cinn/test_paddle_model_convertor.py
@@ -165,9 +165,9 @@ def load_paddle_program(self):
return_numpy=True,
)

logger.debug(msg="Program:\n{}".format(self.inference_program))
logger.debug(msg="Param List: {}".format(self.param_vars.keys()))
logger.debug(msg="Feed List: {}".format(self.feed_names))
logger.debug(msg=f"Program:\n{self.inference_program}")
logger.debug(msg=f"Param List: {self.param_vars.keys()}")
logger.debug(msg=f"Feed List: {self.feed_names}")
logger.debug(
msg="Fetch List: {}".format(
[var.name for var in self.fetch_targets]
@@ -195,7 +195,7 @@ def build_paddle_program(self, target):
fetch_list=self.fetch_targets,
return_numpy=True,
)
logger.debug("Paddle Result:\n{}".format(self.paddle_outputs))
logger.debug(f"Paddle Result:\n{self.paddle_outputs}")

def build_cinn_program(self, target):
self.assertEqual(
@@ -237,9 +237,7 @@ def build_cinn_program(self, target):

# get cinn input list
inputs = prog.get_inputs()
- logger.debug(
- "CINN Input List: {}".format([var.name() for var in inputs])
- )
+ logger.debug(f"CINN Input List: {[var.name() for var in inputs]}")
self.assertEqual(
len(feed_with_param),
len(inputs),
@@ -284,7 +282,7 @@ def build_cinn_program(self, target):
prog, target, cinn_inputs, cinn_feed_datas, cinn_output, passes=[]
)

logger.debug("CINN Result:\n{}".format(self.cinn_outputs))
logger.debug(f"CINN Result:\n{self.cinn_outputs}")

def test_check_results(self):
# TODO(6clc): There is a random accuracy problem,
4 changes: 2 additions & 2 deletions test/cinn/test_pe_elementwise.py
@@ -113,13 +113,13 @@ def union_tester(
is_round=False,
is_bool=False,
):
- m, n = [
+ m, n = (
ir.Expr(_)
for _ in (
self.m,
self.n,
)
- ]
+ )

x = lang.Placeholder(dtype, "x", [m, n])
y = cinn_fn(x.to_tensor())