Add flake8 eradicate #1177

Merged: 1 commit, Oct 17, 2023
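
For context: flake8-eradicate adds the E800 check, which reports commented-out code. This PR installs the plugin into the pre-commit flake8 hook, deletes dead commented-out code, and marks comments that are kept on purpose with # noqa: E800 (or exempts a whole file via per-file-ignores). Below is a minimal, hypothetical sketch of how the check behaves; the module and names are illustrative only and not part of this PR.

# Hypothetical module illustrating the E800 check; nothing below is taken from
# the numba-dpex sources.


def scale(values, factor=2):
    """Multiply every element of values by factor."""
    # Plain prose comments like this one are not reported.
    # result = [v * 2 for v in values]
    return [v * factor for v in values]


# flake8-eradicate flags the commented-out assignment above as E800. A comment
# that intentionally keeps code-like text is silenced inline, which is what
# this PR does throughout the diff:
# result = legacy_scale(values)  # noqa: E800
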
3 changes: 3 additions & 0 deletions .flake8
@@ -13,9 +13,12 @@ per-file-ignores =
__init__.py: F403
# module level import not at top of file
target.py: E402
# config should contain code-line examples in comments
docs/source/conf.py: E800
exclude =
.git,
__pycache__,
_version.py,
versioneer.py,
lowerer.py,
parfor.py
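
The per-file exemption for docs/source/conf.py exists because a Sphinx configuration file routinely keeps alternative settings as commented-out code, and annotating every such line with # noqa: E800 would be noisy. A hypothetical excerpt in that style, assumed for illustration and not taken from the repository:

# Hypothetical Sphinx-style configuration excerpt (not from the repository);
# commented-out options like the ones below are exactly what E800 would flag
# line by line without the per-file ignore.

project = "numba-dpex"
extensions = ["sphinx.ext.autodoc", "sphinx.ext.napoleon"]

# html_theme = "alabaster"
# html_static_path = ["_static"]
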
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
@@ -35,6 +35,7 @@ repos:
rev: 3.9.2
hooks:
- id: flake8
additional_dependencies: [flake8-eradicate]
- repo: https://github.com/koalaman/shellcheck-precommit
rev: v0.8.0
hooks:
3 changes: 0 additions & 3 deletions numba_dpex/core/itanium_mangler.py
@@ -184,9 +184,6 @@ def mangle_identifier(ident, template_params="", *, abi_tags=(), uid=None):

This treats '.' as '::' in C++.
"""
# if uid is not None:
# Add uid to abi-tags
# abi_tags = (f"v{uid}", *abi_tags)
parts = [_len_encoded(_escape_string(x)) for x in ident.split(".")]
enc_abi_tags = list(map(mangle_abi_tag, abi_tags))
extras = template_params + "".join(enc_abi_tags)
2 changes: 1 addition & 1 deletion numba_dpex/core/kernel_interface/dispatcher.py
@@ -488,7 +488,7 @@ def __call__(self, *args):

# TODO: return event that calls wait if no reference to the object if
# it is possible
# event = exec_queue.submit(
# event = exec_queue.submit( # noqa: E800
exec_queue.submit(
sycl_kernel,
packer.unpacked_args,
2 changes: 1 addition & 1 deletion numba_dpex/core/parfors/parfor_lowerer.py
@@ -414,7 +414,7 @@ def _lower_parfor_as_kernel(self, lowerer, parfor):
flags.error_model = "numpy"

# Can't get here unless
# flags.set('auto_parallel', ParallelOptions(True))
# flags.set('auto_parallel', ParallelOptions(True)) # noqa: E800
index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]

# index variables should have the same type, check rest of indices
6 changes: 3 additions & 3 deletions numba_dpex/core/parfors/reduction_helper.py
@@ -41,7 +41,7 @@ def _allocate_partial_reduction_arrays(
# Get the type of the reduction variable.
redvar_typ = lowerer.fndesc.typemap[red_name]

# redarrvar_typ is type(partial_sum)
# redarrvar_typ is type(partial_sum)  # noqa: E800 (kept to aid understanding)
redarrvar_typ = self._redtyp_to_redarraytype(redvar_typ, inputArrayType)
reddtype = redarrvar_typ.dtype
redarrdim = redarrvar_typ.ndim
@@ -63,7 +63,7 @@ def _allocate_partial_reduction_arrays(
name="tot_work",
)

# global_size_mod = tot_work%work_group_size
# global_size_mod = tot_work%work_group_size  # noqa: E800 (kept to aid understanding)
ir_expr = ir.Expr.binop(
operator.mod, total_work_var, work_group_size_var, loc
)
@@ -232,7 +232,7 @@ def __init__(
loop_body = copy.copy(parfor_node.loop_body)
remove_dels(loop_body)

# parfor_dim = len(parfor_node.loop_nests)
# parfor_dim = len(parfor_node.loop_nests)  # noqa: E800 (kept to aid understanding)
loop_indices = [
loop_nest.index_variable.name
for loop_nest in parfor_node.loop_nests
5 changes: 4 additions & 1 deletion numba_dpex/core/targets/dpjit_target.py
@@ -38,8 +38,11 @@ def init(self):
self.is32bit = utils.MACHINE_BITS == 32
self._internal_codegen = JITCPUCodegen("numba.exec")
self.lower_extensions = {}
# TODO: initialize nrt once switched to nrt from drt. Most likely it
# needs to be called somewhere; double check.
# https://github.com/IntelPython/numba-dpex/issues/1175
# Initialize NRT runtime
# rtsys.initialize(self)
# rtsys.initialize(self) # noqa: E800
self.refresh()

@cached_property
2 changes: 1 addition & 1 deletion numba_dpex/decorators.py
@@ -159,7 +159,7 @@ def dpjit(*args, **kws):
# FIXME: When trying to use dpex's target context, overloads do not work
# properly. We will turn on dpex target once the issue is fixed.

# kws.update({"_target": "dpex"})
# kws.update({"_target": "dpex"}) # noqa: E800

return decorators.jit(*args, **kws)

4 changes: 2 additions & 2 deletions numba_dpex/dpnp_iface/arrayobj.py
@@ -234,7 +234,7 @@ def impl(
shape,
dtype=None,
order="C",
# like=None, # see issue https://github.com/IntelPython/numba-dpex/issues/998
# like=None, # noqa: E800 see issue https://github.com/IntelPython/numba-dpex/issues/998
device=None,
usm_type="device",
sycl_queue=None,
@@ -243,7 +243,7 @@
shape,
_dtype,
order,
# like, # see issue https://github.com/IntelPython/numba-dpex/issues/998
# like, # noqa: E800 see issue https://github.com/IntelPython/numba-dpex/issues/998
_device,
_usm_type,
sycl_queue,
6 changes: 0 additions & 6 deletions numba_dpex/examples/blacksholes_njit.py
@@ -12,11 +12,6 @@

import numba_dpex as dpex

# @numba.vectorize(nopython=True)
# def cndf2(inp):
# out = 0.5 + 0.5 * math.erf((math.sqrt(2.0) / 2.0) * inp)
# return out


@dpex.dpjit
def blackscholes(sptprice, strike, timev, rate, volatility):
@@ -68,7 +63,6 @@ def run(iterations):
t1 = time.time()
put = blackscholes(sptprice, initStrike, rate, volatility, timev)
t = time.time() - t1
# print("checksum: ", sum(put))
print(put)
print("SELFTIMED ", t)

4 changes: 0 additions & 4 deletions numba_dpex/numba_patches/patch_arrayexpr_tree_to_ir.py
@@ -46,14 +46,12 @@ def _ufunc_to_parfor_instr(
g_math_assign = ir.Assign(g_math, g_math_var, loc)
func_var_def = ir.Expr.getattr(g_math_var, "sqrt", loc)
out_ir.append(g_math_assign)
# out_ir.append(func_var_def)
ir_expr = ir.Expr.call(func_var, arg_vars, (), loc)
call_typ = typemap[func_var.name].get_call_type(
typingctx, tuple(typemap[a.name] for a in arg_vars), {}
)
calltypes[ir_expr] = call_typ
el_typ = call_typ.return_type
# signature(el_typ, el_typ)
out_ir.append(ir.Assign(func_var_def, func_var, loc))
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))

@@ -122,7 +120,6 @@ def _arrayexpr_tree_to_ir(
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
for T in array_analysis.MAP_TYPES:
if isinstance(op, T):
# elif isinstance(op, (np.ufunc, DUFunc)):
# function calls are stored in variables which are not removed
# op is typing_key to the variables type
el_typ = _ufunc_to_parfor_instr(
@@ -169,7 +166,6 @@ def _arrayexpr_tree_to_ir(
out_ir,
)
else:
# assert typemap[expr.name]==el_typ
el_typ = var_typ
ir_expr = expr
out_ir.append(ir.Assign(ir_expr, expr_out_var, loc))
2 changes: 1 addition & 1 deletion numba_dpex/numba_patches/patch_mk_alloc.py
@@ -77,7 +77,7 @@ def _mk_alloc(
out,
)

# g_np_var = Global(numpy)
# g_np_var = Global(numpy) # noqa: E800
g_np_var = ir.Var(scope, mk_unique_var("$np_g_var"), loc)
if typemap:
typemap[g_np_var.name] = types.misc.Module(numpy)
14 changes: 0 additions & 14 deletions numba_dpex/ocl/mathdecl.py
@@ -108,9 +108,6 @@ def resolve_degrees(self, mod):
def resolve_radians(self, mod):
return types.Function(Math_radians)

# def resolve_hypot(self, mod):
# return types.Function(Math_hypot)

def resolve_copysign(self, mod):
return types.Function(Math_copysign)

@@ -248,16 +245,6 @@ class Math_degrees(Math_unary):
key = math.degrees


# class Math_hypot(ConcreteTemplate):
# key = math.hypot
# cases = [
# signature(types.float64, types.int64, types.int64),
# signature(types.float64, types.uint64, types.uint64),
# signature(types.float32, types.float32, types.float32),
# signature(types.float64, types.float64, types.float64),
# ]


class Math_erf(Math_unary):
key = math.erf

@@ -340,7 +327,6 @@ class Math_isinf(ConcreteTemplate):
infer_global(math.asinh, types.Function(Math_asinh))
infer_global(math.acosh, types.Function(Math_acosh))
infer_global(math.atanh, types.Function(Math_atanh))
# infer_global(math.hypot, types.Function(Math_hypot))
infer_global(math.floor, types.Function(Math_floor))
infer_global(math.ceil, types.Function(Math_ceil))
infer_global(math.trunc, types.Function(Math_trunc))
1 change: 0 additions & 1 deletion numba_dpex/ocl/mathimpl.py
@@ -160,7 +160,6 @@ def function_name_to_supported_decl(name, sig):
return None

fn = _mk_fn_decl(name, sig)
# lower(key, *sig.args)(fn)
lower_ocl_impl[(name, sig)] = lower(key, *sig.args)(fn)


2 changes: 0 additions & 2 deletions numba_dpex/ocl/ocldecl.py
@@ -22,8 +22,6 @@
intrinsic_attr = registry.register_attr
intrinsic_global = registry.register_global

# register_number_classes(intrinsic_global)


@intrinsic
class Ocl_get_global_id(ConcreteTemplate):
1 change: 0 additions & 1 deletion numba_dpex/parfor_diagnostics.py
@@ -41,7 +41,6 @@ def print_auto_offloading(self, lines):
tmp.append([])

summary = dict()
# region : {fused, serialized}

def print_nest(fadj_, nadj_, theroot, reported, region_id):
def print_g(fadj_, nadj_, nroot, depth):
3 changes: 1 addition & 2 deletions numba_dpex/tests/core/types/test_box_unbox.py
@@ -24,7 +24,7 @@
lambda: numpy.empty(10, dtype=numpy.float32),
lambda: dpnp.empty(10, dtype=dpnp.float32),
# TODO: is it possible to test USMNd array same way?
# lambda: dpx.USMNdArray(1, queue=None, dtype=dpx.float32),
# lambda: dpx.USMNdArray(1, queue=None, dtype=dpx.float32), # noqa: E800
]

ranges = [(10,), (10, 10), (10, 10, 10)]
@@ -56,7 +56,6 @@ def unbox_box(a):
return a


# @numba.njit
@dpx.dpjit
def unbox(a):
return None
8 changes: 4 additions & 4 deletions numba_dpex/tests/debugging/test_breakpoints.py
@@ -77,13 +77,13 @@ def test_breakpoint_with_condition_by_function_argument(app, breakpoint, api):
"breakpoint, script",
[
# location specified by file name and function name
# commands/break_file_func
# commands/break_file_func # noqa: E800
("simple_sum.py:data_parallel_sum", None),
# location specified by function name
# commands/break_func
# commands/break_func # noqa: E800
("data_parallel_sum", "simple_sum.py"),
# location specified by file name and nested function name
# commands/break_nested_func
# commands/break_nested_func # noqa: E800
("simple_dpex_func.py:func_sum", None),
],
)
@@ -95,7 +95,7 @@ def test_breakpoint_common(app, breakpoint, script):
@pytest.mark.parametrize(
"breakpoint, variable_name, variable_value",
[
# commands/break_conditional
# commands/break_conditional # noqa: E800
(f"{simple_sum_condition_breakpoint} if i == 1", "i", "1"),
],
)
2 changes: 0 additions & 2 deletions numba_dpex/tests/dpjit_tests/dpnp/test_dpnp_full.py
@@ -118,7 +118,6 @@ def func(shape, fill_value):
"Returned queue does not have the same queue as cached against the device."
)

# dummy = dpnp.full(shape, fill_value, dtype=dtype)
# dpnp can't cast 4294967295 into int32 and so on,
# but we can, also numpy can, so we are using numpy here
dummy = numpy.full(shape, fill_value, dtype=dtype)
@@ -164,7 +163,6 @@ def func(shape, fill_value, queue):
"Returned queue does not have the same queue as the one passed to the dpnp function."
)

# dummy = dpnp.full(shape, fill_value, dtype=dtype)
# dpnp can't cast 4294967295 into int32 and so on,
# but we can, also numpy can, so we are using numpy here
dummy = numpy.full(shape, fill_value, dtype=dtype)
2 changes: 0 additions & 2 deletions numba_dpex/tests/dpjit_tests/dpnp/test_dpnp_full_like.py
@@ -117,7 +117,6 @@ def func(x, fill_value):
"Returned queue does not have the same queue as cached against the device."
)

# dummy = dpnp.full_like(a, fill_value, dtype=dtype)
# dpnp can't cast 4294967295 into int32 and so on,
# but we can, also numpy can, so we are using numpy here
dummy = numpy.full_like(a.asnumpy(), fill_value, dtype=dtype)
@@ -161,7 +160,6 @@ def func(x, fill_value, queue):
assert c.sycl_queue == a.sycl_queue
assert c.sycl_queue == queue

# dummy = dpnp.full_like(a, fill_value, dtype=dtype)
# dpnp can't cast 4294967295 into int32 and so on,
# but we can, also numpy can, so we are using numpy here
dummy = numpy.full_like(a.asnumpy(), fill_value, dtype=dtype)
6 changes: 0 additions & 6 deletions numba_dpex/tests/kernel_tests/test_usm_ndarray_interop.py
@@ -9,12 +9,6 @@
import numba_dpex as dpex
from numba_dpex.tests._helper import get_all_dtypes

# list_of_dtype = [
# numpy.int32,
# numpy.int64,
# numpy.float32,
# numpy.float64,
# ]
list_of_dtype = get_all_dtypes(
no_bool=True, no_float16=True, no_none=True, no_complex=True
)
1 change: 0 additions & 1 deletion numba_dpex/vectorizers.py
@@ -148,7 +148,6 @@ def attempt_ravel(a):
return devout.reshape(outshape)
else:
# Otherwise, transfer output back to host
# return devout.copy_to_host().reshape(outshape)
raise ValueError("copy_to_host() is not yet supported")

elif cr.is_device_array(out):