diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index b9a4990fa0a170..d0d877a7126efe 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -3831,16 +3831,18 @@ def clip(
     else:
         if in_dynamic_or_pir_mode():
-            if isinstance(min, (Variable, paddle.pir.Value, paddle.Tensor)):
+            if isinstance(min, Variable):
                 min = min.item(0)
-            if isinstance(max, (Variable, paddle.pir.Value, paddle.Tensor)):
+            if isinstance(max, Variable):
                 max = max.item(0)
             min = min_ if min is None else min
             max = max_ if max is None else max
             return _C_ops.clip(x, min, max)
         else:
             if min is not None:
-                check_type(min, 'min', (float, int, Variable, paddle.Tensor), 'clip')
+                check_type(
+                    min, 'min', (float, int, Variable, paddle.Tensor), 'clip'
+                )
                 if isinstance(min, (Variable, paddle.Tensor)):
                     check_dtype(
                         min.dtype,
@@ -3850,7 +3852,9 @@ def clip(
                         '(When the type of min in clip is Variable.)',
                     )
             if max is not None:
-                check_type(max, 'max', (float, int, Variable, paddle.Tensor), 'clip')
+                check_type(
+                    max, 'max', (float, int, Variable, paddle.Tensor), 'clip'
+                )
                 if isinstance(max, (Variable, paddle.Tensor)):
                     check_dtype(
                         max.dtype,
diff --git a/test/legacy_test/test_clip_tensor_op.py b/test/legacy_test/test_clip_tensor_op.py
index 75450838ce6472..1cc69d851c294e 100644
--- a/test/legacy_test/test_clip_tensor_op.py
+++ b/test/legacy_test/test_clip_tensor_op.py
@@ -28,9 +28,13 @@ def test_static_clip(self):
             if base.core.is_compiled_with_cuda()
             else base.CPUPlace()
         )
-        data = np.random.random(data_shape).astype('float32')
-        min_data = np.random.random(data_shape[-2:]).astype('float32')
-        max_data = np.random.random(data_shape[-3:]).astype('float32')
+        data = np.random.uniform(-2.0, 2.0, data_shape).astype('float32')
+        min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+            'float32'
+        )
+        max_data = np.random.uniform(1.5, 2.5, data_shape[-3:]).astype(
+            'float32'
+        )
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
@@ -60,9 +64,11 @@ def test_static_clip(self):
             if base.core.is_compiled_with_cuda()
             else base.CPUPlace()
         )
-        data = np.random.random(data_shape).astype('float32')
-        min_data = np.random.random(data_shape[-2:]).astype('float32')
-        max_data = np.random.random(data_shape[-3:]).astype('float32')
+
+        data = np.random.uniform(-2.0, 7.0, data_shape).astype('float32')
+        min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+            'float32'
+        )
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
@@ -90,8 +96,10 @@ def test_static_clip(self):
             if base.core.is_compiled_with_cuda()
             else base.CPUPlace()
         )
-        data = np.random.random(data_shape).astype('float32')
-        min_data = np.random.random(data_shape[-2:]).astype('float32')
+        data = np.random.uniform(-2.0, 2.0, data_shape).astype('float32')
+        min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+            'float32'
+        )
         max_data = float(np.finfo(np.float32).max)
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
@@ -115,14 +123,14 @@ def test_static_clip(self):
             if base.core.is_compiled_with_cuda()
             else base.CPUPlace()
         )
-        data = np.random.random(data_shape).astype('float32')
-        min_data = np.random.random([1]).astype('float32')
-        max_data = min_data + 10.0
+        data = np.random.uniform(-2.0, 2.0, data_shape).astype('float32')
+        min_data = np.random.uniform(-2.5, -1.5, []).astype('float32')
+        max_data = np.random.uniform(1.5, 2.5, []).astype('float32')
         paddle.enable_static()
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.static.data(name='x', shape=data_shape, dtype='float32')
-            min = paddle.static.data(name='min', shape=[1], dtype='float32')
-            max = paddle.static.data(name='max', shape=[1], dtype='float32')
+            min = paddle.static.data(name='min', shape=[], dtype='float32')
+            max = paddle.static.data(name='max', shape=[], dtype='float32')
             out = paddle.clip(x, min, max)
             exe = base.Executor(self.place)
             res = exe.run(
@@ -169,9 +177,13 @@ def test_dygraph_clip(self):
         )
         paddle.disable_static(self.place)
         data_shape = [1, 2, 3, 4]
-        data = np.random.random(data_shape).astype('float32')
-        min_data = np.random.random(data_shape[-2:]).astype('float32')
-        max_data = np.random.random(data_shape[-3:]).astype('float32')
+        data = np.random.uniform(-2.0, 2.0, data_shape).astype('float32')
+        min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+            'float32'
+        )
+        max_data = np.random.uniform(1.5, 2.5, data_shape[-3:]).astype(
+            'float32'
+        )
         out_np = np.clip(data, min_data, max_data)
         data = paddle.to_tensor(data)
         min_data = paddle.to_tensor(min_data)
@@ -180,9 +192,13 @@ def test_dygraph_clip(self):
         np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

         data_shape = [1, 2, 3, 4]
-        data = np.random.random(data_shape).astype('float32')
-        min_data = np.random.random(data_shape[-2:]).astype('float32')
-        max_data = np.random.random(data_shape[-1:]).astype('float32')
+        data = np.random.uniform(-2.0, 2.0, data_shape).astype('float32')
+        min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+            'float32'
+        )
+        max_data = np.random.uniform(1.5, 2.5, data_shape[-1:]).astype(
+            'float32'
+        )
         out_np = np.clip(data, min_data, max_data)
         data = paddle.to_tensor(data)
         min_data = paddle.to_tensor(min_data)
@@ -191,19 +207,23 @@ def test_dygraph_clip(self):
         np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

         data_shape = [1, 2, 3, 4]
-        data = np.random.random(data_shape).astype('int32')
-        min_data = np.random.random(data_shape[-2:]).astype('int32')
+        data = np.random.uniform(-2.0, 7.0, data_shape).astype('int32')
+        min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+            'int32'
+        )
         max_data = 5
         out_np = np.clip(data, min_data, max_data)
-        data = paddle.to_tensor(data)
-        min_data = paddle.to_tensor(min_data)
+        data = paddle.to_tensor(data, dtype='int32')
+        min_data = paddle.to_tensor(min_data, dtype='int32')
         max_data = paddle.to_tensor([max_data], dtype='int32')
         out = paddle.clip(data, min_data, max_data)
         np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

         data_shape = [1, 2, 3, 4]
-        data = np.random.random(data_shape).astype('float32')
-        min_data = np.random.random(data_shape[-2:]).astype('float32')
+        data = np.random.uniform(-2.0, 2.0, data_shape).astype('float32')
+        min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+            'float32'
+        )
         max_data = float(np.finfo(np.float32).max)
         out_np = np.clip(data, min_data, max_data)
         data = paddle.to_tensor(data)
@@ -212,8 +232,10 @@ def test_dygraph_clip(self):
         np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

         data_shape = [1, 2, 3, 4]
-        data = np.random.random(data_shape).astype('float32')
-        min_data = np.random.random(data_shape[-2:]).astype('float32')
+        data = np.random.uniform(-2.0, 2.0, data_shape).astype('float32')
+        min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+            'float32'
+        )
         max_data = 5
         out_np = np.clip(data, min_data, max_data)
         data = paddle.to_tensor(data)
@@ -223,9 +245,13 @@ def test_dygraph_clip(self):

         if base.core.is_compiled_with_cuda():
             data_shape = [1, 2, 3, 4]
-            data = np.random.random(data_shape).astype('float16')
-            min_data = np.random.random(data_shape[-2:]).astype('float16')
-            max_data = np.random.random(data_shape[-1:]).astype('float16')
+            data = np.random.uniform(-2.0, 2.0, data_shape).astype('float16')
+            min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+                'float16'
+            )
+            max_data = np.random.uniform(1.5, 2.5, data_shape[-1:]).astype(
+                'float16'
+            )
             out_np = np.clip(data, min_data, max_data)
             data = paddle.to_tensor(data)
             min_data = paddle.to_tensor(min_data)
@@ -263,11 +289,16 @@ def test_shapeerror_clip(self):
     def test_tensor_clip_(self):
         data_shape = [1, 9, 9, 4]
-        data = paddle.to_tensor(np.random.random(data_shape).astype('float32'))
-        min = paddle.to_tensor(
-            np.random.random(data_shape[-2:]).astype('float32')
+        data = np.random.uniform(-2.0, 2.0, data_shape).astype('float32')
+        data = paddle.to_tensor(data)
+        min_data = np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype(
+            'float32'
+        )
+        min = paddle.to_tensor(min_data)
+        max_data = np.random.uniform(1.5, 2.5, data_shape[-3:]).astype(
+            'float32'
         )
-        max = min + 5
+        max = paddle.to_tensor(max_data)
         data.clip_(min, max)
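
For context, a minimal usage sketch of the behavior the updated tests exercise, assuming a Paddle build with this patch applied; it is not part of the diff, and the names min_t and max_t are illustrative. paddle.clip here accepts Tensor bounds that broadcast against x, including 0-D (shape []) tensors, which is what the shape=[] static-graph case above checks.

import numpy as np
import paddle

data_shape = [1, 2, 3, 4]
x = paddle.to_tensor(
    np.random.uniform(-2.0, 2.0, data_shape).astype('float32')
)
# Elementwise bounds: trailing shapes [3, 4] and [2, 3, 4] broadcast to x.
min_t = paddle.to_tensor(
    np.random.uniform(-2.5, -1.5, data_shape[-2:]).astype('float32')
)
max_t = paddle.to_tensor(
    np.random.uniform(1.5, 2.5, data_shape[-3:]).astype('float32')
)
out = paddle.clip(x, min_t, max_t)

# 0-D tensor bounds, the dygraph analogue of the shape=[] static case.
out_0d = paddle.clip(
    x, paddle.to_tensor(-1.0, 'float32'), paddle.to_tensor(1.0, 'float32')
)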