
Commit

test
a162837 committed Nov 10, 2024
1 parent 30d7c4f commit 23adff1
Showing 2 changed files with 225 additions and 65 deletions.
230 changes: 165 additions & 65 deletions python/paddle/tensor/math.py
@@ -3706,10 +3706,33 @@ def log10_(x: Tensor, name: str | None = None) -> Tensor:
return _C_ops.log10_(x)


def check_clip_tensor(c_x, value, re_value, value_type, name):
    if value is None:
        value = paddle.full_like(c_x, re_value, value_type)
    else:
        if isinstance(value, (Variable, paddle.pir.Value, paddle.Tensor)):
            if len(value.shape) == 1 and value.shape[-1] == 0:
                raise ValueError(
                    f"The {name} dimension should be equal to the inner dimension of x, but the {name} dimension is {value.shape}."
                )
            elif (
                len(value.shape) != 0
                and value.shape != c_x.shape[-len(value.shape) :]
                and value.shape != [1]
                and value.shape != (1,)
            ):
                raise ValueError(
                    f"The {name} dimension should be equal to the inner dimension of x, but the {name} dimension is {value.shape} and the x dimension is {c_x.shape[-len(value.shape):]}."
                )
        else:
            value = paddle.full_like(c_x, value, value_type)
    return value
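
# A minimal usage sketch for this helper, assuming dygraph mode
# (check_clip_tensor is module-internal, so the direct calls below are
# illustrative only):
#
#     x = paddle.randn([3, 5, 8, 10], dtype='float32')
#     # None is broadcast to x's shape/dtype, filled with the fallback re_value.
#     filled = check_clip_tensor(x, None, -1.0, 'float32', 'min')
#     # A tensor matching x's trailing dims (or of shape [1]) passes through.
#     min_t = paddle.randn([8, 10], dtype='float32')
#     same = check_clip_tensor(x, min_t, -1.0, 'float32', 'min')
#     # A mismatched shape such as [8, 3] raises ValueError.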


def clip(
x: Tensor,
min: float | None = None,
max: float | None = None,
min: float | Tensor | None = None,
max: float | Tensor | None = None,
name: str | None = None,
) -> Tensor:
"""
@@ -3753,84 +3776,125 @@ def clip(
if x_dtype == 'paddle.int32':
min_ = np.iinfo(np.int32).min
max_ = np.iinfo(np.int32).max - 2**7
tensor_dtype = 'int32'
elif x_dtype == 'paddle.int64':
min_ = np.iinfo(np.int64).min
max_ = np.iinfo(np.int64).max - 2**39
tensor_dtype = 'int64'
elif x_dtype == 'paddle.float16':
min_ = float(np.finfo(np.float16).min)
max_ = float(np.finfo(np.float16).max)
tensor_dtype = 'float16'
else:
min_ = float(np.finfo(np.float32).min)
max_ = float(np.finfo(np.float32).max)
tensor_dtype = 'float32'

    if (
        isinstance(min, Variable)
        and (len(min.shape) > 1 or (len(min.shape) == 1 and min.shape[-1] != 1))
    ) or (
        isinstance(max, Variable)
        and (len(max.shape) > 1 or (len(max.shape) == 1 and max.shape[-1] != 1))
    ):
min = paddle.full_like(x, min_, tensor_dtype) if min is None else min
max = paddle.full_like(x, max_, tensor_dtype) if max is None else max
min = (
paddle.full_like(x, min, tensor_dtype)
if not isinstance(min, Variable)
else min
)
max = (
paddle.full_like(x, max, tensor_dtype)
if not isinstance(max, Variable)
else max
)

if in_dynamic_or_pir_mode():
if isinstance(min, Variable):
min = min.item(0)
if isinstance(max, Variable):
max = max.item(0)
min = min_ if min is None else min
max = max_ if max is None else max
return _C_ops.clip(x, min, max)
        if (len(min.shape) == 1 and min.shape[-1] == 0) or min.shape != x.shape[
            -len(min.shape) :
        ]:
            raise ValueError(
                f"The min dimension should be equal to the inner dimension of x, but the min dimension is {min.shape}."
            )

        if (len(max.shape) == 1 and max.shape[-1] == 0) or max.shape != x.shape[
            -len(max.shape) :
        ]:
            raise ValueError(
                f"The max dimension should be equal to the inner dimension of x, but the max dimension is {max.shape}."
            )
else:
if min is not None:
check_type(min, 'min', (float, int, Variable), 'clip')
if in_dynamic_or_pir_mode():
if isinstance(min, Variable):
check_dtype(
min.dtype,
'min',
['float16', 'float32', 'float64', 'int32', 'uint16'],
'clip',
'(When the type of min in clip is Variable.)',
)
if max is not None:
check_type(max, 'max', (float, int, Variable), 'clip')
min = min.item(0)
if isinstance(max, Variable):
check_dtype(
max.dtype,
'max',
['float16', 'float32', 'float64', 'int32', 'uint16'],
'clip',
'(When the type of max in clip is Variable.)',
)

check_variable_and_dtype(
x,
'x',
['float16', 'float32', 'float64', 'int32', 'int64', 'uint16'],
'clip',
)
max = max.item(0)
min = min_ if min is None else min
max = max_ if max is None else max
return _C_ops.clip(x, min, max)
else:
if min is not None:
check_type(min, 'min', (float, int, Variable), 'clip')
if isinstance(min, Variable):
check_dtype(
min.dtype,
'min',
['float16', 'float32', 'float64', 'int32', 'uint16'],
'clip',
'(When the type of min in clip is Variable.)',
)
if max is not None:
check_type(max, 'max', (float, int, Variable), 'clip')
if isinstance(max, Variable):
check_dtype(
max.dtype,
'max',
['float16', 'float32', 'float64', 'int32', 'uint16'],
'clip',
'(When the type of max in clip is Variable.)',
)

inputs = {'X': x}
attrs = {'min': min_, 'max': max_}
check_variable_and_dtype(
x,
'x',
['float16', 'float32', 'float64', 'int32', 'int64', 'uint16'],
'clip',
)

if isinstance(min, Variable):
min.stop_gradient = True
inputs['Min'] = min
elif min is not None:
attrs['min'] = min
inputs = {'X': x}
attrs = {'min': min_, 'max': max_}

if isinstance(max, Variable):
max.stop_gradient = True
inputs['Max'] = max
elif max is not None:
attrs['max'] = max
if isinstance(min, Variable):
min.stop_gradient = True
inputs['Min'] = min
elif min is not None:
attrs['min'] = min

helper = LayerHelper('clip', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('x')
)
helper.append_op(
type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs
)
if isinstance(max, Variable):
max.stop_gradient = True
inputs['Max'] = max
elif max is not None:
attrs['max'] = max

helper = LayerHelper('clip', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('x')
)
helper.append_op(
type='clip',
inputs=inputs,
outputs={'Out': [output]},
attrs=attrs,
)

return output
return output
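
# The intended call pattern after this change, as a hedged sketch (dygraph
# mode assumed; the tensor-bound path is still work in progress in this
# commit, so y2 shows the target behavior rather than a guaranteed one):
#
#     x = paddle.to_tensor([[0.5, 3.5], [4.5, 6.4]], dtype='float32')
#     # Scalar bounds: pre-existing behavior.
#     y = paddle.clip(x, min=1.0, max=5.0)   # [[1.0, 3.5], [4.5, 5.0]]
#     # Tensor bound matching x's trailing dimension: enabled by this patch.
#     min_t = paddle.to_tensor([1.0, 4.0], dtype='float32')
#     y2 = paddle.clip(x, min=min_t, max=5.0)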


@inplace_apis_in_dygraph_only
def clip_(
x: Tensor,
min: float | None = None,
max: float | None = None,
min: float | Tensor | None = None,
max: float | Tensor | None = None,
name: str | None = None,
) -> Tensor:
"""
@@ -3839,15 +3903,51 @@ def clip_(
"""
fmin = float(np.finfo(np.float32).min)
fmax = float(np.finfo(np.float32).max)
if isinstance(min, Variable):
min = min.item(0)
if isinstance(max, Variable):
max = max.item(0)
min = fmin if min is None else min
max = fmax if max is None else max
tensor_dtype = 'float32'

    if (
        isinstance(min, Variable)
        and (len(min.shape) > 1 or (len(min.shape) == 1 and min.shape[-1] != 1))
    ) or (
        isinstance(max, Variable)
        and (len(max.shape) > 1 or (len(max.shape) == 1 and max.shape[-1] != 1))
    ):
min = paddle.full_like(x, fmin, tensor_dtype) if min is None else min
max = paddle.full_like(x, fmax, tensor_dtype) if max is None else max
min = (
paddle.full_like(x, min, tensor_dtype)
if not isinstance(min, Variable)
else min
)
max = (
paddle.full_like(x, max, tensor_dtype)
if not isinstance(max, Variable)
else max
)

if in_dynamic_mode():
return _C_ops.clip_(x, min, max)
        if (len(min.shape) == 1 and min.shape[-1] == 0) or min.shape != x.shape[
            -len(min.shape) :
        ]:
            raise ValueError(
                f"The min dimension should be equal to the inner dimension of x, but the min dimension is {min.shape}."
            )

        if (len(max.shape) == 1 and max.shape[-1] == 0) or max.shape != x.shape[
            -len(max.shape) :
        ]:
            raise ValueError(
                f"The max dimension should be equal to the inner dimension of x, but the max dimension is {max.shape}."
            )
else:
if isinstance(min, Variable):
min = min.item(0)
if isinstance(max, Variable):
max = max.item(0)
min = fmin if min is None else min
max = fmax if max is None else max

if in_dynamic_mode():
return _C_ops.clip_(x, min, max)
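
# clip_ is the in-place variant and, per @inplace_apis_in_dygraph_only, runs
# only in dygraph mode. A minimal sketch with scalar bounds (tensor bounds
# follow the same trailing-dimension rules as clip above):
#
#     x = paddle.to_tensor([[0.5, 3.5], [4.5, 6.4]], dtype='float32')
#     paddle.clip_(x, min=1.0, max=5.0)      # mutates x
#     # x is now [[1.0, 3.5], [4.5, 5.0]]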


def trace(
60 changes: 60 additions & 0 deletions test/legacy_test/test_clip_tensor.py
@@ -0,0 +1,60 @@
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import paddle


class TestClipTensor(unittest.TestCase):

def test_shape_error(self):
paddle.disable_static()

def test_min_error():
x = paddle.randn([3, 5, 8, 10], dtype='float16')
min = paddle.randn([8, 3], dtype='float16')
paddle.clip(x, min)

self.assertRaises(ValueError, test_min_error)

def test_max_error():
x = paddle.randn([3, 5, 8, 10], dtype='float32')
max = paddle.randn([8, 3], dtype='float32')
paddle.clip(x, -5.0, max)

self.assertRaises(ValueError, test_max_error)


class TestInplaceClipTensorAPI(unittest.TestCase):
def test_shape_error(self):
paddle.disable_static()

def test_min_error():
x = paddle.randn([3, 5, 8, 10], dtype='float16')
min = paddle.randn([8, 3], dtype='float16')
paddle.clip_(x, min)

self.assertRaises(ValueError, test_min_error)

def test_max_error():
x = paddle.randn([3, 5, 8, 10], dtype='float32')
max = paddle.randn([8, 3], dtype='float32')
paddle.clip_(x, -5.0, max)

self.assertRaises(ValueError, test_max_error)


if __name__ == '__main__':
unittest.main()
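
# The tests above cover only the error path. A hedged sketch of a
# positive-path check that could sit alongside them, above the __main__
# guard (the class name is illustrative, and it assumes tensor bounds
# behave like elementwise numpy.clip, which is the target behavior of this
# work-in-progress commit):
#
#     import numpy as np
#
#     class TestClipTensorValue(unittest.TestCase):
#         def test_tensor_bound_matches_numpy(self):
#             paddle.disable_static()
#             x = paddle.randn([3, 5, 8, 10], dtype='float32')
#             min_t = paddle.full([8, 10], -0.5, dtype='float32')
#             out = paddle.clip(x, min_t, 0.5)
#             np.testing.assert_allclose(
#                 out.numpy(), np.clip(x.numpy(), -0.5, 0.5), rtol=1e-6
#             )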
