diff --git a/server/clip_server/executors/clip_onnx.py b/server/clip_server/executors/clip_onnx.py
index 860c5deb3..204eb7648 100644
--- a/server/clip_server/executors/clip_onnx.py
+++ b/server/clip_server/executors/clip_onnx.py
@@ -68,7 +68,7 @@ def __init__(
 
         self._model = CLIPOnnxModel(name, model_path, dtype)
         self._tokenizer = Tokenizer(name)
-        self._image_transform = clip._transform_ndarray(self._model.image_size)
+        self._image_transform = clip._transform_blob(self._model.image_size)
 
         # define the priority order for the execution providers
         providers = ['CPUExecutionProvider']
diff --git a/server/clip_server/executors/clip_tensorrt.py b/server/clip_server/executors/clip_tensorrt.py
index 62176fe4c..28f4ddb9c 100644
--- a/server/clip_server/executors/clip_tensorrt.py
+++ b/server/clip_server/executors/clip_tensorrt.py
@@ -68,7 +68,7 @@ def __init__(
         self._model.start_engines()
 
         self._tokenizer = Tokenizer(name)
-        self._image_transform = clip._transform_ndarray(self._model.image_size)
+        self._image_transform = clip._transform_blob(self._model.image_size)
 
         if not self.tracer:
             self.tracer = NoOpTracer()
diff --git a/server/clip_server/executors/clip_torch.py b/server/clip_server/executors/clip_torch.py
index 7ed895eb2..78fe4f772 100644
--- a/server/clip_server/executors/clip_torch.py
+++ b/server/clip_server/executors/clip_torch.py
@@ -92,7 +92,7 @@ def __init__(
             name, device=self._device, jit=jit, dtype=dtype, **kwargs
         )
         self._tokenizer = Tokenizer(name)
-        self._image_transform = clip._transform_ndarray(self._model.image_size)
+        self._image_transform = clip._transform_blob(self._model.image_size)
 
         if not self.tracer:
             self.tracer = NoOpTracer()
diff --git a/server/clip_server/executors/helper.py b/server/clip_server/executors/helper.py
index 594cd88a5..fa3846cad 100644
--- a/server/clip_server/executors/helper.py
+++ b/server/clip_server/executors/helper.py
@@ -33,14 +33,13 @@ def preproc_image(
 
     for d in da:
         content = d.content
-
-        if d.blob:
-            d.convert_blob_to_image_tensor()
-        elif d.tensor is None and d.uri:
+        if d.tensor is not None:
+            d.convert_image_tensor_to_blob()
+        elif d.content_type != 'blob' and d.uri:
             # in case user uses HTTP protocol and send data via curl not using .blob (base64), but in .uri
-            d.load_uri_to_image_tensor()
+            d.load_uri_to_blob()
 
-        tensors_batch.append(preprocess_fn(d.tensor).detach())
+        tensors_batch.append(preprocess_fn(d.blob).detach())
 
         # recover doc content
         d.content = content
diff --git a/tests/test_helper.py b/tests/test_helper.py
index 3494d0583..9836b0da9 100644
--- a/tests/test_helper.py
+++ b/tests/test_helper.py
@@ -87,7 +87,7 @@ def test_split_img_txt_da(inputs):
         [
             Document(
                 uri='https://clip-as-service.jina.ai/_static/favicon.png',
-            ).load_uri_to_blob(),
+            ).load_uri_to_image_tensor(),
         ]
     )
 ],
@@ -95,7 +95,7 @@
 def test_preproc_image(inputs):
     from clip_server.model import clip
 
-    preprocess_fn = clip._transform_ndarray(224)
+    preprocess_fn = clip._transform_blob(224)
     da, pixel_values = preproc_image(inputs, preprocess_fn, drop_image_content=True)
     assert len(da) == 1
     assert not da[0].blob