Skip to content

Commit

Permalink
fix(server): recover original contents of the input docarray
Browse files Browse the repository at this point in the history
  • Loading branch information
hanxiao committed May 23, 2022
1 parent 5e9504c commit ca50323
Show file tree
Hide file tree
Showing 4 changed files with 21 additions and 15 deletions.
10 changes: 6 additions & 4 deletions server/clip_server/executors/clip_onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,8 +103,9 @@ async def encode(self, docs: 'DocumentArray', **kwargs):
):
minibatch.embeddings = self._model.encode_image(minibatch.tensors)
# recover original content
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct
if _contents:
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct

# for text
if _txt_da:
Expand All @@ -115,8 +116,9 @@ async def encode(self, docs: 'DocumentArray', **kwargs):
):
minibatch.embeddings = self._model.encode_text(minibatch.tensors)
# recover original content
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct
if _contents:
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct

# drop tensors
docs.tensors = None
Expand Down
12 changes: 7 additions & 5 deletions server/clip_server/executors/clip_tensorrt.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,12 +80,13 @@ async def encode(self, docs: 'DocumentArray', **kwargs):
.astype(np.float32)
)
# recover original content
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct
if _contents:
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct

# for text
if _txt_da:
for minibatch, _texts in _txt_da.map_batch(
for minibatch, _contents in _txt_da.map_batch(
partial(preproc_text, device=self._device, return_np=False),
batch_size=self._minibatch_size,
pool=self._pool,
Expand All @@ -98,8 +99,9 @@ async def encode(self, docs: 'DocumentArray', **kwargs):
.astype(np.float32)
)
# recover original content
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct
if _contents:
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct

# drop tensors
docs.tensors = None
Expand Down
10 changes: 6 additions & 4 deletions server/clip_server/executors/clip_torch.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,8 +95,9 @@ async def encode(self, docs: 'DocumentArray', **kwargs):
)

# recover original content
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct
if _contents:
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct

# for text
if _txt_da:
Expand All @@ -113,8 +114,9 @@ async def encode(self, docs: 'DocumentArray', **kwargs):
)

# recover original content
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct
if _contents:
for _d, _ct in zip(minibatch, _contents):
_d.content = _ct

# drop tensors
docs.tensors = None
Expand Down
4 changes: 2 additions & 2 deletions server/clip_server/executors/helper.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,10 +94,10 @@ def set_rank(docs, _logit_scale=np.exp(4.60517)):

start_idx = end_idx

_candidates.embeddings = None # remove embedding to save bandwidth

final = sorted(
_candidates, key=lambda _m: _m.scores['clip_score'].value, reverse=True
)

final.embeddings = None # remove embedding to save bandwidth

q.matches = final

0 comments on commit ca50323

Please sign in to comment.