From 4029cd7c9d93d168ed8244048d7757f78f1762ec Mon Sep 17 00:00:00 2001
From: Nikhilesh Bhatnagar
Date: Fri, 18 Aug 2023 03:23:50 +0000
Subject: [PATCH] fix compatibility issue

---
 triton_models/ssmt_template_model_repo/1/model.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/triton_models/ssmt_template_model_repo/1/model.py b/triton_models/ssmt_template_model_repo/1/model.py
index 084a1c1..5d41689 100644
--- a/triton_models/ssmt_template_model_repo/1/model.py
+++ b/triton_models/ssmt_template_model_repo/1/model.py
@@ -15,11 +15,17 @@ class TritonPythonModel:
         try: self.translator = Translator(f"{os.path.join(current_path, 'translator')}", device="cuda", intra_threads=1, inter_threads=1, device_index=[self.device_id])
         except: self.translator = Translator(f"{os.path.join(current_path, 'translator')}", device="cpu", intra_threads=4)
 
+    def clean_output(self, text):
+        text = text.replace('@@ ', '')
+        if text.startswith(' '): text = text[8:]
+        if text.endswith(' '): text = text[:-8]
+        return text
+
     def execute(self, requests):
         source_list = [pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT_TOKENIZED") for request in requests]
         bsize_list = [source.as_numpy().shape[0] for source in source_list]
         src_sentences = [s[0].decode('utf-8').strip().split(' ') for source in source_list for s in source.as_numpy()]
-        tgt_sentences = [' '.join(result.hypotheses[0]).replace('@@ ', '').removeprefix(' ').removesuffix(' ') for result in self.translator.translate_iterable(src_sentences, max_batch_size=128, max_input_length=100, max_decoding_length=100)]
+        tgt_sentences = [self.clean_output(' '.join(result.hypotheses[0])) for result in self.translator.translate_iterable(src_sentences, max_batch_size=128, max_input_length=100, max_decoding_length=100)]
         responses = [pb_utils.InferenceResponse(output_tensors=[pb_utils.Tensor("OUTPUT_TEXT", numpy.array([[s]for s in islice(tgt_sentences, bsize)], dtype='object').astype(self.target_dtype))]) for bsize in bsize_list]
         return responses
 
-- 
GitLab
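
Note on the change above: str.removeprefix and str.removesuffix exist only on Python 3.9+ (PEP 616), so the replaced one-liner raises AttributeError on older interpreters, which is most likely the compatibility issue the subject line refers to (an inference, not stated in the commit message). A minimal sketch of the two idioms side by side; the sample text and the single-space prefix/suffix are illustrative assumptions, not values taken from the repository:

    import sys

    text = ' some bpe @@ segmented @@ output '
    text = text.replace('@@ ', '')  # undo BPE joining markers

    if sys.version_info >= (3, 9):
        # Available only on Python 3.9+ (PEP 616).
        text = text.removeprefix(' ').removesuffix(' ')
    else:
        # Version-agnostic fallback, in the spirit of the patched clean_output helper.
        if text.startswith(' '): text = text[1:]
        if text.endswith(' '): text = text[:-1]

    print(repr(text))  # 'some bpe segmented output'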