diff --git a/triton_models/ssmt_tokenizer/1/model.py b/triton_models/ssmt_tokenizer/1/model.py
index 873b82ab6b40ccf546b9d873f8f63589ba044b0c..06d9cf859b14fb7ea96d950e2b7561eaac0b8fa3 100644
--- a/triton_models/ssmt_tokenizer/1/model.py
+++ b/triton_models/ssmt_tokenizer/1/model.py
@@ -21,8 +21,8 @@ class TritonPythonModel:
     def execute(self, requests):
         source_gen = ((pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT"), pb_utils.get_input_tensor_by_name(request, "INPUT_LANGUAGE_ID"), pb_utils.get_input_tensor_by_name(request, "OUTPUT_LANGUAGE_ID")) for request in requests)
-        tokenized_gen = (self.tokenize_and_segment(input_text.as_numpy()[0, 0].decode('utf-8'), input_language_id.as_numpy()[0, 0].decode('utf-8'), output_language_id.as_numpy()[0, 0].decode('utf-8')) for input_text, input_language_id, output_language_id in source_gen)
-        responses = [pb_utils.InferenceResponse(output_tensors=[pb_utils.Tensor("INPUT_TEXT_TOKENIZED", numpy.array([[tokenized_sent]], dtype=self.target_dtype))]) for tokenized_sent in tokenized_gen]
+        tokenized_gen = ((self.tokenize_and_segment(input_text[0].decode('utf-8'), input_language_id[0].decode('utf-8'), output_language_id[0].decode('utf-8')) for input_text, input_language_id, output_language_id in zip(input_texts.as_numpy(), input_language_ids.as_numpy(), output_language_ids.as_numpy())) for input_texts, input_language_ids, output_language_ids in source_gen)
+        responses = [pb_utils.InferenceResponse(output_tensors=[pb_utils.Tensor("INPUT_TEXT_TOKENIZED", numpy.array([[tokenized_sent] for tokenized_sent in tokenized_sents], dtype=self.target_dtype))]) for tokenized_sents in tokenized_gen]
         return responses
 
     def finalize(self):
         pass