Commit da7746e4 authored by Nikhilesh Bhatnagar

Fixes for en-gu and gu-en models

parent cf66dd5e
@@ -19,7 +19,7 @@ ct2-opennmt-py-converter --model_path 9.pt --output_dir ./9_ct2
cd ..
mkdir ssmt_triton_repo
cd ssmt_triton_repo
-cp -r ../triton_models/ssmt_pipeline .
+cp -r ../triton_models/ssmt_pipeline nmt
cp -r ../triton_models/ssmt_model_demuxer .
cp -r ../triton_models/ssmt_tokenizer .
cp -r ../models/*.src ssmt_tokenizer/1/bpe_src
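After rebuilding the repository, a quick sanity check that the ensemble answers to its new name. This is a minimal sketch, assuming tritonserver is already serving ssmt_triton_repo on the default HTTP port (localhost:8000):

import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url='localhost:8000')
# The ensemble directory was copied as 'nmt' above, so it should now be
# reported ready under that name rather than 'ssmt_pipeline'.
assert client.is_server_ready()
print(client.is_model_ready('nmt'))  # expected: True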
@@ -39,7 +39,7 @@
"source": [
"shape = [1]\n",
"MIN_WORDS, MAX_WORDS = 4, 20\n",
"model_name = \"ssmt_pipeline\"\n",
"model_name = \"nmt\"\n",
"rs = wonderwords.RandomWord()"
]
},
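For an end-to-end smoke test against the renamed ensemble, a hedged sketch follows. The tensor names INPUT_TEXT, INPUT_LANGUAGE_ID, OUTPUT_LANGUAGE_ID, and OUTPUT_TEXT are taken from the Python models in this commit; that the ensemble exposes them unchanged, and the localhost:8000 endpoint, are assumptions:

import numpy
import tritonclient.http as httpclient

def string_tensor(name, value):
    # Each input is a [1, 1] BYTES tensor, matching the [0, 0] indexing
    # used by the tokenizer model below.
    tensor = httpclient.InferInput(name, [1, 1], 'BYTES')
    tensor.set_data_from_numpy(numpy.array([[value]], dtype='object'))
    return tensor

client = httpclient.InferenceServerClient(url='localhost:8000')
result = client.infer('nmt', inputs=[string_tensor('INPUT_TEXT', 'how are you'),
                                     string_tensor('INPUT_LANGUAGE_ID', 'en'),
                                     string_tensor('OUTPUT_LANGUAGE_ID', 'gu')])
print(result.as_numpy('OUTPUT_TEXT')[0, 0].decode('utf-8'))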

-name: "ssmt_pipeline"
+name: "nmt"
platform: "ensemble"
max_batch_size: 4096
@@ -19,7 +19,7 @@ class TritonPythonModel:
        source_list = [pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT_TOKENIZED") for request in requests]
        bsize_list = [source.as_numpy().shape[0] for source in source_list]
        src_sentences = [s[0].decode('utf-8').strip().split(' ') for source in source_list for s in source.as_numpy()]
-        tgt_sentences = [' '.join(result.hypotheses[0]).replace('@@ ', '') for result in self.translator.translate_iterable(src_sentences, max_batch_size=128, max_input_length=100, max_decoding_length=100)]
+        tgt_sentences = [' '.join(result.hypotheses[0]).replace('@@ ', '').removeprefix('<to-gu> ').removesuffix(' <to-gu>') for result in self.translator.translate_iterable(src_sentences, max_batch_size=128, max_input_length=100, max_decoding_length=100)]
        # consume tgt_sentences through a single iterator so each request gets its
        # own consecutive slice instead of the first bsize sentences every time
        tgt_iter = iter(tgt_sentences)
        responses = [pb_utils.InferenceResponse(output_tensors=[pb_utils.Tensor("OUTPUT_TEXT", numpy.array([[s] for s in islice(tgt_iter, bsize)], dtype='object').astype(self.target_dtype))]) for bsize in bsize_list]
        return responses
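The new removeprefix/removesuffix calls (Python 3.9+) strip the <to-gu> control token that the tokenizer wraps around en-gu inputs, in case the model echoes it at either end of a hypothesis. A standalone sketch of just that clean-up step, with a hypothetical helper name:

def strip_target_tag(sentence, tag='<to-gu>'):
    # Remove a leading and/or trailing control token; sentences from the
    # other language pairs pass through unchanged.
    return sentence.removeprefix(f'{tag} ').removesuffix(f' {tag}')

assert strip_target_tag('<to-gu> hello world <to-gu>') == 'hello world'
assert strip_target_tag('hello world') == 'hello world'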
@@ -14,9 +14,14 @@ class TritonPythonModel:
        self.lang_pair_map = {'en-hi': 1, 'hi-en': 2, 'en-te': 3, 'te-en': 4, 'hi-te': 6, 'te-hi': 7, 'en-gu': 8, 'gu-en': 9}
        self.bpes = {lang_pair: BPE(open(os.path.join(current_path, f'bpe_src/{model_id}.src'), encoding='utf-8')) for lang_pair, model_id in self.lang_pair_map.items()}
+    def tokenize_and_segment(self, input_text, source_lang, target_lang):
+        tokenized_text = tokenizer.tokenize(input_text)
+        if source_lang == 'en' and target_lang == 'gu': tokenized_text = f'<to-gu> {tokenized_text} <to-gu>'
+        return self.bpes[f'{source_lang}-{target_lang}'].segment(tokenized_text).strip()
    def execute(self, requests):
        source_gen = ((pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT"), pb_utils.get_input_tensor_by_name(request, "INPUT_LANGUAGE_ID"), pb_utils.get_input_tensor_by_name(request, "OUTPUT_LANGUAGE_ID")) for request in requests)
-        tokenized_gen = (self.bpes[f"{input_language_id.as_numpy()[0, 0].decode('utf-8')}-{output_language_id.as_numpy()[0, 0].decode('utf-8')}"].segment(tokenizer.tokenize(input_text.as_numpy()[0, 0].decode('utf-8'))).strip() for input_text, input_language_id, output_language_id in source_gen)
+        tokenized_gen = (self.tokenize_and_segment(input_text.as_numpy()[0, 0].decode('utf-8'), input_language_id.as_numpy()[0, 0].decode('utf-8'), output_language_id.as_numpy()[0, 0].decode('utf-8')) for input_text, input_language_id, output_language_id in source_gen)
        responses = [pb_utils.InferenceResponse(output_tensors=[pb_utils.Tensor("INPUT_TEXT_TOKENIZED", numpy.array([[tokenized_sent]], dtype=self.target_dtype))]) for tokenized_sent in tokenized_gen]
        return responses
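For reference, the tagging side of the round trip in isolation: a minimal sketch of what tokenize_and_segment produces for an en-gu request, using subword_nmt's BPE exactly as the model above does. The codes path and the pre-tokenized input are placeholders:

from subword_nmt.apply_bpe import BPE

with open('bpe_src/8.src', encoding='utf-8') as codes:  # placeholder path; 8 is the en-gu model id
    bpe = BPE(codes)
tokenized = 'how are you'                 # stand-in for tokenizer.tokenize(...)
tagged = f'<to-gu> {tokenized} <to-gu>'   # control tokens steering the en-gu model
print(bpe.segment(tagged).strip())        # subword units sent as INPUT_TEXT_TOKENIZED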