Commit d0049da2 authored by Nikhilesh Bhatnagar

Formatting pass.

parent f61cdc30
ssmt_triton_repo
himangy_triton_repo
\ No newline at end of file
@@ -3,7 +3,67 @@ import numpy
import asyncio
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
def initialize(self, args): self.target_dtype = pb_utils.triton_string_to_numpy(pb_utils.get_output_config_by_name(json.loads(args['model_config']), 'OUTPUT_TEXT')['data_type'])
async def execute(self, requests): return [pb_utils.InferenceResponse(output_tensors=[pb_utils.Tensor('OUTPUT_TEXT', numpy.array([[pb_utils.get_output_tensor_by_name(result, 'OUTPUT_SENT').as_numpy()[0, 0].decode('utf-8')] for result in (await asyncio.gather(*awaits))], dtype=self.target_dtype))]) for awaits in [[pb_utils.InferenceRequest(model_name=f"himangy-{input_language_id[0].decode('utf-8')}-{output_language_id[0].decode('utf-8')}", requested_output_names=['OUTPUT_SENT'], inputs=[pb_utils.Tensor('INPUT_SENT_TOKENIZED', numpy.array([[input_text_tokenized[0].decode('utf-8')]], dtype='object'))]).async_exec() for input_text_tokenized, input_language_id, output_language_id in zip(pb_utils.get_input_tensor_by_name(request, 'INPUT_TEXT_TOKENIZED').as_numpy(), pb_utils.get_input_tensor_by_name(request, 'INPUT_LANGUAGE_ID').as_numpy(), pb_utils.get_input_tensor_by_name(request, 'OUTPUT_LANGUAGE_ID').as_numpy())] for request in requests]]
def finalize(self): pass
\ No newline at end of file
def initialize(self, args):
self.target_dtype = pb_utils.triton_string_to_numpy(
pb_utils.get_output_config_by_name(
json.loads(args["model_config"]), "OUTPUT_TEXT"
)["data_type"]
)
async def execute(self, requests):
return [
pb_utils.InferenceResponse(
output_tensors=[
pb_utils.Tensor(
"OUTPUT_TEXT",
numpy.array(
[
[
pb_utils.get_output_tensor_by_name(
result, "OUTPUT_SENT"
)
.as_numpy()[0, 0]
.decode("utf-8")
]
for result in (await asyncio.gather(*awaits))
],
dtype=self.target_dtype,
),
)
]
)
for awaits in [
[
pb_utils.InferenceRequest(
model_name=f"himangy-{input_language_id[0].decode('utf-8')}-{output_language_id[0].decode('utf-8')}",
requested_output_names=["OUTPUT_SENT"],
inputs=[
pb_utils.Tensor(
"INPUT_SENT_TOKENIZED",
numpy.array(
[[input_text_tokenized[0].decode("utf-8")]],
dtype="object",
),
)
],
).async_exec()
for input_text_tokenized, input_language_id, output_language_id in zip(
pb_utils.get_input_tensor_by_name(
request, "INPUT_TEXT_TOKENIZED"
).as_numpy(),
pb_utils.get_input_tensor_by_name(
request, "INPUT_LANGUAGE_ID"
).as_numpy(),
pb_utils.get_input_tensor_by_name(
request, "OUTPUT_LANGUAGE_ID"
).as_numpy(),
)
]
for request in requests
]
]
def finalize(self):
pass
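For readers unfamiliar with the pattern: execute() above uses Triton's BLS (Business Logic Scripting) to fan each batched request out to a per-language-pair model (himangy-{src}-{tgt}) and gather the responses concurrently. A minimal sketch of the same flow with the nested comprehension unrolled into loops; tensor and model names are taken from this diff, and it assumes self.target_dtype from initialize() — an illustration, not a drop-in replacement:

import asyncio
import numpy
import triton_python_backend_utils as pb_utils

async def execute(self, requests):
    responses = []
    for request in requests:
        texts = pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT_TOKENIZED").as_numpy()
        src_ids = pb_utils.get_input_tensor_by_name(request, "INPUT_LANGUAGE_ID").as_numpy()
        tgt_ids = pb_utils.get_input_tensor_by_name(request, "OUTPUT_LANGUAGE_ID").as_numpy()
        # One BLS sub-request per sentence, routed to the matching pair model.
        futures = [
            pb_utils.InferenceRequest(
                model_name=f"himangy-{s[0].decode('utf-8')}-{t[0].decode('utf-8')}",
                requested_output_names=["OUTPUT_SENT"],
                inputs=[pb_utils.Tensor(
                    "INPUT_SENT_TOKENIZED",
                    numpy.array([[x[0].decode("utf-8")]], dtype="object"),
                )],
            ).async_exec()
            for x, s, t in zip(texts, src_ids, tgt_ids)
        ]
        results = await asyncio.gather(*futures)
        # One translated sentence per sub-request, re-batched for the caller.
        sents = [
            [pb_utils.get_output_tensor_by_name(r, "OUTPUT_SENT").as_numpy()[0, 0].decode("utf-8")]
            for r in results
        ]
        responses.append(pb_utils.InferenceResponse(output_tensors=[
            pb_utils.Tensor("OUTPUT_TEXT", numpy.array(sents, dtype=self.target_dtype))
        ]))
    return responses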
@@ -39,4 +39,4 @@ instance_group [
count: 1
kind: KIND_CPU
}
]
\ No newline at end of file
]
@@ -5,27 +5,75 @@ from itertools import islice
from ctranslate2 import Translator
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
def initialize(self, args):
current_path = os.path.dirname(os.path.abspath(__file__))
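# NOTE: input_lang / output_lang are undefined in this file as shown; presumably
# they are substituted per language pair when the model directory is generated.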
self.source_lang, self.target_lang = input_lang, output_lang
self.model_config = json.loads(args["model_config"])
self.device_id = int(json.loads(args['model_instance_device_id']))
target_config = pb_utils.get_output_config_by_name(self.model_config, "OUTPUT_SENT")
self.device_id = int(json.loads(args["model_instance_device_id"]))
target_config = pb_utils.get_output_config_by_name(
self.model_config, "OUTPUT_SENT"
)
self.target_dtype = pb_utils.triton_string_to_numpy(target_config["data_type"])
try: self.translator = Translator(f"{os.path.join(current_path, 'translator')}", device="cuda", intra_threads=1, inter_threads=1, device_index=[self.device_id])
except: self.translator = Translator(f"{os.path.join(current_path, 'translator')}", device="cpu", intra_threads=4)
try:
self.translator = Translator(
f"{os.path.join(current_path, 'translator')}",
device="cuda",
intra_threads=1,
inter_threads=1,
device_index=[self.device_id],
)
except Exception:  # fall back to CPU when CUDA is unavailable
self.translator = Translator(
f"{os.path.join(current_path, 'translator')}",
device="cpu",
intra_threads=4,
)
def clean_output(self, text):
text = text.replace('@@ ', '')
text = text.replace('\u200c', '')
if text.startswith('<to-gu> '): text = text[8:]
if text.endswith(' <to-gu>'): text = text[:-8]
text = text.replace("@@ ", "")
text = text.replace("\u200c", "")
if text.startswith("<to-gu> "):
text = text[8:]
if text.endswith(" <to-gu>"):
text = text[:-8]
return text
def execute(self, requests):
source_list = [pb_utils.get_input_tensor_by_name(request, "INPUT_SENT_TOKENIZED") for request in requests]
source_list = [
pb_utils.get_input_tensor_by_name(request, "INPUT_SENT_TOKENIZED")
for request in requests
]
bsize_list = [source.as_numpy().shape[0] for source in source_list]
src_sentences = [s[0].decode('utf-8').strip().split(' ') for source in source_list for s in source.as_numpy()]
tgt_sentences = [self.clean_output(' '.join(result.hypotheses[0])) for result in self.translator.translate_iterable(src_sentences, max_batch_size=128, max_input_length=100, max_decoding_length=100)]
responses = [pb_utils.InferenceResponse(output_tensors=[pb_utils.Tensor("OUTPUT_SENT", numpy.array([[s]for s in islice(tgt_sentences, bsize)], dtype='object').astype(self.target_dtype))]) for bsize in bsize_list]
src_sentences = [
s[0].decode("utf-8").strip().split(" ")
for source in source_list
for s in source.as_numpy()
]
tgt_sentences = [
self.clean_output(" ".join(result.hypotheses[0]))
for result in self.translator.translate_iterable(
src_sentences,
max_batch_size=128,
max_input_length=100,
max_decoding_length=100,
)
]
# islice over the list would restart at index 0 for every request; iterate once
# so each response consumes its own contiguous slice of tgt_sentences.
tgt_iter = iter(tgt_sentences)
responses = [
pb_utils.InferenceResponse(
output_tensors=[
pb_utils.Tensor(
"OUTPUT_SENT",
numpy.array(
[[s] for s in islice(tgt_iter, bsize)], dtype="object"
).astype(self.target_dtype),
)
]
)
for bsize in bsize_list
]
return responses
def finalize(self): self.translator.unload_model()
\ No newline at end of file
def finalize(self):
self.translator.unload_model()
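The translate_iterable call above streams batches through CTranslate2 with caps of 128 sentences per batch and 100 source/decoded tokens. A quick offline sanity check of that path, assuming a converted model directory named translator and whitespace-split BPE tokens like the ones this backend receives:

from ctranslate2 import Translator

# "translator" is the converted CTranslate2 model directory used above.
translator = Translator("translator", device="cpu", intra_threads=4)
batch = ["nama@@ ste duniya".split(" ")]  # hypothetical pre-tokenized BPE input
for result in translator.translate_iterable(
    batch, max_batch_size=128, max_input_length=100, max_decoding_length=100
):
    print(" ".join(result.hypotheses[0]))  # top hypothesis, still BPE-joined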
@@ -29,4 +29,4 @@ instance_group [
response_cache {
enable: true
}
\ No newline at end of file
}
@@ -6,27 +6,128 @@ from argparse import Namespace
import triton_python_backend_utils as pb_utils
from onmt.translate.translator import build_translator
class TritonPythonModel:
def initialize(self, args):
current_path = os.path.dirname(os.path.abspath(__file__))
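# NOTE: as in the CTranslate2 model, input_lang / output_lang are presumably
# template-substituted per language pair (they are undefined in this snippet).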
self.source_lang, self.target_lang = input_lang, output_lang
self.model_config = json.loads(args["model_config"])
self.device_id = int(json.loads(args['model_instance_device_id']))
target_config = pb_utils.get_output_config_by_name(self.model_config, "OUTPUT_SENT")
self.device_id = int(json.loads(args["model_instance_device_id"]))
target_config = pb_utils.get_output_config_by_name(
self.model_config, "OUTPUT_SENT"
)
self.target_dtype = pb_utils.triton_string_to_numpy(target_config["data_type"])
try: self.translator = build_translator(Namespace(tgt_prefix=False, alpha=0.0, batch_type='sents', beam_size=5, beta=-0.0, block_ngram_repeat=0, coverage_penalty='none', data_type='text', dump_beam='', fp32=True, gpu=self.device_id, ignore_when_blocking=[], length_penalty='none', max_length=100, max_sent_length=None, min_length=0, models=[f"{os.path.join(current_path, 'translator.pt')}"], n_best=1, output='/dev/null', phrase_table='', random_sampling_temp=1.0, random_sampling_topk=1, ratio=-0.0, replace_unk=False, report_align=False, report_time=False, seed=829, stepwise_penalty=False, tgt=None, verbose=False), report_score=False)
except: self.translator = build_translator(Namespace(tgt_prefix=False, alpha=0.0, batch_type='sents', beam_size=5, beta=-0.0, block_ngram_repeat=0, coverage_penalty='none', data_type='text', dump_beam='', fp32=True, gpu=-1, ignore_when_blocking=[], length_penalty='none', max_length=100, max_sent_length=None, min_length=0, models=[f"{os.path.join(current_path, 'translator.pt')}"], n_best=1, output='/dev/null', phrase_table='', random_sampling_temp=1.0, random_sampling_topk=1, ratio=-0.0, replace_unk=False, report_align=False, report_time=False, seed=829, stepwise_penalty=False, tgt=None, verbose=False), report_score=False)
try:
self.translator = build_translator(
Namespace(
tgt_prefix=False,
alpha=0.0,
batch_type="sents",
beam_size=5,
beta=-0.0,
block_ngram_repeat=0,
coverage_penalty="none",
data_type="text",
dump_beam="",
fp32=True,
gpu=self.device_id,
ignore_when_blocking=[],
length_penalty="none",
max_length=100,
max_sent_length=None,
min_length=0,
models=[f"{os.path.join(current_path, 'translator.pt')}"],
n_best=1,
output="/dev/null",
phrase_table="",
random_sampling_temp=1.0,
random_sampling_topk=1,
ratio=-0.0,
replace_unk=False,
report_align=False,
report_time=False,
seed=829,
stepwise_penalty=False,
tgt=None,
verbose=False,
),
report_score=False,
)
except Exception:  # fall back to CPU decoding (gpu=-1) when CUDA init fails
self.translator = build_translator(
Namespace(
tgt_prefix=False,
alpha=0.0,
batch_type="sents",
beam_size=5,
beta=-0.0,
block_ngram_repeat=0,
coverage_penalty="none",
data_type="text",
dump_beam="",
fp32=True,
gpu=-1,
ignore_when_blocking=[],
length_penalty="none",
max_length=100,
max_sent_length=None,
min_length=0,
models=[f"{os.path.join(current_path, 'translator.pt')}"],
n_best=1,
output="/dev/null",
phrase_table="",
random_sampling_temp=1.0,
random_sampling_topk=1,
ratio=-0.0,
replace_unk=False,
report_align=False,
report_time=False,
seed=829,
stepwise_penalty=False,
tgt=None,
verbose=False,
),
report_score=False,
)
def clean_output(self, text):
text = text.replace('@@ ', '')
text = text.replace('\u200c', '')
if text.startswith('<to-gu> '): text = text[8:]
if text.endswith(' <to-gu>'): text = text[:-8]
text = text.replace("@@ ", "")
text = text.replace("\u200c", "")
if text.startswith("<to-gu> "):
text = text[8:]
if text.endswith(" <to-gu>"):
text = text[:-8]
return text
def execute(self, requests):
source_list = [pb_utils.get_input_tensor_by_name(request, "INPUT_SENT_TOKENIZED") for request in requests]
source_list = [
pb_utils.get_input_tensor_by_name(request, "INPUT_SENT_TOKENIZED")
for request in requests
]
bsize_list = [source.as_numpy().shape[0] for source in source_list]
src_sentences = [s[0].decode('utf-8').strip().split(' ') for source in source_list for s in source.as_numpy()]
tgt_sentences = [self.clean_output(result[0]) for result in self.translator.translate(src_sentences, batch_size=128)[1]]
responses = [pb_utils.InferenceResponse(output_tensors=[pb_utils.Tensor("OUTPUT_SENT", numpy.array([[s]for s in islice(tgt_sentences, bsize)], dtype='object').astype(self.target_dtype))]) for bsize in bsize_list]
src_sentences = [
s[0].decode("utf-8").strip().split(" ")
for source in source_list
for s in source.as_numpy()
]
tgt_sentences = [
self.clean_output(result[0])
for result in self.translator.translate(src_sentences, batch_size=128)[1]
]
# As in the CTranslate2 model: iterate once so each response consumes its own
# contiguous slice of tgt_sentences instead of re-reading from index 0.
tgt_iter = iter(tgt_sentences)
responses = [
pb_utils.InferenceResponse(
output_tensors=[
pb_utils.Tensor(
"OUTPUT_SENT",
numpy.array(
[[s] for s in islice(tgt_iter, bsize)], dtype="object"
).astype(self.target_dtype),
)
]
)
for bsize in bsize_list
]
return responses
def finalize(self): del self.translator
\ No newline at end of file
def finalize(self):
del self.translator
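OpenNMT-py's translate() returns a (scores, predictions) pair, hence the [1] index and result[0] for the top hypothesis above. The shared clean_output() helper strips BPE joiners, zero-width non-joiners, and the <to-gu> control tokens; a small check (initialize() is Triton's lifecycle hook, not __init__, so plain construction is safe):

m = TritonPythonModel()
assert m.clean_output("<to-gu> nama@@ ste\u200c <to-gu>") == "namaste"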
@@ -29,4 +29,4 @@ instance_group [
response_cache {
enable: true
}
\ No newline at end of file
}
@@ -75,4 +75,4 @@ ensemble_scheduling {
}
}
]
}
\ No newline at end of file
}
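With the ensemble scheduling above in place, a client sends raw text plus language IDs and reads back translations in one call. A hedged example using tritonclient over HTTP; the ensemble model name "nmt" and the server URL are assumptions, while the tensor names come from the configs in this commit:

import numpy
import tritonclient.http as httpclient

client = httpclient.InferenceServerClient(url="localhost:8000")

def bytes_input(name, values):
    # Shape [batch, 1] of UTF-8 strings, matching the string tensors above.
    arr = numpy.array([[v.encode("utf-8")] for v in values], dtype="object")
    tensor = httpclient.InferInput(name, arr.shape, "BYTES")
    tensor.set_data_from_numpy(arr)
    return tensor

result = client.infer(
    "nmt",  # hypothetical ensemble name
    inputs=[
        bytes_input("INPUT_TEXT", ["hello world"]),
        bytes_input("INPUT_LANGUAGE_ID", ["en"]),
        bytes_input("OUTPUT_LANGUAGE_ID", ["hi"]),
    ],
    outputs=[httpclient.InferRequestedOutput("OUTPUT_TEXT")],
)
print(result.as_numpy("OUTPUT_TEXT"))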
@@ -6,8 +6,73 @@ from .apply_bpe import BPE
from ilstokenizer import tokenizer
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
def initialize(self, args): self.target_dtype, self.bpes = pb_utils.triton_string_to_numpy(pb_utils.get_output_config_by_name(json.loads(args["model_config"]), "INPUT_TEXT_TOKENIZED")["data_type"]), {fname.rsplit('/', maxsplit=1)[-1][:-len('.src')]: BPE(open(fname, 'r', encoding='utf-8')) for fname in iglob(f'{os.path.dirname(os.path.abspath(__file__))}/bpe_src/*.src')}
def preprocess_text(self, text, source_lang, target_lang): return f"<to-gu> {text} <to-gu>" if source_lang == 'en' and target_lang == 'gu' else text
def execute(self, requests): return [pb_utils.InferenceResponse(output_tensors=[pb_utils.Tensor("INPUT_TEXT_TOKENIZED", numpy.array([[tokenized_sent] for tokenized_sent in tokenized_sents], dtype=self.target_dtype))]) for tokenized_sents in ((self.bpes[f"{input_language_id[0].decode('utf-8')}-{output_language_id[0].decode('utf-8')}"].segment(self.preprocess_text(tokenizer.tokenize(input_text[0].decode('utf-8').lower()), input_language_id[0].decode('utf-8'), output_language_id[0].decode('utf-8'))).strip() for input_text, input_language_id, output_language_id in zip(input_texts.as_numpy(), input_language_ids.as_numpy(), output_language_ids.as_numpy())) for input_texts, input_language_ids, output_language_ids in ((pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT"), pb_utils.get_input_tensor_by_name(request, "INPUT_LANGUAGE_ID"), pb_utils.get_input_tensor_by_name(request, "OUTPUT_LANGUAGE_ID")) for request in requests))]
def finalize(self): pass
\ No newline at end of file
def initialize(self, args):
self.target_dtype, self.bpes = pb_utils.triton_string_to_numpy(
pb_utils.get_output_config_by_name(
json.loads(args["model_config"]), "INPUT_TEXT_TOKENIZED"
)["data_type"]
), {
fname.rsplit("/", maxsplit=1)[-1][: -len(".src")]: BPE(
open(fname, "r", encoding="utf-8")
)
for fname in iglob(
f"{os.path.dirname(os.path.abspath(__file__))}/bpe_src/*.src"
)
}
def preprocess_text(self, text, source_lang, target_lang):
return (
f"<to-gu> {text} <to-gu>"
if source_lang == "en" and target_lang == "gu"
else text
)
def execute(self, requests):
return [
pb_utils.InferenceResponse(
output_tensors=[
pb_utils.Tensor(
"INPUT_TEXT_TOKENIZED",
numpy.array(
[[tokenized_sent] for tokenized_sent in tokenized_sents],
dtype=self.target_dtype,
),
)
]
)
for tokenized_sents in (
(
self.bpes[
f"{input_language_id[0].decode('utf-8')}-{output_language_id[0].decode('utf-8')}"
]
.segment(
self.preprocess_text(
tokenizer.tokenize(input_text[0].decode("utf-8").lower()),
input_language_id[0].decode("utf-8"),
output_language_id[0].decode("utf-8"),
)
)
.strip()
for input_text, input_language_id, output_language_id in zip(
input_texts.as_numpy(),
input_language_ids.as_numpy(),
output_language_ids.as_numpy(),
)
)
for input_texts, input_language_ids, output_language_ids in (
(
pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT"),
pb_utils.get_input_tensor_by_name(request, "INPUT_LANGUAGE_ID"),
pb_utils.get_input_tensor_by_name(
request, "OUTPUT_LANGUAGE_ID"
),
)
for request in requests
)
)
]
def finalize(self):
pass
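Per sentence, the tokenizer model above lowercases, applies ilstokenizer, wraps en→gu input in <to-gu> control tokens via preprocess_text(), and then segments with the pair-specific BPE codes loaded from bpe_src/*.src. The same path, unrolled; the en-hi codes filename is hypothetical, following the glob pattern above:

from ilstokenizer import tokenizer
from apply_bpe import BPE  # imported relatively in the model file

# Hypothetical pair en-hi; the codes file is read eagerly by BPE's constructor.
with open("bpe_src/en-hi.src", "r", encoding="utf-8") as codes:
    bpe = BPE(codes)

text = tokenizer.tokenize("Hello, world!".lower())
# Only en->gu sentences get the <to-gu> wrapper; en->hi passes through unchanged.
tokenized = bpe.segment(text).strip()
print(tokenized)  # BPE-joined subwords, e.g. "hel@@ lo , world !"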
@@ -39,4 +39,4 @@ instance_group [
count: 8
kind: KIND_CPU
}
]
\ No newline at end of file
]