Skip to content
GitLab
Projects
Groups
Snippets
Help
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
M
mt-model-deploy-dhruva
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Service Desk
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Operations
Operations
Environments
Packages & Registries
Packages & Registries
Package Registry
Analytics
Analytics
CI / CD
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
ssmt
mt-model-deploy-dhruva
Commits
19adc2a4
Commit
19adc2a4
authored
Aug 30, 2023
by
Nikhilesh Bhatnagar
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
batching fixes
parent
4029cd7c
Changes
1
Hide whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
2 additions
and
2 deletions
+2
-2
triton_models/ssmt_tokenizer/1/model.py
triton_models/ssmt_tokenizer/1/model.py
+2
-2
No files found.
triton_models/ssmt_tokenizer/1/model.py
View file @
19adc2a4
...
...
@@ -21,8 +21,8 @@ class TritonPythonModel:
def execute(self, requests):
    """Tokenize and segment a batch of Triton inference requests.

    For each request, reads the "INPUT_TEXT", "INPUT_LANGUAGE_ID" and
    "OUTPUT_LANGUAGE_ID" input tensors, decodes each row as UTF-8, and runs
    ``self.tokenize_and_segment`` on every (text, src_lang, tgt_lang) triple.
    Returns one ``pb_utils.InferenceResponse`` per request carrying a single
    "INPUT_TEXT_TOKENIZED" output tensor of dtype ``self.target_dtype``.

    NOTE(review): assumes each input tensor row is shaped like
    ``[value]`` (hence the ``row[0].decode('utf-8')``) and that the three
    tensors have equal batch size per request — confirm against the model
    config; rows beyond the shortest tensor would be silently dropped by
    ``zip``.
    """
    # Lazily pull the three named input tensors out of every request.
    source_gen = (
        (
            pb_utils.get_input_tensor_by_name(request, "INPUT_TEXT"),
            pb_utils.get_input_tensor_by_name(request, "INPUT_LANGUAGE_ID"),
            pb_utils.get_input_tensor_by_name(request, "OUTPUT_LANGUAGE_ID"),
        )
        for request in requests
    )
    # One inner generator per request: tokenize every row of the batch,
    # walking the three tensors in lockstep.
    tokenized_gen = (
        (
            self.tokenize_and_segment(
                input_text[0].decode('utf-8'),
                input_language_id[0].decode('utf-8'),
                output_language_id[0].decode('utf-8'),
            )
            for input_text, input_language_id, output_language_id in zip(
                input_texts.as_numpy(),
                input_language_ids.as_numpy(),
                output_language_ids.as_numpy(),
            )
        )
        for input_texts, input_language_ids, output_language_ids in source_gen
    )
    # Wrap each request's tokenized batch into a single response tensor:
    # shape (batch, 1), one tokenized sentence per row.
    responses = [
        pb_utils.InferenceResponse(
            output_tensors=[
                pb_utils.Tensor(
                    "INPUT_TEXT_TOKENIZED",
                    numpy.array(
                        [[tokenized_sent] for tokenized_sent in tokenized_sents],
                        dtype=self.target_dtype,
                    ),
                )
            ]
        )
        for tokenized_sents in tokenized_gen
    ]
    return responses
def finalize(self):
    """Model-unload hook; this tokenizer holds no resources, so nothing to release.

    NOTE(review): presumably invoked by the Triton Python backend when the
    model instance is torn down — confirm against the backend docs.
    """
    pass
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment