Skip to content
GitLab
Projects
Groups
Snippets
Help
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Sign in / Register
Toggle navigation
I
ilmt-api-urd-shallowparser
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Operations
Operations
Metrics
Analytics
Analytics
Repository
Value Stream
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Commits
Open sidebar
reva-codes
ilmt-api-urd-shallowparser
Commits
85fac48c
Commit
85fac48c
authored
May 17, 2022
by
priyank
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
updated tokenizer
parent
801d5361
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
117 additions
and
17 deletions
+117
-17
modules/ILMT-URD-HIN-Prune/lib/ILMT/URD/HIN/Prune.pm
modules/ILMT-URD-HIN-Prune/lib/ILMT/URD/HIN/Prune.pm
+2
-0
modules/ILMT-URD-HIN-Tokenizer/lib/ILMT/URD/HIN/Tokenizer.pm
modules/ILMT-URD-HIN-Tokenizer/lib/ILMT/URD/HIN/Tokenizer.pm
+20
-17
modules/ILMT-URD-HIN-Tokenizer/lib/ILMT/URD/HIN/Tokenizer.pm-old
.../ILMT-URD-HIN-Tokenizer/lib/ILMT/URD/HIN/Tokenizer.pm-old
+67
-0
modules/ILMT-URD-HIN-Tokenizer/lib/ILMT/URD/HIN/tokenize.py
modules/ILMT-URD-HIN-Tokenizer/lib/ILMT/URD/HIN/tokenize.py
+28
-0
No files found.
modules/ILMT-URD-HIN-Prune/lib/ILMT/URD/HIN/Prune.pm
View file @
85fac48c
...
@@ -10,6 +10,7 @@ use ILMT::URD::HIN::SSFAPI::shakti_tree_api;
...
@@ -10,6 +10,7 @@ use ILMT::URD::HIN::SSFAPI::shakti_tree_api;
sub
process
{
sub
process
{
my
%
par
=
@_
;
my
%
par
=
@_
;
utf8::
encode
(
$par
{'
data
'});
my
$input
=
$par
{'
data
'};
my
$input
=
$par
{'
data
'};
my
$db_file
=
__DIR__
.
"
/Prune/mapping.dat
";
my
$db_file
=
__DIR__
.
"
/Prune/mapping.dat
";
...
@@ -49,6 +50,7 @@ sub process {
...
@@ -49,6 +50,7 @@ sub process {
select
(
OUTFILE
);
select
(
OUTFILE
);
printstory
();
printstory
();
select
(
STDOUT
);
select
(
STDOUT
);
utf8::
decode
(
$result
);
return
$result
;
return
$result
;
}
}
...
...
modules/ILMT-URD-HIN-Tokenizer/lib/ILMT/URD/HIN/Tokenizer.pm
View file @
85fac48c
...
@@ -4,6 +4,13 @@ use warnings;
...
@@ -4,6 +4,13 @@ use warnings;
use
Dir::
Self
;
use
Dir::
Self
;
use
Data::
Dumper
;
use
Data::
Dumper
;
use
IPC::
Run
qw(run)
;
use
List::
UtilsBy
qw(max_by)
;
use
File::
Temp
qw/ tempfile /
;
use
File::
Slurp
qw( slurp )
;
my
$cwd
=
__DIR__
;
my
%
daemons
=
(
my
%
daemons
=
(
"
tokenizer
"
=>
{
"
tokenizer
"
=>
{
"
path
"
=>
"
ind-tokz
",
"
path
"
=>
"
ind-tokz
",
...
@@ -15,22 +22,18 @@ my %daemons = (
...
@@ -15,22 +22,18 @@ my %daemons = (
sub
process
{
sub
process
{
my
%
args
=
@_
;
my
%
args
=
@_
;
utf8::
encode
(
$args
{
data
});
utf8::
encode
(
$args
{
data
});
my
$sentences
=
call_daemon
("
tokenizer
",
$args
{
data
});
open
INFILE
,
'
<
',
\
$sentences
or
die
$!
;
my
(
$fh2
,
$filename2
)
=
tempfile
("
tokenizer_inputXXXX
",
DIR
=>
"
/tmp
",
SUFFIX
=>
"
.tmp
");
my
$result
=
"";
print
$fh2
$args
{"
data
"};
my
$ctr
=
0
;
close
(
$fh2
);
while
(
my
$line
=
<
INFILE
>
)
{
$ctr
++
;
my
$token_out
;
$result
.=
"
<Sentence id=
\"
$ctr
\"
>
\n
";
run
["
python
",
"
$cwd
/tokenize.py
",
$filename2
],
"
>
",
\
$token_out
;
my
@words
=
split
'
',
$line
;
foreach
my
$index
(
0
..
$#words
)
{
unlink
$filename2
or
die
"
Couldn't delete temp file!
$filename2
";
$result
.=
$index
+
1
.
"
\t
$words
[
$index
]
\t
unk
\n
";
}
utf8::
decode
(
$token_out
);
$result
.=
"
</Sentence>
";
return
$token_out
;
}
close
INFILE
;
utf8::
decode
(
$result
);
return
$result
;
}
}
sub
run_daemons
{
sub
run_daemons
{
...
@@ -62,6 +65,6 @@ sub call_daemon {
...
@@ -62,6 +65,6 @@ sub call_daemon {
return
$result
;
return
$result
;
}
}
run_daemons
(("
tokenizer
"));
#
run_daemons(("tokenizer"));
1
;
1
;
modules/ILMT-URD-HIN-Tokenizer/lib/ILMT/URD/HIN/Tokenizer.pm-old
0 → 100644
View file @
85fac48c
package ILMT::URD::HIN::Tokenizer;

use strict;
use warnings;

use Dir::Self;
use Data::Dumper;
use IO::Socket::INET;    # required by call_daemon(); was missing (core module)

# Registry of external daemons this module drives.  Each entry holds the
# executable path, its launch arguments (port number appended by
# run_daemons), and the TCP port call_daemon() connects to on localhost.
my %daemons = (
    "tokenizer" => {
        "path" => "ind-tokz",
        "args" => "--l urd --s --daemonize --port",
        "port" => "31001",
    },
);
# process(%args): tokenize $args{data} into SSF format.
#
# Sends the raw UTF-8 text to the external "tokenizer" daemon, which returns
# one sentence per line; each whitespace-separated token is then emitted as
#   <Sentence id="N">
#   1<TAB>token<TAB>unk
#   ...
#   </Sentence>
# Returns the assembled SSF string, decoded back to Perl's internal form.
sub process {
    my %args = @_;

    # Daemon speaks raw UTF-8 octets, not Perl's internal encoding.
    utf8::encode($args{data});

    my $sentences = call_daemon("tokenizer", $args{data});

    # Lexical in-memory filehandle instead of the bareword package-global
    # INFILE the original used; die with context on failure.
    open my $in_fh, '<', \$sentences
        or die "open in-memory handle: $!";

    my $result = "";
    my $ctr    = 0;
    while (my $line = <$in_fh>) {
        $ctr++;
        $result .= "<Sentence id=\"$ctr\">\n";
        my @words = split ' ', $line;
        foreach my $index (0 .. $#words) {
            $result .= $index + 1 . "\t$words[$index]\tunk\n";
        }
        $result .= "</Sentence>";
    }
    close $in_fh;

    utf8::decode($result);
    return $result;
}
# Launch each named daemon in the background.  An exclusive, non-blocking-ish
# flock (0.01 s wait) on a per-daemon runfile makes a second launch attempt
# for the same port a no-op instead of spawning a duplicate.
sub run_daemons {
    my @names = @_;
    for my $name (@names) {
        my $cfg      = $daemons{$name};
        my $launch   = "$cfg->{path} $cfg->{args} $cfg->{port} &";
        my $lockfile = __DIR__ . "/run/${name}_$cfg->{port}";
        my $status   = system("flock -e -w 0.01 $lockfile -c '$launch'");
        if ($status != 0) {
            warn "[" . __PACKAGE__ . "]: Port $cfg->{port} maybe unavailable! $?\n";
        }
    }
}
# call_daemon($daemon_name, $input): send $input (newline-terminated) to the
# named daemon's TCP port on 127.0.0.1 and return everything it writes back,
# reading line by line until EOF.  Dies if the connection cannot be opened.
sub call_daemon {
    my ($daemon_name, $input) = @_;
    my $port = $daemons{$daemon_name}{port};

    # Direct method call instead of the original indirect-object syntax
    # ("new IO::Socket::INET(...)"), which is parser-ambiguous; also dropped
    # the unused $client_socket declaration.
    my $socket = IO::Socket::INET->new(
        PeerHost => '127.0.0.1',
        PeerPort => $port,
        Proto    => 'tcp',
    ) or die "ERROR in Socket Creation : $!\n";

    $socket->send("$input\n");

    my $result = "";
    while (my $line = $socket->getline) {
        $result .= $line;
    }
    $socket->close();

    return $result;
}
# Start the tokenizer daemon as a module-load side effect.
run_daemons("tokenizer");

1;    # module loaded successfully
modules/ILMT-URD-HIN-Tokenizer/lib/ILMT/URD/HIN/tokenize.py
0 → 100644
View file @
85fac48c
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created by
@author: priyank
'''
# Shebang and coding cookie must be the first lines of the file; the import
# originally preceded them (possibly an extraction artifact), which would make
# both ineffective.
import os, sys, codecs
def tokenizer(text, ind):
    """Tokenize the text only on space."""
    # Split on any whitespace run, then number tokens from 1 in SSF style.
    words = text.split()
    body = []
    for pos, word in enumerate(words):
        body.append(str(pos + 1) + '\t' + word + '\tunk')
    # Sentence ids are 1-based; `ind` is the 0-based sentence index.
    framed = ['<Sentence id="' + str(ind + 1) + '">'] + body + ['</Sentence>']
    return '\n'.join(framed)
# Read the input file (path in argv[1]) as UTF-8, tokenize each non-empty
# line as one sentence, and print the combined SSF output as UTF-8 bytes.
src = codecs.open(sys.argv[1], "rb", "utf-8")
all_lines = src.readlines()
src.close()

final_output = ""
ii = 0
for raw in all_lines:
    stripped = raw.strip()
    if stripped:
        final_output = final_output + tokenizer(stripped, (ii)) + "\n"
        # NOTE(review): counter advances only for non-empty lines, so blank
        # lines do not consume a sentence id — confirm against original file
        # (indentation was lost in extraction).
        ii = ii + 1

print(final_output.encode('utf-8'))
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment