Commit 0d14d2d4 authored by priyank's avatar priyank

urd shallow parser first commit

parents
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created by
@author: priyank
'''
import json
import requests
from SocketServer import ThreadingMixIn
import threading
import codecs
import re
import cgi
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from optparse import OptionParser
from urlparse import urlparse, parse_qs
import os
import sys
from argparse import ArgumentParser
#configuring commandline parser and check if the all command line parameters are valid
# Command-line interface -- both arguments are mandatory:
#   -c/--serverConfigFile : JSON file mapping language codes to service URLs
#                           (the 'urd' key is read below)
#   -i/--inputFile        : UTF-8 text file, one paragraph per line
parser=ArgumentParser()
parser.add_argument('-c', '--serverConfigFile', help='server configuration file (with path)', required=True)
parser.add_argument('-i', '--inputFile', help='inputFile (with path)', required=True)
args = parser.parse_args()
#getting command line config files and check if files exist
serverFile = args.serverConfigFile
inputFile = args.inputFile
#function to get sentences from SSF
def sentenceCollector(inputString):
    """Split an SSF document into a list of '<Sentence ...>...</Sentence>' strings.

    Every non-empty line is buffered; a line starting with '</Sentence'
    closes the current sentence and flushes the buffer. Each returned
    sentence keeps one trailing newline per line, including the close tag.
    """
    # Ensure back-to-back sentence tags are separated before splitting on newlines.
    if "Sentence><Sentence" in inputString:
        inputString = inputString.replace('Sentence><Sentence', 'Sentence>\n<Sentence')
    sentences = []
    buffered = []
    for raw in inputString.strip().split("\n"):
        stripped = raw.rstrip()
        if not stripped:
            continue
        buffered.append(stripped + "\n")
        if stripped.startswith('</Sentence'):
            # End of a sentence: emit the accumulated block and start fresh.
            sentences.append("".join(buffered))
            buffered = []
    return sentences
# Function to get output of lats module(wordgenerator)
def wordgenCollector(inputString):
    """Collect the token column (2nd tab field) of an SSF block into one string.

    Sentence tags and chunk brackets '((' / '))' are skipped; each kept
    token is followed by a single space (so the result has a trailing
    space when non-empty).
    """
    tokens = []
    for raw in inputString.strip().split("\n"):
        raw = raw.rstrip()
        fields = raw.split("\t")
        # Only lines with at least two tab-separated fields carry a token.
        if not raw or len(fields) < 2:
            continue
        if raw.startswith(('<Sentence ', '</Sentence')):
            continue
        if fields[1] in ('((', '))'):
            continue
        tokens.append(fields[1])
    return "".join(tok + " " for tok in tokens)
# Validate that both command-line files exist before doing any work.
if not os.path.isfile(serverFile):
print " serverFile file", serverFile ,"does not exist."
# NOTE(review): exits with status 0 even though this is an error path.
sys.exit(0);
if not os.path.isfile(inputFile):
print " inputFile file", inputFile ,"does not exist."
sys.exit(0);
server_details = {}
#getting server details
with open(serverFile) as server_file:
server_details = json.load(server_file)
# Base service URL for the Urdu pipeline, read from the server config JSON.
translationURL = server_details['urd']
f = codecs.open(inputFile, "rb", "utf-8")
lines = f.readlines()
f.close()
# Derive the tokenizer-only URL: second-to-last path component set to '1'
# (presumably "run up to module 1", i.e. the tokenizer -- verify against API).
tokenizerURLArray = translationURL.split("/")
tokenizerURLArray[-2] = '1'
# URL listing the pipeline's module names; assumes the configured URL has at
# least 7 "/"-separated parts -- TODO confirm against server.json.
modulesURL = tokenizerURLArray[0] + "/" + tokenizerURLArray[1] + "/" + tokenizerURLArray[2] + "/" + tokenizerURLArray[5] + "/" + tokenizerURLArray[6] + "/modules"
tokenizerURL = "/".join(tokenizerURLArray)
# Derive the full-pipeline URL: third-to-last component set to '2'
# (presumably "start from module 2" -- verify against the API).
tokenizerURLArray = translationURL.split("/")
tokenizerURLArray[-3] = '2'
translationURL = "/".join(tokenizerURLArray)
myheaders = {"Content-type": "application/x-www-form-urlencoded; charset=UTF-8"}
# Disable any environment-configured HTTP(S) proxies for local service calls.
proxies = {
"http" :None,
"https":None
}
res = requests.post(modulesURL, proxies=proxies, headers=myheaders)
lastModule = ''
secondLastModule = ''
# getting last modules
if res is not None:
modulesList = json.loads(res.text)
lastModule = modulesList[-1]
secondLastModule = modulesList[-2]
else:
print "Null response from server"
sys.exit(0)
response_data = {}
response_data['language'] = 'urd'
response_data['text'] = lines
output= ""
wxoutput = ""
# processing sentence in each line by calling MT
# Processing paras: one line is considered as a para
iii = 0
intermediatearray = []
mystr = ""
for line in lines:
line = line.strip()
if line :
# calling tokenizer on line
dataToSend = {"data":line.strip().encode('utf-8')}
res = requests.post(tokenizerURL, proxies=proxies, headers=myheaders, data=dataToSend)
tokenOut = json.loads(res.text)
# Split the tokenizer's SSF output into individual sentences.
sentences = sentenceCollector(tokenOut['tokenizer-1'])
jjj = 0
tempdict = {}
mystr = mystr + "paraid:" + str((iii + 1)) + "\n" + line + "\n"
# Run the remaining pipeline on each tokenized sentence.
for sentence in sentences:
dataToSend = {"data":sentence.strip().encode('utf-8').strip()}
res = requests.post(translationURL, proxies=proxies, headers=myheaders, data=dataToSend)
completeOut = json.loads(res.text)
# Module outputs are keyed "<module>-<1-based position>" in the response.
lastmoduleOutput = completeOut[lastModule+"-"+str((modulesList.index(lastModule))+1)]
secondlastmoduleOutput = completeOut[secondLastModule+"-"+str((modulesList.index(secondLastModule))+1)]
finalOutput = lastmoduleOutput
output = output + finalOutput + " \n\n"
wxoutput = wxoutput + secondlastmoduleOutput + " \n\n"
mystr = mystr + "sentid:" + str((jjj + 1)) + "\n" + line + "\n"
mystr = mystr + lastmoduleOutput + "\n"
jjj = jjj + 1
iii = iii + 1
output = output + " \n\n"
wxoutput = wxoutput + " \n\n"
mystr = mystr + "---------------------------------------------------------\n"
# Final report: para/sentence ids with the shallow-parser output per sentence.
print mystr
# ILMT-API
An API for querying ILMT systems.
## For Installation of dependencies related to Sampark refer to dependencies.txt
## For Installation of perl related dependencies run script install.sh with following command:
```
sh install.sh
```
## Steps to install Shallowparser MT modules run:
```
cd ilmt-api-urd-shallowparser/
sh setup.sh
```
## Steps to test API:
a) On browser run following URL:
```
http://<YOUR_IP>:8484/urd/hin/translate?data="شیوسینکوں نے ممبئی کے چند تھیئٹرز پر ہنگامے کیے۔ "&pretty=true
```
b) here replace <YOUR_IP> with your IP address.
c) For testing API with curl run the following:
```
curl --noproxy '*' http://localhost:8484/urd/hin/translate --data data="شیوسینکوں نے ممبئی کے چند تھیئٹرز پر ہنگامے کیے۔"
```
d) For MT with UI run following command:
```
http://<YOUR_IP>:8484/
```
e) Now you would see proper translation panels.
## To execute sentences using API:
please update the host IP in server.json from 10.2.63.52 to your IP and run following command:
$python GetShallowParserOutput.py -c server.json -i input.txt
## To check which ports are allotted to different services, run following command:
```
find ./ | grep 'run/.*_'
```
## Shallowparser of Urdu Language
#!/usr/bin/env perl
use Dir::Self;
use strict;
use warnings;
use Data::Dumper;
use Mojolicious::Lite;
use lib __DIR__ . "/lib";
use ILMT::Translator qw(get_translator get_langpairs);
use ILMT::URD::HIN;
plugin qw(Mojolicious::Plugin::ForkCall);
# Full-pipeline translation endpoint for a :src -> :tgt language pair.
# The text may arrive under either "data" or "input"; both keys are mirrored
# below. The pipeline runs in a forked worker (Mojolicious::Plugin::ForkCall)
# so the event loop stays responsive. Passing "pretty" renders the result
# through the 'pretty' template instead of raw JSON.
any '/:src/:tgt/translate' => sub {
my $c = shift->render_later;
# Long-running pipelines: keep the connection open for up to an hour.
$c->inactivity_timeout(3600);
my %args = %{$c->req->params->to_hash};
$args{'src_lang'} = $c->param('src');
$args{'tgt_lang'} = $c->param('tgt');
# Accept the payload under either key and mirror it to both.
$args{'data'} = $args{'input'} = $args{'data'} // $args{'input'};
$c->fork_call(
sub {
my (%args) = @_;
my $translator = get_translator(uc($c->param('src')), uc($c->param('tgt')));
return $translator->translate(%args);
},
[%args],
sub {
my ($c, $final_result) = @_;
if (exists $args{"pretty"}) {
# Flatten the per-module result hash into "key:\nvalue" text lines.
my $final_string = join "\n", map { "$_:\n$final_result->{$_}" } keys %$final_result;
$c->render(template => 'pretty', result => $final_string);
} else {
$c->render(json => $final_result);
}
}
);
};
# Partial-pipeline endpoint: runs only modules :start through :end (1-based)
# of the :src -> :tgt pipeline via partial_p; otherwise mirrors the forked
# /translate route (same arg normalisation and "pretty" handling).
any '/:src/:tgt/:start/:end' => sub {
my $c = shift->render_later;
$c->inactivity_timeout(3600);
my %args = %{$c->req->params->to_hash};
$args{'src_lang'} = $c->param('src');
$args{'tgt_lang'} = $c->param('tgt');
# Accept the payload under either key and mirror it to both.
$args{'data'} = $args{'input'} = $args{'data'} // $args{'input'};
$c->fork_call(
sub {
my (%args) = @_;
my $translator = get_translator(uc($c->param('src')), uc($c->param('tgt')));
return $translator->partial_p($c->param('start'), $c->param('end'), %args);
},
[%args],
sub {
my ($c, $final_result) = @_;
if (exists $args{"pretty"}) {
my $final_string = join "\n", map { "$_:\n$final_result->{$_}" } keys %$final_result;
$c->render(template => 'pretty', result => $final_string);
} else {
$c->render(json => $final_result);
}
}
);
};
# Synchronous (non-forked) variant of the partial-pipeline endpoint:
# runs modules :start .. :end in-process and renders the result directly.
any '/partialtranslate/new/:src/:tgt/:start/:end/partial' => sub {
print "inside partialtranslate/new ............";
my $c = shift;
my %args = %{$c->req->params->to_hash};
$args{'src_lang'} = $c->param('src');
$args{'tgt_lang'} = $c->param('tgt');
# Accept the payload under either key and mirror it to both.
$args{'data'} = $args{'input'} = $args{'data'} // $args{'input'};
my $translator = get_translator(uc($c->param('src')), uc($c->param('tgt')));
my $final_result = $translator->partial_p($c->param('start'), $c->param('end'), %args);
#$c->render(json => $final_result);
if (exists $args{"pretty"}) {
my $final_string = join "\n", map { "$_:\n$final_result->{$_}" } keys %$final_result;
$c->render(template => 'pretty', result => $final_string);
} else {
$c->render(json => $final_result);
}
};
# Synchronous (non-forked) variant of the full-pipeline /translate endpoint.
any '/mytranslate/new/api/:src/:tgt/mytranslate' => sub {
print "inside mytranslate ............";
my $c = shift;
my %args = %{$c->req->params->to_hash};
$args{'src_lang'} = $c->param('src');
$args{'tgt_lang'} = $c->param('tgt');
# Accept the payload under either key and mirror it to both.
$args{'data'} = $args{'input'} = $args{'data'} // $args{'input'};
my $translator = get_translator(uc($c->param('src')), uc($c->param('tgt')));
my $final_result = $translator->translate(%args);
#$c->render(json => $final_result);
if (exists $args{"pretty"}) {
my $final_string = join "\n", map { "$_:\n$final_result->{$_}" } keys %$final_result;
$c->render(template => 'pretty', result => $final_string);
} else {
$c->render(json => $final_result);
}
};
any '/:src/:tgt/' => sub {
    # Respond with the number of modules in this language pair's pipeline.
    my $c = shift;
    my ($src, $tgt) = (uc $c->param('src'), uc $c->param('tgt'));
    my $pipeline = get_translator($src, $tgt)->{seq};
    $c->render(text => scalar @$pipeline);
};
any '/:src/:tgt/modules' => sub {
    # List the pipeline module names (lower-cased) as a JSON array.
    my $c = shift;
    my ($src, $tgt) = (uc $c->param('src'), uc $c->param('tgt'));
    my @modules = map { lc } @{ get_translator($src, $tgt)->{seq} };
    $c->render(json => \@modules);
};
any '/langpairs' => sub {
    # Report every registered language pair (also dumped to stdout for debugging).
    my $c = shift;
    my %pairs = get_langpairs();
    print Dumper(\%pairs);
    $c->render(json => \%pairs);
};
get '/' => sub {
    # Serve the bundled single-page UI.
    shift->reply->static('index.html');
};
app->start;
__DATA__
@@ pretty.html.ep
<pre><%= $result %></pre>
Dependencies:
+ jdk8
Follow below steps to install jdk:-
a) Download Jdk-8 from oracle website for linux 64 bit tar
b) cp downloaded package to '/usr/local'
c) tar -xvf jdk-<version>-linux-x64.tar.gz
d) vim /etc/profile
e) export JAVA_HOME="/usr/local/jdk<version>"
f) export PATH=$PATH:$JAVA_HOME/bin
g) source /etc/profile
h) java -version
+ CRF++ 0.51+
Follow below steps to install CRF++:-
$ cd CRF++-0.51
$./configure
$make
$make install
check if it is installed properly or not :-
crf_test --version
Note:-
if 'libcrfpp.so.0' is not found after the CRF installation, then use the command below
ln -s /usr/local/lib/libcrfpp.so.0 /usr/lib/libcrfpp.so.0
+ gcc
+ gdbm
+ libgdbm-dev
+ glib-2.0
+ libglib2.0-dev
For ubuntu use following command:
sudo apt-get install libgdbm-dev libglib2.0-dev g++
# apache ant installation
- wget https://downloads.apache.org/ant/binaries/apache-ant-1.10.6-bin.tar.gz
- sudo cp apache-ant-1.10.6-bin.tar.gz /usr/local/
- cd /usr/local/
- sudo su
- tar -xvf apache-ant-1.10.6-bin.tar.gz
- # exit from root by typing exit
- vim .bashrc
- Add following lines at the bottom of file:
export ANT_HOME=/usr/local/<ANT DIRECTORY>
export PATH=${PATH}:${ANT_HOME}/bin
- save the file
- $source .bashrc
کئی سنیما گھروں نے ہاؤس فل کا بورڈ لگا دیا اور ایڈوانس بکنگ شروع کر دی گی۔
جمعہ کی صبح تک فلم ممبئی میں ریلیز ہو گی بھی یا نہیں اس پر سنیما مالکان تذبذب کا شکار تھے لیکن دن ہوتے ہی چند ملٹی پلیکس میں فلم کی نمائش شروع ہوگئی۔
شیوسینکوں نے ممبئی کے چند تھیئٹرز پر ہنگامے کیے۔
اندھیری کے فن ری پبلک تھیئٹر پر خواتین شیوسینکوں نے ہنگامہ آرائی کی کوشش کی چند نے تھیئٹر میں داخل ہونا چاہا لیکن پولیس نے سب کو حراست میں لے لیا۔
sudo apt-get install curl
curl -L http://cpanmin.us | perl - --sudo App::cpanminus
sudo cpanm Data::Dumper
sudo cpanm Dir::Self
sudo cpanm Mojolicious::Lite
sudo cpanm Module::Runtime
sudo cpanm Module::Pluggable
sudo cpanm Mojolicious::Plugin::ForkCall
sudo cpanm IPC::Run
sudo cpanm Module::Pluggable
sudo apt-get install python-argparse requests
package ILMT::Translator;
use strict;
use warnings;
use Dir::Self;
use Data::Dumper;
use Exporter qw(import);
use Module::Pluggable::Object;
use Module::Runtime qw(use_module);
our @EXPORT_OK = qw(get_translator get_langpairs);
my %translator_table;
# Constructor: new_translator ILMT::Translator($src, $tgt, \@seq).
# Loads every ILMT::<SRC>::<TGT>::* plugin package and registers the resulting
# object in the package-level %translator_table keyed by (src, tgt).
sub new_translator {
my $class = shift;
my $self = {
src => shift,
tgt => shift,
};
my $search_path = "ILMT::$self->{src}::$self->{tgt}";
# Discover and load all plugin packages directly under the pair's namespace.
@{$self->{plugins}} = map use_module($_),
grep /^${search_path}::[^:]+$/,
Module::Pluggable::Object->new(search_path => $search_path)->plugins;
# Remaining argument: arrayref with the ordered pipeline module sequence.
$self->{seq} = shift;
bless $self, $class;
# Register this module as a translator service
$translator_table{$self->{src}}{$self->{tgt}} = $self;
return $self;
}
sub get_translator {
    # Fetch the translator object registered for a (source, target) pair.
    my ($source, $target) = @_;
    return $translator_table{$source}{$target};
}
# Return a flat map of lower-cased source language => arrayref of target
# languages, derived from the registration table (also dumped for debugging).
sub get_langpairs {
print Dumper(\%translator_table);
return map +(lc $_ => [ map lc, keys %{$translator_table{$_}} ]), keys %translator_table;
}
# Run the full module pipeline over %args. Each module's output is stored
# under the key "<module>-<1-based position>" and also fed to the next module
# through $args{data}. Returns a hashref of all per-module outputs.
sub translate {
    my ($self, %args) = @_;
    my @identifiers;
    my %final_result;
    my @dispatch_seq = @{$self->{seq}};
    foreach my $index (0 .. $#dispatch_seq) {
        my $module = $dispatch_seq[$index];
        # 1-based position in the pipeline. The original post-incremented the
        # foreach variable ($dispatch_seq[$index ++]), but the loop variable is
        # aliased to a read-only range element, which dies at runtime with
        # "Modification of a read-only value attempted".
        my $identifier = lc($module . "-" . ($index + 1));
        push @identifiers, $identifier;
        my $package = "ILMT::$self->{src}::$self->{tgt}::$module";
        # Every module package exposes a process() taking the arg hash.
        $args{$identifier} = $package->can('process')->(%args);
        $args{'data'} = $args{$identifier};
    }
    @final_result{@identifiers} = @args{@identifiers};
    return \%final_result;
}
# Run a contiguous sub-range of the pipeline: modules $start .. $end (1-based).
# Each module's output is stored under "<module>-<position>" and also fed to
# the next module via $args{data}; returns a hashref of the per-module outputs.
sub partial_p {
my ($self, $start, $end, %args) = @_;
my $result = "";
my @dispatch_seq = @{$self->{seq}};
my @identifiers;
my %final_result;
foreach my $index ($start .. $end) {
# $index is 1-based; the sequence array is 0-based.
my $module = $dispatch_seq[$index - 1];
my $identifier = lc("${module}-$index");
push @identifiers, $identifier;
print "module ## $module\n";
my $package = "ILMT::$self->{src}::$self->{tgt}::$module";
# Every module package exposes a process() taking the arg hash.
$args{$identifier} = $package->can('process')->(%args);
$args{'data'} = $args{$identifier};
}
@final_result{@identifiers} = @args{@identifiers};
return \%final_result;
}
1;
# Urdu -> Hindi shallow-parser language-pair definition.
package ILMT::URD::HIN;
use strict;
use warnings;
use Data::Dumper;
use ILMT::Translator;
# Ordered module sequence executed by ILMT::Translator for this pair.
my @seq = (
"Tokenizer",
"UTF2WX_U",
"Morph",
"WX2UTF_U",
"POSTagger",
"UTF2WX_U",
"Chunker",
"Prune",
"PickOneMorph",
"ComputeHead",
"WX2UTF_U"
);
# Build and register the URD->HIN translator with the sequence above.
my $langpair_obj = new_translator ILMT::Translator("URD", "HIN", \@seq);
use strict;
use warnings;
use Data::Dumper;
use Graph::Directed;
use JSON;
use List::Util qw(reduce);
use Mojolicious::Lite;
use Mojo::Redis2;
use lib "./lib";
use ILMT::URD::HIN::Chunker;
# Service identity used to address this instance in the pipeline DAG.
my $modulename = "ilmt.urd.hin.chunker";
# In-memory store of pending per-job inputs, keyed by job id.
my %database = ();
# Shared Redis connection (host "redis" -- presumably a compose/docker service name).
helper redis => sub {
state $r = Mojo::Redis2->new(url => "redis://redis:6379");
};
# Adapter between the pipeline protocol and the Chunker module.
# With a single accumulated input its value is passed as "data"; otherwise the
# trailing "_<modid>" is stripped from each key so the module sees plain names.
sub process {
my $hash = $_[0];
my %newhash;
if (keys %{$hash} == 1) {
%newhash = (data => (%{$hash})[1]);
} else {
@newhash{ map { s/_[^_]*$//r } keys %{$hash} } = values %{$hash};
}
return ILMT::URD::HIN::Chunker::process(%newhash);
}
sub genError {
    # Report a client error as a JSON body with HTTP status 400.
    my ($c, $error) = @_;
    $c->render(
        json   => to_json({Error => $error}),
        status => 400,
    );
}
sub genDAGGraph {
    # Build a Graph::Directed from an adjacency list {from => [to, ...]}.
    my ($edges) = @_;
    my $graph = Graph::Directed->new();
    for my $source (keys %{$edges}) {
        $graph->add_edge($source, $_) for @{$edges->{$source}};
    }
    return $graph;
}
# Pipeline protocol endpoint. Each request carries a job id, this module's
# instance id, the DAG edges and the data produced so far. Inputs are buffered
# per job until every upstream edge has delivered; then the chunker runs and
# the result is forwarded to all downstream modules (or published on Redis
# when this node has no successors).
post '/pipeline' => sub {
my $c = shift;
my $ilmt_json = decode_json($c->req->body);
my $ilmt_modid = $ilmt_json->{modid} || genError($c, "No ModuleID Specified!") && return;
my $ilmt_jobid = $ilmt_json->{jobid} || genError($c, "No JobID Specified!") && return;
my $ilmt_data = $ilmt_json->{data} || genError($c, "No Data Specified!") && return;
my $ilmt_dag = genDAGGraph($ilmt_json->{edges});
genError($c, "Edges not specified!") && return if (!$ilmt_dag);
my $ilmt_module = $modulename . '_' . $ilmt_modid;
# Upstream modules: sources of the DAG edges pointing at this instance.
my @ilmt_inputs = map {@$_[0]} $ilmt_dag->edges_to($ilmt_module);
if (!$database{$ilmt_jobid}) {
$database{$ilmt_jobid} = {};
$database{"data_$ilmt_jobid"} = {};
}
# Record any upstream outputs present in this request, keyed by module name.
foreach (@ilmt_inputs) {
my $input_module = $_ =~ s/_[^_]*$//r;
$database{$ilmt_jobid}{$input_module} = $ilmt_data->{$_} if $ilmt_data->{$_};
}
%{$database{"data_$ilmt_jobid"}} = (%{$database{"data_$ilmt_jobid"}}, %{$ilmt_data});
# Run only once all expected inputs have arrived for this job.
if (@ilmt_inputs == keys %{$database{$ilmt_jobid}}) {
$c->render(json => "{Response: 'Processing...'}", status => 202);
my $ilmt_output = process($database{$ilmt_jobid});
$ilmt_data->{$ilmt_module} = $ilmt_output;
%{$ilmt_data} = (%{$ilmt_data}, %{$database{"data_$ilmt_jobid"}});
my @ilmt_next = map {@$_[1]} $ilmt_dag->edges_from($ilmt_module);
if (@ilmt_next) {
# Forward the accumulated data to each downstream module's /pipeline.
foreach (@ilmt_next) {
my @module_info = split(/_([^_]+)$/, $_);
my $next_module = $module_info[0];
$ilmt_json->{modid} = $module_info[1];
$c->ua->post("http://$next_module/pipeline" => json
=> from_json(encode_json($ilmt_json), {utf8 => 1}) => sub {
my ($ua, $tx) = @_;
my $msg = $tx->error ? $tx->error->{message} : $tx->res->body;
$c->app->log->debug("[$ilmt_jobid]: $msg\n");
});
}
} else {
# Terminal node: hand the finished job back via Redis pub/sub.
$c->redis->publish($ilmt_jobid => encode_json($ilmt_json));
}
delete $database{$ilmt_jobid};
} else {
$c->render(json => "{Response: 'Waiting for more inputs...'}", status => 202);
}
};
app->start;
package ILMT::URD::HIN::Chunker;
use strict;
use warnings;
use Dir::Self;
use Data::Dumper;
use IPC::Run qw(run);
my @dispatch_seq = (
"ssf2tnt",
"crf_test",
"bio2ssf",
);
# Run the chunker sub-pipeline (ssf2tnt -> crf_test -> bio2ssf) over $args{data}.
sub process {
my %args = @_;
# The external CRF tool works on bytes: encode before, decode after.
utf8::encode($args{'data'});
foreach my $submodule (@dispatch_seq) {
$args{'data'} = __PACKAGE__->can($submodule)->(%args);
}
utf8::decode($args{'data'});
return $args{"data"};
}
# Convert SSF input to the column format expected by CRF++: one
# "token<TAB>tag<TAB>features" line per word, blank line between sentences.
# Spaces inside fields are protected as "___" (undone later in bio2ssf).
sub ssf2tnt {
my %par = @_;
my $data = $par{'data'};
my $result = "";
open my $fh, '<', \$data or die $!;
while (my $line = <$fh>)
{
chomp($line);
# Sentence close tag ends the current block.
if($line=~/<\/S/)
{
$result .= "\n";
next;
}
if($line =~ /^\s*$/) # if the line has all space charcters
{
$result .= "\n";
next;
}
$line=~s/[ ]+/___/g;
my ($att1,$att2,$att3,$att4) = split (/[\t]+/, $line);
# Drop tag lines and chunk brackets; keep only word rows.
if($att1 =~ /\<.*/ || $att2 eq "((" || $att2 eq "))") #unwanted lines
{
next;
}
else
{
$result .= "$att2\t$att3\t$att4\n";
}
}
return $result;
}
# Convert CRF++ BIO-tagged output (4th column B-X / I-X / O) back into SSF:
# wraps runs of words into numbered chunks "(( ... ))" inside <Sentence> tags.
# Inconsistent I- tags are collected in $error but do not abort conversion.
sub bio2ssf {
my %par = @_;
my $data = $par{'data'};
my $result = "";
open my $fh, '<', \$data or die $!;
my $line = "";
my $startFlag = 1;
my $wno = 1;
my $prevCTag = "";
my $error = "";
my $lno = 0;
my $sno = 1;
my $cno=0;
while($line = <$fh>)
{
$lno ++;
# Blank line: close the previous sentence and reset per-sentence state.
if($line =~ /^\s*$/)
{ # start of a sentence
$result .= "\t))\t\t\n";
$result .= "</Sentence>\n\n";
$startFlag = 1;
$wno = 1;
$prevCTag = "";
$sno ++;
next;
}
if($startFlag == 1)
{
$result .= "<Sentence id=\"$sno\">\n";
}
chomp($line);
my @cols = split(/\s+/,$line);
# B-tag: close any open chunk and open a new numbered one.
if($cols[3] =~ /^B-(\w+)/)
{
my $ctag = $1;
if($prevCTag ne "O" && $startFlag == 0)
{
$result .= "\t))\t\t\n";
$wno++;
}
$cno++;
$result .= "$cno\t((\t$ctag\t\n";
$wno=1;
$prevCTag = $ctag;
}
elsif($cols[3] =~ /^O/)
{
if($prevCTag ne "O" && $startFlag == 0)
{
$result .= "\t))\t\t\n";
$wno++;
}
$prevCTag = "O";
}
if($cols[3] =~ /I-(\w+)/ )
{ # check for inconsistencies .. does not form a chunk if there r inconsistencies
my $ctag = $1;
if($ctag ne $prevCTag)
{
$error =$error . "Inconsistency of Chunk tag in I-$ctag at Line no:$lno : There is no B-$ctag to the prev. word\n";
}
}
# Restore spaces that ssf2tnt protected as "___".
$cols[2]=~s/___/ /g;
$result .= "$cno.$wno\t$cols[0]\t$cols[1]\t$cols[2]\n";
$wno ++;
$startFlag = 0;
}
return $result;
}
# Run the external CRF++ tagger over the TNT-format data with the bundled
# Urdu chunker model, returning the tagger's stdout.
sub crf_test {
my %par = @_;
my $data = $par{'data'};
my $result = "";
run ["/usr/local/bin/crf_test", "-m", __DIR__ . "/Chunker/models/urd_200k_chunker.model"], \$data, \$result;
return $result;
}
[submodule "API"]
path = API
url = https://gitlab.com/ilmt/ILMT-URD-HIN-SSFAPI.git
use strict;
use warnings;
use Data::Dumper;
use Graph::Directed;
use JSON;
use List::Util qw(reduce);
use Mojolicious::Lite;
use Mojo::Redis2;
use lib qw(lib API/lib);
use ILMT::URD::HIN::ComputeHead;
# Service identity used to address this instance in the pipeline DAG.
my $modulename = "ilmt.urd.hin.computehead";
# In-memory store of pending per-job inputs, keyed by job id.
my %database = ();
# Shared Redis connection (host "redis" -- presumably a compose/docker service name).
helper redis => sub {
state $r = Mojo::Redis2->new(url => "redis://redis:6379");
};
# Adapter between the pipeline protocol and the ComputeHead module.
# With a single accumulated input its value is passed as "data"; otherwise the
# trailing "_<modid>" is stripped from each key so the module sees plain names.
sub process {
my $hash = $_[0];
my %newhash;
if (keys %{$hash} == 1) {
%newhash = (data => (%{$hash})[1]);
} else {
@newhash{ map { s/_[^_]*$//r } keys %{$hash} } = values %{$hash};
}
return ILMT::URD::HIN::ComputeHead::process(%newhash);
}
sub genError {
    # Report a client error as a JSON body with HTTP status 400.
    my ($c, $error) = @_;
    $c->render(
        json   => to_json({Error => $error}),
        status => 400,
    );
}
sub genDAGGraph {
    # Build a Graph::Directed from an adjacency list {from => [to, ...]}.
    my ($edges) = @_;
    my $graph = Graph::Directed->new();
    for my $source (keys %{$edges}) {
        $graph->add_edge($source, $_) for @{$edges->{$source}};
    }
    return $graph;
}
# Pipeline protocol endpoint (same protocol as the other module services).
# Buffers per-job inputs until every upstream edge has delivered, runs
# ComputeHead, then forwards downstream or publishes the finished job on Redis.
post '/pipeline' => sub {
my $c = shift;
my $ilmt_json = decode_json($c->req->body);
my $ilmt_modid = $ilmt_json->{modid} || genError($c, "No ModuleID Specified!") && return;
my $ilmt_jobid = $ilmt_json->{jobid} || genError($c, "No JobID Specified!") && return;
my $ilmt_data = $ilmt_json->{data} || genError($c, "No Data Specified!") && return;
my $ilmt_dag = genDAGGraph($ilmt_json->{edges});
genError($c, "Edges not specified!") && return if (!$ilmt_dag);
my $ilmt_module = $modulename . '_' . $ilmt_modid;
# Upstream modules: sources of the DAG edges pointing at this instance.
my @ilmt_inputs = map {@$_[0]} $ilmt_dag->edges_to($ilmt_module);
if (!$database{$ilmt_jobid}) {
$database{$ilmt_jobid} = {};
$database{"data_$ilmt_jobid"} = {};
}
# Record any upstream outputs present in this request, keyed by module name.
foreach (@ilmt_inputs) {
my $input_module = $_ =~ s/_[^_]*$//r;
$database{$ilmt_jobid}{$input_module} = $ilmt_data->{$_} if $ilmt_data->{$_};
}
%{$database{"data_$ilmt_jobid"}} = (%{$database{"data_$ilmt_jobid"}}, %{$ilmt_data});
# Run only once all expected inputs have arrived for this job.
if (@ilmt_inputs == keys %{$database{$ilmt_jobid}}) {
$c->render(json => "{Response: 'Processing...'}", status => 202);
my $ilmt_output = process($database{$ilmt_jobid});
$ilmt_data->{$ilmt_module} = $ilmt_output;
%{$ilmt_data} = (%{$ilmt_data}, %{$database{"data_$ilmt_jobid"}});
my @ilmt_next = map {@$_[1]} $ilmt_dag->edges_from($ilmt_module);
if (@ilmt_next) {
# Forward the accumulated data to each downstream module's /pipeline.
foreach (@ilmt_next) {
my @module_info = split(/_([^_]+)$/, $_);
my $next_module = $module_info[0];
$ilmt_json->{modid} = $module_info[1];
$c->ua->post("http://$next_module/pipeline" => json
=> from_json(encode_json($ilmt_json), {utf8 => 1}) => sub {
my ($ua, $tx) = @_;
my $msg = $tx->error ? $tx->error->{message} : $tx->res->body;
$c->app->log->debug("[$ilmt_jobid]: $msg\n");
});
}
} else {
# Terminal node: hand the finished job back via Redis pub/sub.
$c->redis->publish($ilmt_jobid => encode_json($ilmt_json));
}
delete $database{$ilmt_jobid};
} else {
$c->render(json => "{Response: 'Waiting for more inputs...'}", status => 202);
}
};
app->start;
package ILMT::URD::HIN::ComputeHead;
use strict;
use warnings;
use Dir::Self;
use Data::Dumper;
use ILMT::URD::HIN::SSFAPI::feature_filter;
use ILMT::URD::HIN::SSFAPI::shakti_tree_api;
use ILMT::URD::HIN::ComputeHead::make_chunk_name;
use ILMT::URD::HIN::ComputeHead::copy_np_head;
use ILMT::URD::HIN::ComputeHead::copy_vg_head;
# Walk every sentence of the SSF story in $args{data} and annotate each chunk
# with its head (make_chunk_name, copy_np_head, copy_vg_head), then serialise
# the modified story back to a string via printstory().
sub process {
my %args = @_;
my $input = $args{'data'};
utf8::encode($input);
# The SSFAPI parses the story into package-level state read by the helpers.
read_story(\$input);
my $numBody = get_bodycount();
my $result;
my $body;
for(my($bodyNum)=1;$bodyNum<=$numBody;$bodyNum++)
{
$body = &get_body($bodyNum,$body);
# Count the number of Paragraphs in the story
my($numPara) = &get_paracount($body);
#print STDERR "Paras : $numPara\n";
# Iterate through paragraphs in the story
for(my($i)=1;$i<=$numPara;$i++)
{
my($para);
# Read Paragraph
$para = &get_para($i);
# Count the number of sentences in this paragraph
my($numSent) = &get_sentcount($para);
# print STDERR "\n $i no.of sent $numSent";
#print STDERR "Para Number $i, Num Sentences $numSent\n";
#print $numSent."\n";
# Iterate through sentences in the paragraph
for(my($j)=1;$j<=$numSent;$j++)
{
#print " ... Processing sent $j\n";
# Read the sentence which is in SSF format
my($sent) = &get_sent($para,$j);
#print STDERR "$sent";
# print "check--\n";
# &print_tree($sent);
# Get the nodes of the sentence (words in our case)
#Copy NP head
# &AddID($sent);
&make_chunk_name($sent);
&copy_np_head($sent);
#Copy NP VG head
&copy_vg_head($sent);
}
}
}
# printstory() writes to the selected filehandle, so temporarily redirect
# STDOUT into the in-memory $result buffer.
open OUTFILE, '>', \$result or die $!;
select(OUTFILE);
printstory();
select(STDOUT);
utf8::decode($result);
return $result;
}
1;
package ILMT::URD::HIN::ComputeHead::copy_np_head;
use Exporter qw(import);
use ILMT::URD::HIN::ComputeHead::get_head_np;
our @EXPORT = qw(copy_np_head);
# For the details please see get_head.pl
sub copy_np_head
{
    # Copy the head feature onto every non-verbal chunk type of the sentence.
    # For the details please see get_head.pl
    # NOTE: was "my $sent=@_[0];" -- a one-element array slice, which warns
    # under "use warnings"; list assignment is the correct idiom.
    my ($sent) = @_;
    foreach my $chunk_tag ("NP", "JJP", "CCP", "RBP", "BLK", "NEGP") {
        &copy_head_np($chunk_tag, $sent);
    }
    #&print_tree();
} #End of Sub
1;
package ILMT::URD::HIN::ComputeHead::copy_vg_head;
use Exporter qw(import);
use ILMT::URD::HIN::SSFAPI::feature_filter;
use ILMT::URD::HIN::SSFAPI::shakti_tree_api;
use ILMT::URD::HIN::ComputeHead::get_head_vg;
our @EXPORT = qw(copy_vg_head);
#for details please check get_head.pl
sub copy_vg_head
{
    # Copy the head feature onto every verb-group chunk type of the sentence.
    # For details please check get_head.pl
    # NOTE: was "my $sent=@_[0];" -- a one-element array slice, which warns
    # under "use warnings"; list assignment is the correct idiom.
    my ($sent) = @_;
    foreach my $chunk_tag ("VGF", "VGNF", "VGINF", "VGNN") {
        &copy_head_vg($chunk_tag, $sent);
    }
}
1;
package ILMT::URD::HIN::ComputeHead::get_head_np;
use Exporter qw(import);
use ILMT::URD::HIN::SSFAPI::feature_filter;
use ILMT::URD::HIN::SSFAPI::shakti_tree_api;
our @EXPORT = qw(copy_head_np);
# Find the head child of every chunk tagged $pos_tag and copy that child's
# "name" attribute into the chunk's feature structure as head='...'.
# Children are scanned right-to-left; which POS prefix counts as a head
# depends on the chunk type (NP->NN, V*->V, JJP->J, CCP->CC, RBP->RB).
# If no child matches, the rightmost non-PSP/PRP child is used as a fallback.
# NOTE(review): this package has no "use strict", so $match, $new_fs, $i, $j,
# $id, $att_val etc. are package globals whose state persists between calls.
sub copy_head_np
{
my ($pos_tag)=$_[0];
my ($sent)=$_[1];
my $vibh_home = $_[2];
my %hash=();
# Select the POS prefix that identifies a head for this chunk type.
if($pos_tag =~ /^NP/)
{
$match = "NN"; #Modified in version 1.4
#For NST
}
if($pos_tag =~ /^V/ )
{
$match = "V";
}
if($pos_tag =~ /^JJP/ )
{
$match = "J";
}
if($pos_tag =~ /^CCP/ )
{
$match = "CC";
}
if($pos_tag =~ /^RBP/ )
{
$match = "RB";
}
my @np_nodes = &get_nodes(3,$pos_tag,$sent);
for($i=$#np_nodes;$i>=0;$i--)
{
my (@childs)=&get_children($np_nodes[$i],$sent);
# Scan children right-to-left looking for a head candidate.
$j = $#childs;
while($j >= 0)
{
my($f0,$f1,$f2,$f3,$f4)=&get_fields($childs[$j],$sent);
$word=$f2;
my $fs_ref = &read_FS($f4);
my @name_val = &get_values("name", $fs_ref);
if($f3 eq "PRP") ##to make sure that the pronouns are identified correctly
{
$f3 = "NN";
}
if($f3=~/^$match/)
{
# Track how many times this word form has been seen for unique naming.
if($hash{$f2} eq "")
{
$hash{$word}=1;
}
elsif($hash{$f2} ne "")
{
$hash{$word}=$hash{$word}+1;
}
$id=$hash{$word};
my ($x,$y)=split(/>/,$f4);
$x =~ s/ name=[^ >]+//;
if($id==1)
{
$att_val="$word";
}
elsif($id!=1)
{
$att_val="$word"."_"."$id";
}
# Build the chunk-level FS carrying the head attribute.
$new_fs = $x." head=\'$name_val[0]\'>";
#$new_fs = $x." head=$name_val[0]>";
#my $new_head_fs=$x." name=\"$att_val\">";
#&modify_field($childs[$j],4,$new_head_fs,$sent);
last;
}
elsif($j == 0)
{
# No child matched: fall back to the rightmost child, skipping PSP/PRP.
my($f0,$f1,$f2,$f3,$f4)=&get_fields($childs[$#childs],$sent);
#-----------------modifications to handle PRP and PSP case------------------
$change=$#childs;
while(1)
{
if($f3 eq "PSP" or $f3 eq "PRP")
{
$change=$change-1;
if($childs[$change] eq "") ##Modifications per Version 1.3
{ ##To handle NP chunks with single PSP
$change=$change+1; ##
last; ##
}
($f0,$f1,$f2,$f3,$f4)=&get_fields($childs[$change],$sent);
}
else
{
last;
}
}
$new_fs = $f4;
$word=$f2;
my $fs_ref = &read_FS($f4);
my @name_val = &get_values("name", $fs_ref);
if($hash{$f2} eq "")
{
$hash{$word}=1;
}
elsif($hash{$f2} ne "")
{
$hash{$word}=$hash{$word}+1;
}
$id=$hash{$word};
#--------------------------------------------------------------------------------
my ($x,$y)=split(/>/,$f4);
$x =~ s/ name=[^ >]+//;
if($id==1)
{
$att_val="$word";
}
elsif($id!=1)
{
$att_val="$word"."_"."$id";
}
$new_fs = $x." head=\'$name_val[0]\'>";
#$new_fs = $x." head=$name_val[0]>";
#my $new_head_fs=$x." name=\"$att_val\">";
#&modify_field($childs[$change],4,$new_head_fs,$sent);
}
$j--;
}
# Write the head-bearing FS back onto the chunk node, merging when the
# chunk already has a feature structure.
($f0,$f1,$f2,$f3,$f4) = &get_fields($np_nodes[$i],$sent);
if($f4 eq '')
{
##print "1check ---$new_fs\n";
&modify_field($np_nodes[$i],4,$new_fs,$sent);
($f0,$f1,$f2,$f3,$f4) = &get_fields($np_nodes[$i],$sent);
$fs_ptr = &read_FS($f4,$sent);
#print "---x--$x\n";
#&add_attr_val("name",$head_att_val,$fs_ptr,$sent);
($f0,$f1,$f2,$f3,$f4) = &get_fields($np_nodes[$i],$sent);
#print "2check ---$f4\n";
}
else
{
$fs_ptr = &read_FS($f4,$sent);
$new_fs_ptr = &read_FS($new_fs,$sent);
&merge($fs_ptr,$new_fs_ptr,$sent);
$fs_string = &make_string($fs_ptr);
&modify_field($np_nodes[$i],4,$fs_string,$sent);
($f0,$f1,$f2,$f3,$f4) = &get_fields($np_nodes[$i],$sent);
$fs_ptr = &read_FS($f4,$sent);
#&add_attr_val("name",$head_att_val,$fs_ptr,$sent);
#&modify_field($np_nodes[$i], 4, $head_att_val,$sent);
}
}
#print "hiii--\n"
#&print_tree();
#print "hiii\n";
}
1;
package ILMT::URD::HIN::ComputeHead::get_head_vg;
use Exporter qw(import);
use ILMT::URD::HIN::SSFAPI::feature_filter;
use ILMT::URD::HIN::SSFAPI::shakti_tree_api;
our @EXPORT = qw(copy_head_vg);
#&AddID($ARGV[0]);
# Find the head child of every chunk tagged $pos_tag and copy that child's
# "name" attribute into the chunk's feature structure as head='...'.
# Unlike copy_head_np this scans children left-to-right (verb groups are
# head-initial here) and falls back to the last child when nothing matches.
# NOTE(review): this package has no "use strict", so $match, $new_fs, $i, $j,
# $id, $att_val, @np_nodes etc. are package globals shared between calls.
sub copy_head_vg
{
my($pos_tag) = $_[0];
my($sent) = $_[1];
my %hash=();
# Select the POS prefix that identifies a head for this chunk type.
if($pos_tag =~ /^NP/)
{
$match = "N";
}
if($pos_tag =~ /^V/ )
{
$match = "V";
}
if($pos_tag =~ /^JJP/ )
{
$match = "J";
}
if($pos_tag =~ /^CCP/ )
{
$match = "CC";
}
if($pos_tag =~ /^RBP/ )
{
$match = "RB";
}
@np_nodes = &get_nodes(3,$pos_tag,$sent);
for($i=$#np_nodes; $i>=0; $i--)
{
my(@childs) = &get_children($np_nodes[$i],$sent);
# Scan children left-to-right looking for a head candidate.
$j = 0;
while($j <= $#childs)
{
my($f0,$f1,$f2,$f3,$f4) = &get_fields($childs[$j],$sent);
$word=$f2;
if($f3 =~ /^$match/)
{
$new_fs = $f4;
my $fs_ref = &read_FS($f4);
my @name_val = &get_values("name", $fs_ref);
# Track how many times this word form has been seen for unique naming.
if($hash{$f2} eq "")
{
$hash{$word}=1;
}
elsif($hash{$f2} ne "")
{
$hash{$word}=$hash{$word}+1;
}
$id=$hash{$word};
my ($x,$y)=split(/>/,$f4);
$x =~ s/ name=[^ >]+//;
if($id==1)
{
$att_val="$word";
}
elsif($id!=1)
{
$att_val="$word"."_"."$id";
}
# Build the chunk-level FS carrying the head attribute.
$new_fs = $x." head=\'$name_val[0]\'>";
#$new_fs = $x." head=$name_val[0]>";
#my $new_head_fs=$x." name=\"$att_val\">";
#&modify_field($childs[$j],4,$new_fs,$sent);
last;
}
elsif($j == 0)
{
# No match yet: provisionally use the last child as the head.
my($f0,$f1,$f2,$f3,$f4) = &get_fields($childs[$#childs],$sent);
$word=$f2;
my $fs_ref = &read_FS($f4);
my @name_val = &get_values("name", $fs_ref);
if($hash{$f2} eq "")
{
$hash{$word}=1;
}
elsif($hash{$f2} ne "")
{
$hash{$word}=$hash{$word}+1;
}
$id=$hash{$word};
my ($x,$y)=split(/>/,$f4);
$x =~ s/ name=[^ >]+//;
if($id==1)
{
$att_val="$word";
}
elsif($id!=1)
{
$att_val="$word"."_"."$id";
}
$new_fs = $x." head=\'$name_val[0]\'>";
#$new_fs = $x." head=$name_val[0]>";
#my $new_head_fs=$x." name=\"$att_val\">";
#&modify_field($childs[$#childs],4,$new_fs,$sent);
}
$j++;
}
# Write the head-bearing FS back onto the chunk node, merging when the
# chunk already has a feature structure.
($f0,$f1,$f2,$f3,$f4) = &get_fields($np_nodes[$i],$sent);
if($f4 eq '')
{
&modify_field($np_nodes[$i],4,$new_fs,$sent);
}
else
{
$fs_ptr = &read_FS($f4,$sent);
$new_fs_ptr = &read_FS($new_fs,$sent);
&merge($fs_ptr,$new_fs_ptr,$sent);
$fs_string = &make_string($fs_ptr,$sent);
&modify_field($np_nodes[$i],4,$fs_string,$sent);
}
}
}
1;
package ILMT::URD::HIN::ComputeHead::make_chunk_name;
use Exporter qw(import);
use ILMT::URD::HIN::SSFAPI::feature_filter;
use ILMT::URD::HIN::SSFAPI::shakti_tree_api;
our @EXPORT = qw(make_chunk_name);
#use strict;
sub make_chunk_name()
{
my($i, @leaves, $new_fs, @tree, $line, $string, $file, @lines, @string2, $string_ref1, $string1, $string_name);
$input = $_[0];
my %hash_index;
my %hash_chunk;
my @final_tree;
#&read_story($input);
my @tree = &get_children(0, $input);
my $ssf_string = &get_field($tree[0], 3, $input);
if($ssf_string eq "SSF")
{
@final_tree = &get_children(1, $input);
}
else
{
@final_tree = @tree;
}
my $k, $index=0, $count=0, $index_chunk=0;
@tree = &get_children($s,$input);
foreach $i(@final_tree)
{
$string = &get_field($i, 4,$input);
@leaves = &get_children($i,$input);
my $string_fs = &read_FS($string, $input);
foreach $m(@leaves)
{
$string1 = &get_field($m, 4,$input);
$string_fs1 = &read_FS($string1, $input);
$new_fs = &make_string($string_fs1, $input);
&modify_field($m, 4, $new_fs, $input);
}
}
foreach $i(@final_tree)
{
my $count_chunk=0;
$index_chunk++;
$string = &get_field($i, 4, $input);
$string_fs = &read_FS($string, $input);
my @old_value_name = &get_values("name", $string_fs, $input);
#print @old_value_name,"\n";
if($old_value_name[0]=~/\'/ or $old_drel[0]=~/\"/)
{
$old_value_name[0]=~s/\'//g;
$old_value_name[0]=~s/\"//g;
}
my @chunk = &get_field($i, 3, $input);
for ($ite1=1; $ite1<$index_chunk; $ite1++)
{
my $actual_chunk_name = $hash_chunk{$ite1};
my @chunk_name_split = split(/__/, $actual_chunk_name);
if($chunk_name_split[0] eq $chunk[0])
{
$count_chunk++;
}
}
my @chunk1;
if($count_chunk == 0)
{
$hash_chunk{$index_chunk} = "$chunk[0]"."__1";
$chunk1[0] = $chunk[0];
}
else
{
$new_count_chunk = $count_chunk+1;
$chunk1[0] = "$chunk[0]"."$new_count_chunk";
$hash_chunk{$index_chunk} = "$chunk[0]"."__$new_count_chunk";
}
foreach $m_drel(@final_tree)
{
my $string_child = &get_field($m_drel, 4, $input);
my $string_fs_child = &read_FS($string_child, $input);
my @old_drel = &get_values("drel", $string_fs_child, $input);
my @old_dmrel = &get_values("dmrel", $string_fs_child, $input);
my @old_reftype = &get_values("reftype", $string_fs_child, $input);
my @old_coref = &get_values("coref", $string_fs_child, $input);
#my @old_attr = &get_attributes($string_fs_child, $input);
if($old_drel[0]=~/\'/ or $old_drel[0]=~/\"/)
{
$old_drel[0]=~s/\'//g;
$old_drel[0]=~s/\"//g;
}
if($old_dmrel[0]=~/\'/ or $old_dmrel[0]=~/\"/)
{
$old_dmrel[0]=~s/\'//g;
$old_dmrel[0]=~s/\"//g;
}
if($old_reftype[0]=~/\'/ or $old_reftype[0]=~/\"/)
{
$old_reftype[0]=~s/\'//g;
$old_reftype[0]=~s/\"//g;
}
if($old_coref[0]=~/\'/ or $old_coref[0]=~/\"/)
{
$old_coref[0]=~s/\'//g;
$old_coref[0]=~s/\"//g;
}
my @old_drel_name = split(/:/, $old_drel[0]);
my @old_dmrel_name = split(/:/, $old_dmrel[0]);
my @old_reftype_name = split(/:/, $old_reftype[0]);
my @old_coref_name = split(/:/, $old_coref[0]);
if(($old_drel_name[1] eq $old_value_name[0]) && ($old_drel_name[1] ne ""))
{
my @new_drel;
$new_drel[0] = "$old_drel_name[0]:$chunk1[0]";
&del_attr_val("drel", $string_fs_child, $input);
# &add_attr_val("drel", \@new_drel, $string_fs_child, $input);
}
if(($old_dmrel_name[1] eq $old_value_name[0]) && ($old_dmrel_name[1] ne ""))
{
my @new_dmrel;
$new_dmrel[0] = "$old_dmrel_name[0]:$chunk1[0]";
&del_attr_val("dmrel", $string_fs_child, $input);
# &add_attr_val("dmrel", \@new_dmrel, $string_fs_child, $input);
}
if(($old_reftype_name[1] eq $old_value_name[0]) && ($old_reftype_name[1] ne ""))
{
my @new_reftype;
$new_reftype[0] = "$old_reftype_name[0]:$chunk1[0]";
&del_attr_val("reftype", $string_fs_child, $input);
# &add_attr_val("reftype", \@new_reftype, $string_fs_child, $input);
}
if(($old_coref_name[0] eq $old_value_name[0]) && ($old_coref_name[0] ne ""))
{
my @new_coref;
$new_coref[0] = $chunk1[0];
&del_attr_val("coref", $string_fs_child, $input);
# &add_attr_val("coref", \@new_coref, $string_fs_child, $input);
}
# my $name_attribute_chunk = &make_string($string_fs_child, $input);
# &modify_field($m_drel, 4, $name_attribute_chunk, $input);
}
&del_attr_val("name", $string_fs, $input);
# &add_attr_val("name", \@chunk1, $string_fs, $input);
# my $name_fs_chunk = &make_string($string_fs, $input);
# &modify_field($i, 4, $name_fs_chunk, $input);
my $string1 = &get_field($i, 4, $input);
my $attr = &read_FS($string1, $input);
#my @attribute_array = &get_attributes($attr, $input);
#$count=@attribute_array;
#print $count, "\n";
}
foreach $i(@final_tree)
{
$string = &get_field($i, 4, $input);
@leaves = &get_children($i, $input);
foreach $m(@leaves)
{
$count=0;
$index++;
$string2 = &get_field($m, 4, $input);
$string_fs2 = &read_FS($string2, $input);
my @token = &get_field($m, 2, $input);
for ($ite=1; $ite<$index; $ite++)
{
my $actual_name = $hash_index{$ite};
my @name_split = split(/__/, $actual_name);
if($name_split[0] eq $token[0])
{
$count++;
}
}
if($count == 0)
{
my @token1;
$token1[0] = $token[0];
&del_attr_val("name", $string_fs2, $input);
&add_attr_val("name", \@token1, $string_fs2, $input);
my $name_fs = &make_string($string_fs2, $input);
&modify_field($m, 4, $name_fs,$input);
$hash_index{$index} = "$token[0]"."__1";
}
else
{
$new_count = $count+1;
my @new_token = "$token[0]"."$new_count";
&del_attr_val("name", $string_fs2, $input);
&add_attr_val("name", \@new_token, $string_fs2,$input);
my $name_fs = &make_string($string_fs2,$input);
&modify_field($m, 4, $name_fs, $input);
$hash_index{$index} = "$token[0]"."__$new_count";
}
}
}
}
1;
# Object files
*.o
*.ko
*.obj
*.elf
# Precompiled Headers
*.gch
*.pch
# Libraries
*.lib
*.a
*.la
*.lo
# Shared objects (inc. Windows DLLs)
*.dll
*.so
*.so.*
*.dylib
# Executables
*.exe
*.out
*.app
*.i*86
*.x86_64
*.hex
# Debug files
*.dSYM/
# Files Generated on Compilation
lib/ILMT/URD/HIN/Morph/analyser/data/avy.c
lib/ILMT/URD/HIN/Morph/analyser/data/const.c
lib/ILMT/URD/HIN/Morph/analyser/data/const.h
lib/ILMT/URD/HIN/Morph/analyser/data/dict_final
lib/ILMT/URD/HIN/Morph/analyser/data/feature_value.c
lib/ILMT/URD/HIN/Morph/analyser/data/pdgm_offset_info
lib/ILMT/URD/HIN/Morph/analyser/data/shell/suff_info
lib/ILMT/URD/HIN/Morph/analyser/data/suff
lib/ILMT/URD/HIN/Morph/analyser/data/suff_info.c
lib/ILMT/URD/HIN/Morph/analyser/data/uword.dbm
morph.log
[submodule "API"]
path = API
url = https://gitlab.com/ilmt/ILMT-URD-HIN-SSFAPI.git
use strict;
use warnings;
use Data::Dumper;
use Graph::Directed;
use JSON;
use List::Util qw(reduce);
use Mojolicious::Lite;
use Mojo::Redis2;
use lib "./lib";
use ILMT::URD::HIN::Morph;
# Identity of this service in the pipeline graph; edge keys are "<name>_<modid>".
my $modulename = "ilmt.urd.hin.morph";
# In-memory per-job store: $database{$jobid} holds inputs collected so far,
# $database{"data_$jobid"} accumulates the full data payload for the job.
my %database = ();
# Lazily-created, per-worker Redis connection ("state" keeps one instance).
helper redis => sub {
state $r = Mojo::Redis2->new(url => "redis://redis:6379");
};
sub process {
    # Normalise the collected per-job inputs and hand them to the Morph module.
    # A single upstream payload is exposed under the generic "data" key;
    # multiple payloads keep their keys minus the trailing "_<modid>" suffix.
    my ($inputs) = @_;
    my %prepared;
    my @input_keys = keys %{$inputs};
    if (@input_keys == 1) {
        $prepared{data} = $inputs->{ $input_keys[0] };
    } else {
        for my $key (@input_keys) {
            (my $bare = $key) =~ s/_[^_]*$//;
            $prepared{$bare} = $inputs->{$key};
        }
    }
    return ILMT::URD::HIN::Morph::process(%prepared);
}
sub genError {
    # Render a JSON error body with HTTP status 400 on the given controller.
    my ($controller, $message) = @_;
    $controller->render(
        json   => to_json({ Error => $message }),
        status => 400,
    );
}
sub genDAGGraph {
    # Build a Graph::Directed from an adjacency map: { from => [to, ...] }.
    my ($edge_map) = @_;
    my $graph = Graph::Directed->new();
    while (my ($src, $targets) = each %{$edge_map}) {
        $graph->add_edge($src, $_) for @{$targets};
    }
    return $graph;
}
# POST /pipeline: one step of a distributed NLP pipeline.  The request body
# carries {modid, jobid, data, edges}.  Inputs from upstream modules are
# buffered per job; once all expected inputs have arrived the module runs,
# then either forwards the result to downstream modules over HTTP or
# publishes the finished job on Redis.
post '/pipeline' => sub {
my $c = shift;
my $ilmt_json = decode_json($c->req->body);
# Mandatory fields; genError renders a 400 and we bail out of the route.
my $ilmt_modid = $ilmt_json->{modid} || genError($c, "No ModuleID Specified!") && return;
my $ilmt_jobid = $ilmt_json->{jobid} || genError($c, "No JobID Specified!") && return;
my $ilmt_data = $ilmt_json->{data} || genError($c, "No Data Specified!") && return;
my $ilmt_dag = genDAGGraph($ilmt_json->{edges});
genError($c, "Edges not specified!") && return if (!$ilmt_dag);
# This module's node name in the DAG, e.g. "ilmt.urd.hin.morph_3".
my $ilmt_module = $modulename . '_' . $ilmt_modid;
# Upstream modules whose outputs this node consumes.
my @ilmt_inputs = map {@$_[0]} $ilmt_dag->edges_to($ilmt_module);
if (!$database{$ilmt_jobid}) {
$database{$ilmt_jobid} = {};
$database{"data_$ilmt_jobid"} = {};
}
# Record any upstream payloads present in this request, keyed without the
# "_<modid>" suffix.
foreach (@ilmt_inputs) {
my $input_module = $_ =~ s/_[^_]*$//r;
$database{$ilmt_jobid}{$input_module} = $ilmt_data->{$_} if $ilmt_data->{$_};
}
# Merge this request's data into the job-wide accumulated data.
%{$database{"data_$ilmt_jobid"}} = (%{$database{"data_$ilmt_jobid"}}, %{$ilmt_data});
# Run only when every expected upstream input has been collected.
if (@ilmt_inputs == keys %{$database{$ilmt_jobid}}) {
$c->render(json => "{Response: 'Processing...'}", status => 202);
my $ilmt_output = process($database{$ilmt_jobid});
$ilmt_data->{$ilmt_module} = $ilmt_output;
%{$ilmt_data} = (%{$ilmt_data}, %{$database{"data_$ilmt_jobid"}});
# Downstream modules to notify with the augmented payload.
my @ilmt_next = map {@$_[1]} $ilmt_dag->edges_from($ilmt_module);
if (@ilmt_next) {
foreach (@ilmt_next) {
# Split "host.name_modid" into the service host and its modid.
my @module_info = split(/_([^_]+)$/, $_);
my $next_module = $module_info[0];
$ilmt_json->{modid} = $module_info[1];
# Fire-and-forget async POST; the callback only logs the outcome.
$c->ua->post("http://$next_module/pipeline" => json
=> from_json(encode_json($ilmt_json), {utf8 => 1}) => sub {
my ($ua, $tx) = @_;
my $msg = $tx->error ? $tx->error->{message} : $tx->res->body;
$c->app->log->debug("[$ilmt_jobid]: $msg\n");
});
}
} else {
# Terminal node: announce job completion on the job's Redis channel.
$c->redis->publish($ilmt_jobid => encode_json($ilmt_json));
}
delete $database{$ilmt_jobid};
} else {
$c->render(json => "{Response: 'Waiting for more inputs...'}", status => 202);
}
};
app->start;
package ILMT::URD::HIN::Morph;
use strict;
use warnings;
use Dir::Self;
use Data::Dumper;
use IO::Socket::INET;
use ILMT::URD::HIN::Morph::adj_gen;
my $cwd = __DIR__;
# External analyser daemons this module manages.  Each entry gives the
# executable path, its command-line arguments, and the local TCP port the
# daemon listens on (see run_daemons/call_daemon below).
my %daemons = (
"morph" => {
"path" => "$cwd/Morph/analyser/morph_urd.exe",
"args" => "--logfilepath $cwd/Morph/morph.log " .
"--pdgmfilepath $cwd/Morph/analyser/data/ " .
"--uwordpath $cwd/Morph/analyser/data/dict_final " .
"--dictfilepath $cwd/Morph/analyser/data/dict " .
"-ULDWH --tcpserver",
"port" => "31002"
},
"nuqta_adder" => {
"path" => "$cwd/Morph/nuqta-adder.py",
"args" => "",
"port" => "31003"
}
);
# Ordered sequence of processing steps applied by process(); each name is a
# sub in this package, called with the previous step's output as "data".
my @dispatch_seq = (
"remove_sentence_tag",
"remove_ssf",
"morph_analyser",
"nuqta_adder_urd",
"add_sentence_tag",
"adjective_generator"
);
sub add_sentence_tag {
    # Wrap the analyser output in a single SSF <Sentence> element.
    # Args (hash): data => string payload.  Returns the wrapped string.
    #
    # The original implementation copied $data line by line through a
    # bareword global filehandle (INFILE) that was never closed; the loop
    # was an identity copy, so plain concatenation is equivalent and leaks
    # nothing.
    my %par = @_;
    my $data = defined $par{'data'} ? $par{'data'} : '';
    return "<Sentence id=\"1\">\n" . $data . "</Sentence>\n";
}
sub morph_analyser {
    # Send the current payload to the "morph" daemon and return its reply.
    my (%params) = @_;
    return call_daemon("morph", $params{data});
}
sub nuqta_adder_urd {
    # Send the current payload to the "nuqta_adder" daemon and return its reply.
    my (%params) = @_;
    return call_daemon("nuqta_adder", $params{data});
}
sub process {
    # Pipe the "data" argument through every submodule named in
    # @dispatch_seq, in order, and return the final result.
    # The payload is byte-encoded for the daemons and decoded on the way out.
    my %args = @_;
    utf8::encode($args{"data"});
    for my $step (@dispatch_seq) {
        my $handler = __PACKAGE__->can($step);
        $args{'data'} = $handler->(%args);
    }
    utf8::decode($args{"data"});
    return $args{"data"};
}
sub remove_sentence_tag {
    # Strip SSF markup lines (any line beginning with '<') from the payload.
    # Args (hash): data => string.  Returns the remaining lines unchanged.
    #
    # Fixed: use a lexical filehandle instead of the bareword global INFILE
    # (shared across subs in this package) and close it when done.
    my %par = @_;
    my $data = $par{'data'};
    open my $fh, '<', \$data or die $!;
    my $result = "";
    while (my $line = <$fh>) {
        $result .= $line unless $line =~ /^</;
    }
    close $fh;
    return $result;
}
# Strip SSF scaffolding from the payload: drops the "0\t..." sentence-header
# line and collapses runs of chunk-closing "\t))" lines.  Everything else is
# passed through unchanged.  Args (hash): data => string.
sub remove_ssf {
my %par = @_;
my $data = $par{'data'};
open INFILE, '<', \$data or die $!;
my $result = "";
# $_prev remembers the previous line; initialised to a dummy value so the
# first line never matches the "))" collapse rule.
my $_prev = 1;
while (my $line=<INFILE>) {
if ($line=~m/^0\t/) {
# NOTE(review): sets $_prev to 2 (dummy), so a ")) " directly after the
# header is kept — presumably intentional; confirm against sample data.
$_prev=2;
next;
} elsif ($line=~/^\t\)\)/ and $_prev=~/^\t\)\)/) {
# Consecutive "\t))" closers: keep only the first occurrence.
$_prev="\t))";
next;
} else {
$_prev="$line";
$result .= "$line";
}
}
return $result;
}
# Launch the named daemons from %daemons in the background.  A per-daemon
# lock file under ./run/ plus "flock -w 0.01" ensures only one instance per
# port is started; if the lock is held we warn and assume it is running.
sub run_daemons {
my @daemon_names = @_;
foreach my $daemon_name (@daemon_names) {
my %daemon = %{$daemons{$daemon_name}};
# Trailing "&" detaches the daemon; the port is passed as last argument.
my $cmd = "$daemon{path} $daemon{args} $daemon{port} &";
my $runfile = __DIR__ . "/run/${daemon_name}_$daemon{port}";
system("flock -e -w 0.01 $runfile -c '$cmd'") == 0
or warn "[" . __PACKAGE__ . "]: Port $daemon{port} maybe unavailable! $?\n";
}
}
# Send $input to the named daemon over TCP (localhost, port from %daemons)
# and return the daemon's complete reply.  Reads until the daemon closes
# the connection (getline returns undef at EOF).
sub call_daemon {
my ($daemon_name, $input) = @_;
my $port = $daemons{$daemon_name}{port};
my ($socket, $client_socket);
$socket = new IO::Socket::INET (
PeerHost => '127.0.0.1',
PeerPort => $port,
Proto => 'tcp',
) or die "ERROR in Socket Creation : $!\n";
# Daemons are line-oriented: terminate the request with a newline.
$socket->send("$input\n");
my $result = "";
while (my $line = $socket->getline) {
$result .= $line;
}
$socket->close();
return $result;
}
sub adjective_generator {
    # Run the adjective generator over the payload and return its output.
    # adj_gen() takes scalar refs which it opens as in-memory files.
    my %par = @_;
    my $source = $par{'data'};
    my $generated = "";
    adj_gen(\$source, \$generated);
    return $generated;
}
run_daemons(("morph", "nuqta_adder"));
1;
package ILMT::URD::HIN::Morph::adj_gen;
use Exporter qw(import);
use ILMT::URD::HIN::SSFAPI::feature_filter;
use ILMT::URD::HIN::SSFAPI::shakti_tree_api;
our @EXPORT = qw(adj_gen);
# Adjective gender/number normalisation over SSF-style tab-separated lines.
# $input / $output are scalar refs (opened as in-memory files by the caller).
# For each token line "addr\ttoken\tpos\tfs", every "|"-separated feature
# structure is inspected; feminine "any"-number adjectives whose lex ends in
# I / IM / Iz get the masculine-style ending A / Az, and the rebuilt feature
# structures are written back out.  Lines without a feature structure are
# copied through (with special-casing of "((" / "))" bracket lines).
# NOTE(review): this package does not "use strict"; $line, $addr, $tkn, $pos,
# $fs, @fss, @string, $newfs, $lex etc. are package globals.
sub adj_gen {
my ($input, $output) = @_;
open ($infile, '<', $input) or die "$!";
open ($outfile, '>', $output) or die "$!";
while($line=<$infile>)
{
chomp ($line);
($addr, $tkn, $pos, $fs) = split(/\t/,$line);
if($fs ne "")
{
# Multiple alternative feature structures are separated by "|".
@fss = split(/\|/, $fs);
my $len = @fss;
@string = "";
$newfs = "";
my $i=0;
foreach $af (@fss)
{
my $FSreference = &read_FS($af, $line);
my @lex_val = &get_values("lex", $FSreference);
my @cat_val = &get_values("cat", $FSreference);
my @gen_val = &get_values("gen", $FSreference);
my @num_val = &get_values("num", $FSreference);
#print "lex:$lex cat:$cat gen:$gen num:$num\n";
# Feminine any-number adjective ending in "I": rewrite ending to "A".
if ($lex_val[0] =~ /(\.)*I$/ and $cat_val[0] eq "adj" and $gen_val[0] eq "f" and $num_val[0] eq "any")
{
$lex = $lex_val[0];
$lex =~ s/I$/A/;
#print "my lex $lex";
my @lex_arr=();
push @lex_arr,$lex;
&update_attr_val("lex", \@lex_arr, $FSreference, $af);
$string[$i] = &make_string($FSreference, $af);
}
# Same case for endings "IM"/"Iz": rewrite to "Az".
elsif ($lex_val[0] =~ /(\.)*(IM|Iz)$/ and $cat_val[0] eq "adj" and $gen_val[0] eq "f" and $num_val[0] eq "any")
{
$lex = $lex_val[0];
$lex =~ s/(IM|Iz)$/Az/;
#print "my lex $lex";
my @lex_arr=();
push @lex_arr,$lex;
&update_attr_val("lex", \@lex_arr, $FSreference, $af);
$string[$i] = &make_string($FSreference, $af);
}
# Otherwise re-serialise the feature structure unchanged.
else
{
$lex = $lex_val[0];
my @lex_arr=();
push @lex_arr,$lex;
&update_attr_val("lex", \@lex_arr, $FSreference, $af);
$string[$i] = &make_string($FSreference, $af);
}
$i++;
}
# Re-join the rebuilt alternatives with "|" (no trailing separator).
foreach $string (@string)
{
if(--$len)
{
$newfs=$newfs.$string."|";
}
else
{
$newfs=$newfs.$string;
}
}
delete @string[0..$#string];
delete @lex_root[0..$#lex_root];
delete @fss[0..$#fss];
# Chunk bracket lines carry the fs in a different column layout.
if($line =~ /\(\(/ or $line =~ /\)\)/)
{
($addr1,$lex,$pos,$fs) = split(/\t/,$line);
if ($output ne "")
{
#print $outfile $num,"\t",$lex,"\t",$pos,"\t",$newfs,"\n";
print $outfile "$addr1\t$lex\t$pos\t$newfs\n";
}
else
{
print $addr1,"\t",$lex,"\t",$pos,"\t",$newfs,"\n";
}
}
else
{
if ($output ne "")
{
print $outfile $addr,"\t",$tkn,"\t",$pos,"\t",$newfs,"\n";
}
else
{
print $addr,"\t",$tkn,"\t",$pos,"\t",$newfs,"\n";
}
}
} # end if fs ne ""
else { # try to understand this else block
# NOTE(review): $lex here still holds its value from the previous
# iteration — presumably a latent bug; confirm intended behaviour.
if($lex ne "((" and $lex ne "))")
{
if ($output ne "")
{
print $outfile $addr,"\t",$tkn,"\t",$pos,"\t",$fs,"\n";
}
else
{
print $addr,"\t",$tkn,"\t",$pos,"\t",$fs,"\n";
}
}
else {
if ($output ne "")
{
print $outfile $line."\n";
}
else
{
print $line."\n";
}
}
}
} # end while loop
close($infile);
close($outfile);
}
/**
* File Name : Avy_srch.c
*/
/** Function : Avy_srch
* Avy_srch(morph)
* This function checks whether the given word is Avy or not.
* If it is true it returns the numeric value.
 * It takes morph of char type as argument, where morph is the input word.
* Return :int , returns a numeric value indicating as a numeral or a special
* character
*/
#include <string.h>
#include <stdio.h>
#include <ctype.h>
#include "defn.h"
#include "morph_logger.h"
extern char *program_name;
extern FILE *log_file;
extern char *log_messg;
#define FUNCTION "Avy_srch()"
extern int sizeof_ind; /* table size */
extern char indword[][AVYWORDSIZE]; /* table start address */
extern void *my_bsearch();
/**
* Prototypes of the functions in this file with a file scope
*/
/*
 * AVY_srch: classify a word against the avyaya (indeclinable) table.
 * Returns -3 for a numeral/special token (no alphabetic character),
 *         -1 when the word is found in the avy table,
 *         -2 when it is neither.
 */
int AVY_srch(morph)
char morph[Morphsize];          /* input word */
{
    int loop1, size_avy_struct; /* loop index, size of one avy entry */
    char *avy_ptr, dummy_avy[AVYWORDSIZE];  /* search result, sizing dummy */
    int morph_size;             /* word length */
    int ISNUMBER;               /* flag: no alphabetic character seen */

    PRINT_LOG(log_file,
              " checking whether the given word is Avy or not.\n");
    size_avy_struct = sizeof(dummy_avy);
    morph_size = strlen(morph);

    /* A token containing no alphabetic character is treated as a numeral
     * or special character. */
    ISNUMBER = 1;
    /* Fixed off-by-one: the original loop ran to loop1 <= morph_size and
     * also inspected the terminating NUL byte.  The cast avoids undefined
     * behaviour of isalpha() on negative char values. */
    for (loop1 = 0; loop1 < morph_size; ++loop1)
        if (isalpha((unsigned char) morph[loop1])) {
            ISNUMBER = 0;
            break;
        }
    if (ISNUMBER)               /* no letters at all */
        return (-3);            /* numeral */
    else {
        /*** searching in avy file ***/
        /* my_bsearch returns a pointer to the key's slot in the table, or
         * NULL when the word is absent. */
        avy_ptr =
            (char *) my_bsearch(morph, (char *) indword, sizeof_ind,
                                size_avy_struct, strcmp);
        if (avy_ptr != NULL) {  /* searched in AVY file */
            /* Log only on a hit: passing NULL to "%s" in the original
             * sprintf was undefined behaviour.  (Also fixed the
             * "identifeid" typo in the message.) */
            sprintf(log_messg, "INFO: AVY has identified as %s", avy_ptr);
            PRINT_LOG(log_file,
                      " checking whether the given word is Avy or not.\n");
            return (-1);
        } else {
            PRINT_LOG(log_file,
                      " checking whether the given word is Avy or not.\n");
            return (-2);
        }                       /* Neither a numeral nor an avy */
    }
}
# Build configuration for the Urdu morph analyser (morph_urd.exe).
CC = gcc
CC_FLAGS = -g -DGNU -c
# Directory holding the generated C data sources (see the data_files rule).
INCLUDE_C_DATA =./data/
OBJECTF = Avy_srch.o avy.o new_build_struct_hin.o chk_uword_dict.o const.o cp_suff_add_struct.o cp_to_struct.o cp_to_struct_chk_pdgm.o dict_lookup.o feature_value.o get_paradigm.o get_pdgm_info.o get_suff.o main.o morph_hindi.o my_fatal.o match_dict.o order_ssf.o print.o print_ssf_hori.o print_default_ans.o read_file.o snt_mark.o suff_build_struct.o suff_info.o suff_tab_lookup.o verify_root.o fgetword.o my_blk_bsrch.o my_bsrch.o get_spell_variation.o lex_port.o print_spell_variation.o morph_logger.o
# Link step: needs gdbm for the user-word DBM file and glib-2.0.
morph_urd.exe : ${OBJECTF}
${CC} -o morph_urd.exe ${OBJECTF} -lgdbm `pkg-config --cflags glib-2.0` `pkg-config --libs glib-2.0`
# Per-object compilation rules.
fgetword.o : fgetword.c morph_logger.h
${CC} ${CC_FLAGS} fgetword.c
my_blk_bsrch.o : my_blk_bsrch.c
${CC} ${CC_FLAGS} my_blk_bsrch.c
my_bsrch.o : my_bsrch.c
${CC} ${CC_FLAGS} my_bsrch.c
Avy_srch.o : Avy_srch.c
${CC} ${CC_FLAGS} Avy_srch.c
# Objects built from generated sources depend on the data_files rule.
avy.o : data_files ${INCLUDE_C_DATA}avy.c defn.h ${INCLUDE_C_DATA}const.h
${CC} ${CC_FLAGS} ${INCLUDE_C_DATA}avy.c
new_build_struct_hin.o : new_build_struct_hin.c struct.h
${CC} ${CC_FLAGS} new_build_struct_hin.c
chk_uword_dict.o : chk_uword_dict.c defn.h struct1.h
${CC} ${CC_FLAGS} chk_uword_dict.c
const.o : data_files ${INCLUDE_C_DATA}const.c
${CC} ${CC_FLAGS} ${INCLUDE_C_DATA}const.c
cp_suff_add_struct.o : cp_suff_add_struct.c struct.h struct1.h
${CC} ${CC_FLAGS} cp_suff_add_struct.c
cp_to_struct.o : cp_to_struct.c struct.h
${CC} ${CC_FLAGS} cp_to_struct.c
cp_to_struct_chk_pdgm.o : cp_to_struct_chk_pdgm.c struct.h
${CC} ${CC_FLAGS} cp_to_struct_chk_pdgm.c
dict_lookup.o : dict_lookup.c defn.h
${CC} ${CC_FLAGS} dict_lookup.c
feature_value.o : data_files ${INCLUDE_C_DATA}feature_value.c defn.h ${INCLUDE_C_DATA}const.h
${CC} ${CC_FLAGS} ${INCLUDE_C_DATA}feature_value.c
get_paradigm.o : get_paradigm.c defn.h
${CC} ${CC_FLAGS} get_paradigm.c
get_pdgm_info.o : get_pdgm_info.c defn.h
${CC} ${CC_FLAGS} get_pdgm_info.c
get_suff.o : get_suff.c defn.h
${CC} ${CC_FLAGS} get_suff.c
lex_port.o : lex_port.c const.h struct.h
${CC} ${CC_FLAGS} lex_port.c
# main.c uses glib, so its compile line also carries the pkg-config flags.
main.o : main.c struct.h struct1.h defn.h glbl.h morph_logger.h
${CC} `pkg-config --cflags glib-2.0` `pkg-config --libs glib-2.0` ${CC_FLAGS} main.c
morph_logger.o: morph_logger.c
${CC} ${CC_FLAGS} morph_logger.c
my_fatal.o : my_fatal.c
${CC} ${CC_FLAGS} my_fatal.c
match_dict.o : match_dict.c defn.h struct.h struct1.h
${CC} ${CC_FLAGS} match_dict.c
morph_hindi.o : morph_hindi.c struct.h struct1.h
${CC} ${CC_FLAGS} morph_hindi.c
order_ssf.o : order_ssf.c
${CC} ${CC_FLAGS} order_ssf.c
print.o : print.c struct.h
${CC} ${CC_FLAGS} print.c
print_ssf_hori.o : print_ssf_hori.c struct.h
${CC} ${CC_FLAGS} print_ssf_hori.c
print_default_ans.o : print_default_ans.c defn.h morph_logger.h
${CC} ${CC_FLAGS} print_default_ans.c
read_file.o : read_file.c defn.h
${CC} ${CC_FLAGS} read_file.c
snt_mark.o : snt_mark.c defn.h
${CC} ${CC_FLAGS} snt_mark.c
suff_build_struct.o : suff_build_struct.c defn.h struct.h
${CC} ${CC_FLAGS} suff_build_struct.c
suff_tab_lookup.o : suff_tab_lookup.c defn.h struct.h
${CC} ${CC_FLAGS} suff_tab_lookup.c
suff_info.o : data_files ${INCLUDE_C_DATA}suff_info.c struct.h ${INCLUDE_C_DATA}const.h
${CC} ${CC_FLAGS} ${INCLUDE_C_DATA}suff_info.c
verify_root.o : verify_root.c struct.h struct1.h defn.h
${CC} ${CC_FLAGS} verify_root.c
get_spell_variation.o : get_spell_variation.c
${CC} ${CC_FLAGS} get_spell_variation.c
print_spell_variation.o : print_spell_variation.c struct1.h
${CC} ${CC_FLAGS} print_spell_variation.c
# Regenerate the C data sources via the shell scripts in data/shell.
data_files:
make -C data/shell
clean:
rm -f *.o
rm -f morph_urd.exe ../morph_urd.exe
make -C data/shell clean
/* c_api_v2.h: umbrella header for the SSF C API — pulls in the general,
 * core, SSF and feature-structure function declarations. */
#ifndef C_API_V2_H
#define C_API_V2_H
#include "functions.h"
#include "core_functions.h"
#include "ssf_functions.h"
#include "fs_functions.h"
#endif
#include "c_api_v2.h"
/* Manual smoke test for the SSF C API: reads the SSF file named in argv[1]
 * and exercises tree navigation plus attribute add/update/delete and
 * feature-structure insertion/removal, printing results for inspection. */
main(int argc ,char *argv[])
{
/*checking for creation and printing the tree ---Working*/
node *tree ;
tree=create_tree();
read_ssf_from_file(tree, argv[1]);
print_tree(tree);
printf("\n\n");
node *child= get_nth_child(tree, 2);
print_node_without_index(child);
printf("\n\n");
/*Checking add_attr_val-----Working*/
add_attr_val(child->OR, "iiit", "hyderabad");
printf("%s\n", make_or_node_to_string(child->OR)) ;
printf("Checked add_attr_val\n\n");
/*Checking update_attr_val--------Working*/
update_attr_val(child->OR, "iiit", "Andhrapradesh" );
printf("%s\n", make_or_node_to_string(child->OR)) ;
printf("Checked update_attr_val\n\n");
/*Checking add_attr_val_2--------Working*/
add_attr_val_2(child->OR->fs[0], "gvs", "reddy");
printf("%s\n", make_fs_struct_to_string(child->OR->fs[0]));
printf("Checked add_attr_val_2\n\n");
/*Checking update_attr_val_2------Working*/
update_attr_val_2(child->OR->fs[0],"gvs", "goli");
printf("%s\n", make_fs_struct_to_string(child->OR->fs[0]));
printf("Checked update_attr_val_2\n\n");
/*Checking del_attr & del_attr_2------Working*/
del_attr(child->OR, "iiit");
printf("%s\n", make_or_node_to_string(child->OR)) ;
printf("Checked del_attr & del_attr_2\n\n");
/*Checking get_attr_val--------Working*/
printf("%s\n", get_attr_val(child->OR->fs[0], "gvs"));
printf("Checked get_attr_val\n\n");
/*Checking get_nodes_with_attr_val--------Working*/
list_of_nodes *L= get_nodes_with_attr_val(tree, "head", "3");
printf("%d\n", L->size);
int i;
for (i=0; i<L->size; i++)
{
print_attr_of_or_node(L->l[i]->OR);
printf("\n");
}
/*Checking delete_fs_struct_from_or_node---Working*/
printf("\n\nbefore deleton-----");
print_attr_of_or_node(child->OR);
printf("\n");
delete_fs_struct_from_or_node(child->OR, 1);
printf("after deletion----");
print_attr_of_or_node(child->OR);
printf("\n");
printf("Checked delete_fs_struct_from_or_node\n\n");
/*Checking add_fs_struct_to_or_node-----Working*/
fs_struct *fs=read_fs("<af=watch,,,,3,0,,/new=fs/head=3>");
add_fs_struct_to_or_node(child->OR, fs);
print_attr_of_or_node(child->OR);
printf("\n");
printf("Checked add_fs_struct_to_or_node\n\n");
}
#include "c_api_v2.h"
main(int argc, char *argv[])
{
    /* Read the SSF file named on the command line and echo the parsed tree. */
    node *ssf_tree = create_tree();
    read_ssf_from_file(ssf_tree, argv[1]);
    print_tree(ssf_tree);
}
#include "c_api_v2.h"
#include <iostream>
using namespace std;
main(int argc, char *argv[])
{
node *tree=create_tree();
read_ssf_from_file(tree, argv[1]);
print_tree_to_file(tree, "output");
}
#Comments in SSF
#This is just a sample file. This may not be correct according to linguistics
1 (( NP <fs af='udklasjd,unkn,m,p,3,0,,' head=1>
1.1 " NNS <fs af=',PUNC,m,p,3,0,,'>
))
2 (( VG <fs af='watch,v,m,s,3,0,,' aspect=PROG head=2>
2.1 are VBP <fs af='be,v,m,s,3,0,,' head=2>
2.2 watching VBG <fs af='watch,v,m,s,3,0,,' aspect=PROG>
))
3 (( PP <fs af='watch,,,,3,0,gvs,reddy' aspect=PRoG head=2>|<fs af='watch,,,,3,0,dfmd,reddy' aspect=PROG head=2>
3.1 in IN <fs af='in,v,m,s,3,0,,' aspect=PROG head=3>
3.2 (( NP <fs af='the,v,m,s,3,0,,' aspect=PROG head=4>
3.2.1 the DT <fs af='house,v,m,s,3,0,,' aspect=PROG head=3>
3.2.2 house NN <fs af='the,v,m,s,3,0,,' aspect=hgfd>
))
))
int sizeof_lex = 10 ;
int sizeof_ind = 19 ;
int sizeof_suff_add = 1162 ;
int total_fe_info = 190 ;
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment