Commit f0742d16 authored by Thodoris Nestoridis's avatar Thodoris Nestoridis

fixes

parent b0331218
......@@ -80,6 +80,10 @@ export class PrefixDetailsComponent implements OnInit {
ngOnInit(): void {
  // Load every choice list (subjects, objects, verbs, logical connectives)
  // and the prefixes for the boilerplate group identified in the route.
  const { gb, id } = this.route.snapshot.params;
  this.getchoices(gb);
  this.getsubject();
  this.getoobject();
  this.getverb();
  this.getlogcon();
  this.getAllPrefix(id);
}
......@@ -161,49 +165,63 @@ export class PrefixDetailsComponent implements OnInit {
for (let x=0; x <strIntoObj.length; x++ ){
if(this.simpleprefix.indexOf(strIntoObj[x][1]) == -1) {
this.simpleprefix.push(strIntoObj[x][1]);}}
//get logic connective
let strlogic_con = JSON.parse(data[0].prefix_logcon_choices)
for (let x=0; x <strlogic_con.length; x++ ){
if(this.logic_con.indexOf(strlogic_con[x][1]) == -1) {
this.logic_con.push(strlogic_con[x][1]);}}
//get prefix verbs
let strprverb_con = JSON.parse(data[0].prefix_verb_choices)
for (let x=0; x < strprverb_con.length; x++ ){
if(this.verbs.indexOf(strprverb_con[x][1]) == -1) {
this.verbs.push(strprverb_con[x][1]);}}
//get prefix subjects
let strprsub = JSON.parse(data[0].system_choices)
for (let x=0; x < strprsub.length; x++ ){
if(this.subjects.indexOf(strprsub[x][1]) == -1) {
this.subjects.push(strprsub[x][1]);}}
let strprsub2 = JSON.parse(data[0].function_choices)
for (let x=0; x < strprsub2.length; x++ ){
if(this.subjects.indexOf(strprsub2[x][1]) == -1) {
this.subjects.push(strprsub2[x][1]);}}
let strprsub3 = JSON.parse(data[0].item_choices)
for (let x=0; x < strprsub3.length; x++ ){
if(this.subjects.indexOf(strprsub3[x][1]) == -1) {
this.subjects.push(strprsub3[x][1]);}}
//get prefix objects
let strprobj = JSON.parse(data[0].item_choices)
for (let x=0; x < strprobj.length; x++ ){
if(this.objects.indexOf(strprobj[x][1]) == -1) {
this.objects.push(strprobj[x][1]);}}
let strprobj2 = JSON.parse(data[0].function_choices)
for (let x=0; x < strprobj2.length; x++ ){
if(this.objects.indexOf(strprobj2[x][1]) == -1) {
this.objects.push(strprobj2[x][1]);}}
let strprobj3 = JSON.parse(data[0].state_value_choices)
for (let x=0; x < strprobj3.length; x++ ){
if(this.objects.indexOf(strprobj3[x][1]) == -1) {
this.objects.push(strprobj3[x][1]);}}
let strprobj4 = JSON.parse(data[0].flow_choices)
for (let x=0; x < strprobj4.length; x++ ){
if(this.objects.indexOf(strprobj4[x][1]) == -1) {
this.objects.push(strprobj4[x][1]);}}
},
error => {
console.log(error);
});
}
getsubject(): void {
  // Fetch subject choices from the service and merge each label into the
  // local `subjects` list, skipping values that are already present.
  this.prefixService.getsubject().subscribe(
    data => {
      for (const row of data) {
        const label = row[1];
        if (this.subjects.indexOf(label) === -1) {
          this.subjects.push(label);
        }
      }
    },
    error => {
      console.log(error);
    });
}
getoobject(): void {
  // Fetch object choices from the service and merge each label into the
  // local `objects` list, skipping values that are already present.
  this.prefixService.getoobject().subscribe(
    data => {
      for (const row of data) {
        const label = row[1];
        if (this.objects.indexOf(label) === -1) {
          this.objects.push(label);
        }
      }
    },
    error => {
      console.log(error);
    });
}
getverb(): void {
  // Fetch verb choices from the service and merge each label into the
  // local `verbs` list, skipping values that are already present.
  this.prefixService.getverb().subscribe(
    data => {
      for (const row of data) {
        const label = row[1];
        if (this.verbs.indexOf(label) === -1) {
          this.verbs.push(label);
        }
      }
    },
    error => {
      console.log(error);
    });
}
getlogcon(): void {
  // Fetch logical-connective choices from the service and merge each label
  // into the local `logic_con` list, skipping values that are already present.
  this.prefixService.getlogcon().subscribe(
    data => {
      for (const row of data) {
        const label = row[1];
        if (this.logic_con.indexOf(label) === -1) {
          this.logic_con.push(label);
        }
      }
    },
    error => {
      console.log(error);
    });
}
}
......@@ -45,9 +45,9 @@ FUNCTION_SUBCLASSES_COMMENTS = get_dmo_classes_of_classes_and_comment("SAO#Funct
FUNCTION_INSTANCES_COMMENTS = get_dmo_instance_and_comment("SAO#Function")
#
SHALL_CHOICES = ( ("shall", "shall"), ("shall not", "shall not"))
QUANTIFIER_CHOICES = ( ("NONE","NONE"), ("ALL", "ALL"), ("ONLY", "ONLY"),
("MORE THAN", "MORE THAN"), ("LESS THAN", "LESS THAN"), ("EXACTLY","EXACTLY"),
("AT LEAST", "AT LEAST"), ("AT MOST", "AT MOST")
QUANTIFIER_CHOICES = ( ("None","None"), ("All", "All"), ("Only", "Only"),
("More Than", "More Than"), ("Less Than", "Less Than"), ("Exactly","Exactly"),
("At Least", "At Least"), ("At Most", "At Most")
)
NUMBER_UNITS_CHOICES = (("",""),("m/s","m/s"), ("m/s^2","m/s^2"), ("m/s^3","m/s^3"), ("rad","rad"), ("rad/s","rad/s"), ("Hz","Hz"), ("METERS","METERS"), ("KILOMETERS","KILOMETERS"), ("VOLT","VOLT"), ("Number", "Number"))
#ITEM
......
......@@ -214,7 +214,7 @@ class BoilerplateData(models.Model):
class InferenceResults(models.Model):
    # Stores the serialized inference output produced for a boilerplate group.
    owner_infer = models.ForeignKey(User, related_name='owner_infer', on_delete=models.CASCADE)
    infer_group_of_boilerplate = models.ForeignKey(BoilerplateGroup, related_name='infer_owner_of_boilerplate', on_delete=models.CASCADE)
    # NOTE(review): the next two lines declare the same field twice — this looks
    # like old/new diff residue (max_length raised from 1000 to 1000000). Only
    # the second declaration takes effect; the first should be removed.
    inference_data = models.CharField(default="", max_length=1000, blank=True)
    inference_data = models.CharField(default="", max_length=1000000, blank=True)

    class Meta:
        # Results are ordered by their owning user.
        ordering = ['owner_infer']
......
......@@ -6,7 +6,9 @@ from reqman.apps.reqtool.rest_api.services.parse_ontologies import *
from reqman.apps.reqtool.models.main_req import SYSTEM_CHOICES, FUNCTION_CHOICES, INTERFACE_CHOICES, FLOW_CHOICES, ITEM_CHOICES, STATE_CHOICES, STATE_SET_CHOICES
#HERE
Ontology_file = "../../Ontologies/Autonomy_v1.ttl"
#Ontology_file = "../../Ontologies/Autonomy_v1.ttl"
Ontology_file = "../../Ontologies/2022_AOCS.ttl"
list_of_inference_metrics = ["IncompleteRequirement", "AmbiguousRequirement",
"InconsistentRequirement","NoisyRequirement","OpaqueRequirement","RedundantRequirement"]
......@@ -20,6 +22,14 @@ def getclassofprefix(prefix):
def getclassofmain(main):
    """Resolve the boilerplate class of a main clause by delegating to the
    ontology helper ``findclassofmain``."""
    return findclassofmain(main)
def getinstancefile(data, entries=None):
    """Find which ontology instance file a choice string belongs to.

    Parameters
    ----------
    data : str
        A choice label of the form ``"Kind : Value"``.
    entries : sequence, optional
        Flat list alternating ``[label, file, label, file, ...]``. Defaults to
        the module-level ``out_list`` parsed from the ontology, preserving the
        original behavior.

    Returns
    -------
    tuple
        ``(instance_file, splitter)`` where ``splitter`` is
        ``label.split(" : ")``.

    Raises
    ------
    ValueError
        If ``data`` is not found. (The original code left ``instance_file``
        and ``splitter`` unbound in that case and crashed with an opaque
        UnboundLocalError.)
    """
    if entries is None:
        entries = out_list
    # Entries come in (label, file) pairs laid out flat; step by two.
    for j in range(0, len(entries) - 1, 2):
        if entries[j] == data:
            return entries[j + 1], entries[j].split(" : ")
    raise ValueError("unknown instance choice: %r" % (data,))
def inferencing(project, boilerlate, prefix, main, suffix):
metrics = {}
......@@ -60,45 +70,71 @@ def exportboiltottl(project, prefix, boilerplate, main, suffix):
completeName = os.path.join(save_path, file_name)
per_instances = URIRef("http://delab.csd.auth.gr/ontologies/2018/RDO-instances#")
rdo = URIRef("http://delab.csd.auth.gr/ontologies/2018/RDO#")
sao = URIRef("http://delab.csd.auth.gr/ontologies/2018/SAO#")
rbo = URIRef("http://delab.csd.auth.gr/ontologies/2018/RBO#")
lo = URIRef("http://delab.csd.auth.gr/ontologies/2018/LO-instances#")
g = Graph()
g.load(Ontology_file, format="turtle")
#for b in boilerplate:
for i in range(len(boilerplate)):
title = boilerplate[i][0]['title']
#title = BNode()
g.add((per_instances + title, RDF.type, rdo + 'Requirement'))
if (boilerplate[i][0]['has_prefix'] == True):
if (boilerplate[i][0]['has_prefix'] == True): #If the requirement has Prefix
prefixmodel=["system_fun_item", "item_function_flow_statevalue"]
g.add((per_instances + title, rdo + 'hasPrefix', per_instances+(title+"_"+ prefix[i][0]['simple_prefix'])))
if (boilerplate[i][0]['has_main'] == True):
g.add((per_instances+(title+"_"+ prefix[i][0]['simple_prefix']), RDF.type, rbo + prefix[i][0]['simple_prefix']))
prefix_attr_splitter = prefix[i][0]['item_function_flow_statevalue'].split(" : ")
if (prefix_attr_splitter[0] == 'StateValue'):
g.add( (per_instances+(title+"_"+ prefix[i][0]['simple_prefix']), rbo+"isRelatedToLogicalExpression", (per_instances+("SystemStateValueContraint_"+title))))
g.add((per_instances+("SystemStateValueContraint_"+title), RDF.type, (per_instances+("SystemStateValueContraint"))))
g.add((per_instances+("SystemStateValueContraint_"+title), rbo+"isRelatedToState", lo + prefix_attr_splitter[1].strip()))
for j in prefixmodel:
if prefix[i][0][j] != "":
sub_instance_file, prefix_subject_splitter = getinstancefile(prefix[i][0][j])
g.add((per_instances+("SystemStateValueContraint_"+title), rbo+"isRelatedTo"+prefix_subject_splitter[0], URIRef(str(sub_instance_file))+"#"+ str(prefix_subject_splitter[len(prefix_subject_splitter) -1])))
else:
prefix_verb_splitter = prefix[i][0]['state_or_verb'].split(" : ")
g.add((per_instances+(title+"_"+ prefix[i][0]['simple_prefix']), rbo+"isRelatedToOccuringFunctionality", per_instances+(prefix_verb_splitter[0]+"OccuringFunctionality"+title)))
g.add((per_instances+(prefix_verb_splitter[0]+"OccuringFunctionality"+title), RDF.type, (per_instances+(prefix_verb_splitter[0]+"OccuringFunctionality"))))
g.add((per_instances+(prefix_verb_splitter[0]+"OccuringFunctionality"+title), rbo+"isRelatedToAction", lo + prefix_verb_splitter[1].strip()))
for j in prefixmodel:
if prefix[i][0][j] != "":
sub_instance_file, prefix_subject_splitter = getinstancefile(prefix[i][0][j])
g.add((per_instances+(prefix_verb_splitter[0]+"OccuringFunctionality"+title), rbo+"isRelatedTo"+prefix_subject_splitter[0], URIRef(str(sub_instance_file))+"#"+ str(prefix_subject_splitter[len(prefix_subject_splitter) -1])))
if (boilerplate[i][0]['has_main'] == True): #If the requirement has Main
g.add((per_instances + title, rdo + 'hasMain', per_instances+(title+"_"+ main[i][0]['main_choices'])))
g.add((per_instances+(title+"_"+ main[i][0]['main_choices']), RDF.type, rbo + main[i][0]['main_choices']))
if main[i][0]['sys_fun_inter'] != "":
#need to know in which file is the SUbject, RDO instances? or RDO-AOCS instance? or somewhere else?
for j in range(0,(len(out_list)-1),2):
if out_list[j]==main[i][0]['sys_fun_inter']:
spliter_1 = out_list[j].split(" : ")
instance_file = out_list[j+1]
break
g.add((per_instances+(title+"_"+ main[i][0]['main_choices']), rbo+"isRelatedToSubject", URIRef(str(instance_file))+"#"+ str(spliter_1[len(spliter_1) -1])))
instance_file, splitter_1 = getinstancefile(main[i][0]['sys_fun_inter'])
g.add((per_instances+(title+"_"+ main[i][0]['main_choices']), rbo+"isRelatedToSubject", URIRef(str(instance_file))+"#"+ str(splitter_1[len(splitter_1) -1])))
if main[i][0]['verb'] != "":
mainverb = main[i][0]['verb'].split(':')
g.add((per_instances+(title+"_"+ main[i][0]['main_choices']), rbo+"isRelatedToAction", lo + mainverb[1].strip()))
if main[i][0]['flow_function_interface_item_system_state_stateset'] != "":
for j in range(0,(len(out_list)-1),2):
if out_list[j]==main[i][0]['flow_function_interface_item_system_state_stateset']:
spliter_1 = out_list[j].split(" : ")
instance_file = out_list[j+1]
break
g.add((per_instances+(title+"_"+ main[i][0]['main_choices']), rbo+"isRelatedTo"+spliter_1[0], URIRef(str(instance_file))+"#"+ str(spliter_1[len(spliter_1) -1])))
mainmodel=["flow_function_interface_item_system_state_stateset", "state_item_before_verb", "statevalue_before_verb", "statevalue_system_connection_stateset"]
for j in mainmodel:
if main[i][0][j] != "":
instance_file, splitter_1 = getinstancefile(main[i][0][j])
g.add((per_instances+(title+"_"+ main[i][0]['main_choices']), rbo+"isRelatedTo"+splitter_1[0], URIRef(str(instance_file))+"#"+ str(splitter_1[len(splitter_1) -1])))
if main[i][0]['quantifier'] != "":
g.add((per_instances+(title+"_"+ main[i][0]['main_choices']), sao+"isRelatedTo", (per_instances+(title+"_"+ (main[i][0]['quantifier']).replace(" ", "")))))
per = URIRef('http://delab.csd.auth.gr/ontologies/2018/LO#'+ (main[i][0]['quantifier']).replace(" ", ""))
quant = URIRef(rbo + "Quantifier") # Initial Value if not any
for s, p, o in g.triples((per, RDFS.subClassOf, None)): #subClassOf
print(o)
if ('RBO' in o):
quant=o
g.add((per_instances+(title+"_"+ (main[i][0]['quantifier']).replace(" ", "")), RDF.type, quant))
g.add((per_instances+(title+"_"+ (main[i][0]['quantifier']).replace(" ", "")), rbo+"hasAdverb", lo + (main[i][0]['quantifier']).replace(" ", "")))
if main[i][0]['numerical'] != "":
g.add((per_instances+(title+"_"+ (main[i][0]['quantifier']).replace(" ", "")), rbo+"hasNumerical", URIRef(per_instances+(main[i][0]['quantifier']).replace(" ", "")+"_"+str(main[i][0]['numerical']))))
g.add((URIRef(per_instances+(main[i][0]['quantifier']).replace(" ", "")+"_"+str(main[i][0]['numerical'])), RDF.type, URIRef(rbo+"Numerical")))
if (boilerplate[i][0]['has_suffix'] == True):
g.add((per_instances + title, rdo + 'hasSuffix', per_instances+(title+"_"+ suffix[i][0]['suffix_choices'])))
#Need S1
#Need S2
#Need S3
g.add((per_instances+(title+"_"+ suffix[i][0]['suffix_choices']), RDF.type, URIRef(rbo+ suffix[i][0]['suffix_choices'])))
#if (boilerplate[i][0]['has_prefix'] == True):
# g.add((per_instances+(title+"_"+ main[i][0]['main_choices']), RDF.type, rbo + main[i][0]['main_choices']))
g.serialize(destination=save_path+file_name+'.ttl', format='turtle')
return (save_path+file_name)
#print(g.serialize(format="turtle").decode("utf-8"))
......
......@@ -7,7 +7,8 @@ from rdflib import ConjunctiveGraph, URIRef, RDFS, RDF, Namespace
#call("./../../Ontologies/s-get http://155.207.131.19:3030/Mokos_18_1_7_47/data default >> ../../Ontologies/data.ttl", shell=True)
#Ontology_file = "../../Ontologies/Mokos_18_1_7_47.ttl"
Ontology_file = "../../Ontologies/Autonomy_v1.ttl"
#Ontology_file = "../../Ontologies/Autonomy_v1.ttl"
Ontology_file = "../../Ontologies/2022_AOCS.ttl"
subClass_instances = "../../Ontologies/instances_subclass.txt"
......@@ -367,6 +368,8 @@ def get_subjects(bnodes_uriref):
if i in bnodes:
spliter = str(i).split("#")
subject_list.append(str(spliter[1]))
subject_list = list(filter(("nil").__ne__, subject_list))
subject_list = list(filter(("in").__ne__, subject_list))
return(subject_list)
......@@ -405,7 +408,7 @@ def check_quantifier(bnodes_uriref):
bnodes_uriref = [str(i) for i in bnodes_uriref]
quantity = [(rel_to_sub) for rel_to_sub in bnodes_uriref if "isRelatedToQuantity" in rel_to_sub]
uom = [(rel_to_sub) for rel_to_sub in bnodes_uriref if "isRelatedtoUOM" in rel_to_sub]
if quantity and uom:
if quantity or uom:
return True
......@@ -432,6 +435,8 @@ def get_attribute(bnodes_uriref, subjects, related_subjects, verbs):
attribute_list = list(filter(("Attribute").__ne__, attribute_list))
attribute_list = list(filter(("Quantity").__ne__, attribute_list))
attribute_list = list(filter(("UOM").__ne__, attribute_list))
attribute_list = list(filter(("nil").__ne__, attribute_list))
attribute_list = list(filter(("in").__ne__, attribute_list))
#discuss with kostas if this needed
attribute_list = list(filter(("TraversingConcept").__ne__, attribute_list))
......@@ -445,6 +450,7 @@ def get_attribute(bnodes_uriref, subjects, related_subjects, verbs):
tmp_list.append(tmp[1])
tmp_list = list(filter(("nil").__ne__, tmp_list))
tmp_list = list(filter(("in").__ne__, tmp_list))
tmp_list = list(filter(("Restriction").__ne__, tmp_list))
tmp_list = list(filter(("Class").__ne__, tmp_list))
tmp_list = list(filter(("Action").__ne__, tmp_list))
......@@ -487,6 +493,7 @@ def get_main_sytax_inference():
for objects in g.objects(subject=BNode(bn)):
if isinstance(objects, URIRef):
bnodes_uriref.append(objects)
#tmp_list = list(filter(("nil").__ne__, tmp_list))
main_dict[main] = {}
main_dict[main]["Subject"] = get_subjects(bnodes_uriref)
......@@ -495,7 +502,7 @@ def get_main_sytax_inference():
main_dict[main]["Quantifier"] = check_quantifier(bnodes_uriref)
main_dict[main]["Attributes"] = get_attribute(bnodes_uriref, main_dict[main]["Subject"], main_dict[main]["Related_to_Subject"], main_dict[main]["Verbs"])
print( main_dict[main]["Subject"])
return main_dict
......
......@@ -178,17 +178,18 @@ class PrefixDetailsAPIView(RetrieveUpdateDestroyAPIView):
def perform_update(self, serializer):
    # Rebuild the human-readable prefix string from the selected choices,
    # persist it on the related BoilerplateData row, then classify the prefix
    # and store the resulting simple_prefix on the serializer.
    instance = serializer.validated_data
    #add data in the Model BoilerplateData
    #print(instance)
    # Each choice is stored as "Label : Value"; rsplit keeps only the value.
    es_instance = instance['system_fun_item'].rsplit(':', 1)
    ev_instance = instance['state_or_verb'].rsplit(':', 1)
    # NOTE(review): the next line is repeated verbatim — almost certainly diff
    # residue; the duplicate assignment is harmless but should be removed.
    ei_instance = instance['item_function_flow_statevalue'].rsplit(':', 1)
    ei_instance = instance['item_function_flow_statevalue'].rsplit(':', 1)
    pr = (instance['prefix'] + ' ' + es_instance[len(es_instance)-1] + ' ' + ev_instance[len(ev_instance)-1] + ' ' + ei_instance[len(ei_instance)-1])
    BoilerplateData.objects.filter(owner_data=instance['prefix_boilerplate'].owner, boilerplate_data_id = instance['prefix_boilerplate']).update(prefix_data = pr )
    prefix_choices = inference.getclassofprefix(instance)
    if prefix_choices[0] == 'ERROR':
        raise APIException("Prefix choices not in [P1-P3]")
    else:
        # NOTE(review): the two save() calls below look like old/new diff
        # residue (simple_prefix=prefix_choices[0] vs prefix_choices) — confirm
        # which is intended; as written, save() runs twice.
        serializer.save(simple_prefix = prefix_choices[0])
        serializer.save(simple_prefix = prefix_choices)
#Custom actions when DELETE
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment