tp surete
parent bf19b0b29a
commit 6c365cfbbc
17 changed files with 1208 additions and 0 deletions
2 surete/tp_ltl/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
/venv
__pycache__

1 surete/tp_ltl/requirements.txt Normal file
@@ -0,0 +1 @@
graphviz

12 surete/tp_ltl/run.sh Executable file
@@ -0,0 +1,12 @@
#!/bin/sh
set -e
cd "$(dirname "$(realpath "$0")")"


if ! [ -d venv ]
then ./setup.sh
fi
. venv/bin/activate


python src/product.py

10 surete/tp_ltl/setup.sh Executable file
@@ -0,0 +1,10 @@
#!/bin/bash
set -e
cd "$(dirname "$(realpath "$0")")"


python -m venv venv
. venv/bin/activate


pip install -r requirements.txt

119 surete/tp_ltl/src/GBA.py Normal file
@@ -0,0 +1,119 @@
# -*-coding:UTF-8 -*
from typing import Iterable, TypeVar
from graphviz import Digraph

from utils import edge, state


def fstr(formula: frozenset[str]):
    form_str = ""
    for l in formula:
        form_str = form_str+l+','
    return form_str[:-1]


def is_consistent(formula):
    for l in formula:
        if "~"+l in formula:
            return False
    return True


# Generalized Buchi Automata
class GBA[T]:

    def __init__(self, all_states: Iterable[state[T]], initial_states: Iterable[state[T]], propositions: Iterable[str], edges: list[edge[T]], acceptings: Iterable[Iterable[state[T]]]):
        # States: a set of integers
        self.all_states = set(all_states)
        # Initial states: a set of integers
        self.initial_states = set(initial_states).intersection(self.all_states)
        # There must be an initial state; if there isn't, an initial state 0
        # is added
        if not self.initial_states:
            self.initial_states.add(0)
            self.all_states.add(0)
        self.accepting_states = list[set[state[T]]]()
        for final in acceptings:
            self.accepting_states.append(set(final).intersection(self.all_states))
        self.ap = set(propositions)
        self.formulas = {frozenset(formula) for (_, formula, _) in edges}
        self.next_states = {(state, formula): set()
                            for state in self.all_states
                            for formula in self.formulas}
        for edge in edges:
            if (edge[0] in self.all_states) and (edge[2] in self.all_states) and is_consistent(edge[1]):
                self.next_states[(edge[0], frozenset(edge[1]))].add(edge[2])
        self.labels = {state: str(state) for state in self.all_states}

    # Adds a new state to the structure
    def add_state(self):
        add_state = max([s for s in self.all_states if type(s) is int]) + 1
        self.all_states.add(add_state)
        for formula in self.formulas:
            self.next_states[add_state, formula] = set()
        self.labels[add_state] = str(add_state)
        return add_state

    def add_initial(self, state: int):
        self.initial_states.add(state)
        return state

    # Adds a new edge from the state 'origin' to the state 'destination'
    def add_edge(self, origin, formula, destination):
        if origin in self.all_states:
            if destination in self.all_states:
                if not frozenset(formula) in self.formulas:
                    self.formulas.add(frozenset(formula))
                    for st in self.all_states:
                        self.next_states[(st, frozenset(formula))] = set()
                self.next_states[(origin, frozenset(formula))].add(destination)
            else:
                raise KeyError("The destination " + str(destination)
                               + " isn't a known state.")
        else:
            raise KeyError("The origin " + str(origin)
                           + " isn't a known state.")

    def change_accept(self, sets):
        self.accepting_states = []
        for s in sets:
            self.accepting_states.append(set(s))

    # Returns the set of all states reachable from the state 'origin'
    # with one transition
    def get_successors(self, origin, formula):
        if not origin in self.all_states:
            raise KeyError("The origin " + str(origin)
                           + " isn't a known state.")
        return self.next_states[(origin, formula)]

    # Exports a picture of the automaton to 'file'.pdf.
    def export(self, file: str):
        graph = Digraph(filename = file)
        graph.graph_attr['rankdir'] = 'LR'
        for state in self.all_states:
            node_shape = 'circle'
            graph.attr('node', shape = node_shape, style = 'filled',
                       fillcolor = 'white')
            graph.node(str(state), label = self.labels[state])
            if state in self.initial_states:
                graph.node(str(state) + "_i", style = "invisible")
                graph.edge(str(state) + "_i", str(state))
        for state in self.all_states:
            for formula in self.formulas:
                for next_state in self.next_states[(state, formula)]:
                    if not formula:
                        edge_label = "True"
                    else:
                        edge_label = fstr(formula)
                    graph.edge(str(state), str(next_state), label=edge_label,
                               color='black')
        graph.render()


if __name__ == "__main__":
    # example
    exempleCours = GBA([0,1], [0,1], ["a","b"], [(0,["b"],0), (0,["a","b"],1), (1,["a","b"],1)], [[1]])
    exempleCours.export("GBAexemple")

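For reference, a minimal usage sketch of the GBA class above (illustrative only, not one of the committed files; it assumes Python 3.12+, the graphviz package, and utils.py from this commit on the import path):

    # demo_gba.py (hypothetical file, illustrative only)
    from GBA import GBA

    # two states, one initial state, edges labelled by sets of literals,
    # and one accepting set {1}
    g = GBA([0, 1], [0], ["a"], [(0, ["a"], 1), (1, [], 1)], [[1]])
    s = g.add_state()                                # creates state 2
    g.add_edge(1, ["~a"], s)                         # registers the new formula {~a} on the fly
    print(g.get_successors(0, frozenset({"a"})))     # expected: {1}
    print(g.get_successors(1, frozenset({"~a"})))    # expected: {2}
    g.export("gba_demo")                             # writes gba_demo.pdf (needs Graphviz installed)
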
391 surete/tp_ltl/src/LTL.py Normal file
@@ -0,0 +1,391 @@
# -*-coding:UTF-8 -*
from graphviz import Digraph


# A pseudo-LR(1) parser for LTL formulas, based on the following
# context-free grammar G:
# [1] E -> 's'
#     where s is any sequence of characters not featuring ().+~XGFU
# [2] E -> '('E')'
# [3] E -> '~'E*
# [4] E -> 'X'E*
# [5] E -> 'G'E*
# [6] E -> 'F'E*
# [7] E -> E('U'E)*
# [8] E -> E('.'E)*
# [9] E -> E('+'E)*
# The rules are ordered by decreasing priority.


# A class for input tokens
class token:

    def __init__(self, type_t: str, value_t: (str | None) = None):
        # The string 'type' stands for the symbol in the grammar's terminals
        # matched to the token.
        self.type = type_t
        # The string 'value' stands for the value carried by tokens of type "s".
        self.value = value_t


# A tree-based representation of LTL formulas
class LTL:

    def __init__(self, root: str, children: list["LTL"] = []):
        # The string attribute 'root' stands for the operator labelling the
        # root.
        self.root = root
        # The list attribute 'children' holds the subtrees of the root.
        self.children = children

    # Exports a picture of the formula tree to 'file'.pdf.
    def export(self, file: str):
        graph = Digraph(filename = file)
        graph.graph_attr['rankdir'] = 'TB'
        if self.children:
            graph.attr('node', shape = 'circle', style = 'filled',
                       fillcolor = 'white')
            graph.node("0", label = self.root)
            write_graph(self, "0", graph)
        else:
            graph.attr('node', shape = 'box', style = 'filled',
                       fillcolor = 'white')
            graph.node("0", label = self.root)
        graph.render()

    def copy(self):
        cpchildren = list[LTL]()
        for child in self.children:
            cpchildren.append(child.copy())
        return LTL(self.root, cpchildren)

    # Rewrites the formula in negation normal form, in place.
    def convertNNF(self):
        if self.root == "non":
            child = self.children[0]
            self.root = child.root
            self.children = child.children
            self.negativeNNF()
        else:
            for child in self.children:
                child.convertNNF()

    # Negates this subformula and pushes the negation towards the leaves, in place.
    def negativeNNF(self):
        if not self.children:
            self.children = [LTL(self.root, self.children)]
            self.root = "non"
        elif self.root == "non":
            child = self.children[0]
            self.root = child.root
            self.children = child.children
            self.convertNNF()
        else:
            if self.root == "et":
                self.root = "ou"
            elif self.root == "ou":
                self.root = "et"
            elif self.root == "U":
                self.root = "R"
            elif self.root == "R":
                self.root = "U"
            elif self.root == "G":
                self.root = "F"
            elif self.root == "F":
                self.root = "G"
            for child in self.children:
                child.negativeNNF()

    def equals(self, formula):
        if self.root != formula.root:
            return False
        for i in range(len(self.children)):
            if not (self.children[i].equals(formula.children[i])):
                return False
        return True


# Writes in the Graphviz graph 'graph' the tree structure of the LTL
# formula 'LTL', starting from its root 'root_string'.
def write_graph(LTL, root_string: str, graph: Digraph):
    if LTL.children:
        index = 0
        for child in LTL.children:
            child_root = root_string + str(index)
            if not child.children:
                graph.attr('node', shape = 'box', style = 'filled',
                           fillcolor = 'white')
                lab = child.root if child.root else "<ε>"
                graph.node(child_root, label = lab)
                graph.edge(root_string, child_root)
            else:
                graph.attr('node', shape = 'circle', style = 'filled',
                           fillcolor = 'white')
                graph.node(child_root, label = child.root)
                graph.edge(root_string, child_root)
                write_graph(child, child_root, graph)
            index += 1


# A class for terminal and non-terminal symbols the parser can push on the
# stack
class symbol:

    def __init__(self, type_t: str, LTL_tree: (LTL | None) = None):
        # The string 'type' stands for the symbol in the grammar's vocabulary
        # matched to the symbol.
        self.type = type_t
        # The LTL 'tree' stands for the formula matched to the
        # symbol, based on the parsing operations applied so far.
        self.tree = LTL_tree


# A class for the bottom-up parsing process
class LTL_Parser:

    def __init__(self, LTL_string: str):
        # The input string 'stream' from which tokens are generated
        self.stream = LTL_string
        # The list 'stack' that stands for the parsing stack.
        self.stack = list[symbol]()

    # Returns the head token of the string attribute 'stream'; if the latter is
    # empty, returns the end of file token "$" instead.
    def look_ahead(self):
        if not self.stream:
            return token("$", "$")
        if self.stream[0] in ["(", ")", ".", "+", "X", "U", "~", "G", "F", "R"]:
            return token(self.stream[0], self.stream[0])
        else:
            index = 0
            while index < len(self.stream):
                if self.stream[index] in ["(", ")", ".", "+", "X", "U", "~", "G", "F", "R"]:
                    break
                index += 1
            return token("s", self.stream[0:index])

    # Removes and returns the head token of the string attribute 'stream'; if
    # the input stream is empty while expecting a token of type 'head_type',
    # raises an error.
    def pop_stream(self, head_type = ""):
        if not self.stream:
            if head_type:
                raise IndexError("End of string reached prematurely while "
                                 "looking for token '" + head_type + "'.")
            else:
                raise IndexError("End of string reached prematurely.")
        stream_head = self.look_ahead()
        assert isinstance(stream_head, token)
        assert stream_head.value is not None
        self.stream = self.stream[len(stream_head.value):]
        return stream_head

    # Checks if rule [1] can be applied.
    def check_string_rule(self):
        return (len(self.stack) > 0) and (self.stack[-1].type == "s")

    # Checks if rule [2] can be applied.
    def check_par_rule(self):
        return (len(self.stack) > 2) and (self.stack[-1].type == ")") \
            and (self.stack[-2].type == "E") and (self.stack[-3].type == "(")

    # Checks if rule [3] can be applied.
    def check_not_rule(self, look_ahead_t):
        return (len(self.stack) > 1) and (self.stack[-2].type == "~") \
            and (self.stack[-1].type == "E")

    # Checks if rule [4] can be applied.
    def check_next_rule(self):
        return (len(self.stack) > 1) and (self.stack[-2].type == "X") \
            and (self.stack[-1].type == "E")

    # Checks if rule [5] can be applied.
    def check_G_rule(self, look_ahead_t):
        return (len(self.stack) > 1) and (self.stack[-2].type == "G") \
            and (self.stack[-1].type == "E")

    # Checks if rule [6] can be applied.
    def check_F_rule(self, look_ahead_t):
        return (len(self.stack) > 1) and (self.stack[-2].type == "F") \
            and (self.stack[-1].type == "E")

    # Checks if rule [7] can be applied; the type of the next token
    # 'look_ahead_t' is required in order to solve shift-reduce conflicts
    # by applying the grammar's priority rules.
    def check_until_rule(self, look_ahead_t):
        return (len(self.stack) > 2) and (self.stack[-1].type == "E") \
            and (self.stack[-2].type == "U") and (self.stack[-3].type == "E") \
            and not (look_ahead_t in {"X","G","F","~"})

    # Checks if rule [8] can be applied; the type of the next token
    # 'look_ahead_t' is required in order to solve shift-reduce conflicts
    # by applying the grammar's priority rules.
    def check_dot_rule(self, look_ahead_t):
        return (len(self.stack) > 2) and (self.stack[-1].type == "E") \
            and (self.stack[-2].type == ".") and (self.stack[-3].type == "E") \
            and not (look_ahead_t in {"X","G","F","~","U"})

    # Checks if rule [9] can be applied; the type of the next token
    # 'look_ahead_t' is required in order to solve shift-reduce conflicts
    # by applying the grammar's priority rules.
    def check_plus_rule(self, look_ahead_t):
        return (len(self.stack) > 2) and (self.stack[-1].type == "E") \
            and (self.stack[-2].type == "+") and (self.stack[-3].type == "E") \
            and not (look_ahead_t in {"X","G","F","~","U","."})

    # Checks if the implicit initial rule Z -> E'$' can be applied. Note that
    # the end of file symbol is never pushed on the stack; it merely appears
    # as an incoming token of type 'look_ahead_t' instead. Moreover, the stack
    # must consist of a single non-terminal symbol E in order to avoid
    # accepting with a non-empty stack.
    def check_initial_rule(self, look_ahead_t):
        return (len(self.stack) == 1) and (self.stack[0].type == "E") \
            and (look_ahead_t == "$")

    # Applies rule [1].
    def apply_string_rule(self):
        tree = self.stack[-1].tree
        self.stack = self.stack[:-1]
        self.stack.append(symbol("E", tree))

    # Applies rule [2].
    def apply_par_rule(self):
        tree = self.stack[-2].tree
        self.stack = self.stack[:-3]
        self.stack.append(symbol("E", tree))

    # Applies rule [3].
    def apply_not_rule(self):
        tree = self.stack[-1].tree
        assert tree is not None
        self.stack = self.stack[:-2]
        self.stack.append(symbol("E", LTL("non", [tree])))

    # Applies rule [4].
    def apply_next_rule(self):
        tree = self.stack[-1].tree
        assert tree is not None
        self.stack = self.stack[:-2]
        self.stack.append(symbol("E", LTL("X", [tree])))

    # Applies rule [5].
    def apply_G_rule(self):
        tree = self.stack[-1].tree
        assert tree is not None
        self.stack = self.stack[:-2]
        self.stack.append(symbol("E", LTL("G", [tree])))

    # Applies rule [6].
    def apply_F_rule(self):
        tree = self.stack[-1].tree
        assert tree is not None
        self.stack = self.stack[:-2]
        self.stack.append(symbol("E", LTL("F", [tree])))

    # Applies rule [7].
    def apply_until_rule(self):
        right_tree = self.stack[-1].tree
        left_tree = self.stack[-3].tree
        assert right_tree is not None
        assert left_tree is not None
        self.stack = self.stack[:-3]
        self.stack.append(symbol("E", LTL("U", [left_tree, right_tree])))

    # Applies rule [8].
    def apply_dot_rule(self):
        right_tree = self.stack[-1].tree
        left_tree = self.stack[-3].tree
        assert right_tree is not None
        assert left_tree is not None
        self.stack = self.stack[:-3]
        self.stack.append(symbol("E", LTL("et", [left_tree, right_tree])))

    # Applies rule [9].
    def apply_plus_rule(self):
        right_tree = self.stack[-1].tree
        left_tree = self.stack[-3].tree
        assert right_tree is not None
        assert left_tree is not None
        self.stack = self.stack[:-3]
        self.stack.append(symbol("E", LTL("ou", [left_tree, right_tree])))

    # Shifts the incoming token of type 'look_ahead_t' on the stack.
    def shift(self, look_ahead_t):
        new_token = self.pop_stream(look_ahead_t)
        # String tokens carry a formula tree reduced to a single node.
        if new_token.type == "s":
            if new_token.value == "ε":
                self.stack.append(symbol("s", LTL("")))
            else:
                assert new_token.value is not None
                self.stack.append(symbol("s", LTL(new_token.value)))
        # Other terminal symbols do not carry syntactic trees.
        else:
            self.stack.append(symbol(new_token.type))

    # The parser itself. It either returns an LTL tree or raises a SyntaxError
    # once it has read the entire stream yet cannot perform any reduction.
    # The stack trace is shown if the Boolean 'trace' is true.
    def parse(self, trace = False):
        while True:
            look_ahead = self.look_ahead()
            # Shows the stack trace for debugging purposes.
            if trace:
                for symb in self.stack:
                    print(symb.type, end=";")
                print()
            # First tries to apply the reduction rules in their matching
            # priority order.
            if self.check_string_rule():
                self.apply_string_rule()
            elif self.check_par_rule():
                self.apply_par_rule()
            elif self.check_next_rule():
                self.apply_next_rule()
            elif self.check_not_rule(look_ahead.type):
                self.apply_not_rule()
            elif self.check_G_rule(look_ahead.type):
                self.apply_G_rule()
            elif self.check_F_rule(look_ahead.type):
                self.apply_F_rule()
            elif self.check_until_rule(look_ahead.type):
                self.apply_until_rule()
            elif self.check_dot_rule(look_ahead.type):
                self.apply_dot_rule()
            elif self.check_plus_rule(look_ahead.type):
                self.apply_plus_rule()
            # If the initial reduction can be performed, the parsing process
            # succeeds.
            elif self.check_initial_rule(look_ahead.type):
                return self.stack[0].tree
            # It otherwise shifts a token from the input stream, assuming it is
            # not empty.
            elif look_ahead.type != "$":
                self.shift(look_ahead.type)
            # The parser can no longer shift or reduce, hence, fails.
            else:
                raise SyntaxError("Can no longer shift a symbol or reduce"
                                  " a rule.")


# Returns a tree of type LTL representing the string 'LTL_string'.
def new_LTL(LTL_string: str):
    parsed = LTL_Parser(LTL_string).parse()
    assert parsed is not None
    return parsed


# Tests
if __name__ == "__main__":
    P1 = LTL_Parser("a")
    P1_tokens = list[str]()
    while P1.stream:
        head = P1.look_ahead().value
        assert head is not None
        P1_tokens.append(head)
        P1.pop_stream()
    head = P1.look_ahead().value
    assert head is not None
    P1_tokens.append(head)
    print(P1_tokens)
    formule = new_LTL("~(a+X~a+Gb.F~c)")
    formule.convertNNF()
    formule.export("test")

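For reference, a small sketch of how the parser and the NNF conversion above fit together (illustrative only, not one of the committed files):

    # demo_ltl.py (hypothetical file, illustrative only)
    from LTL import new_LTL

    # '~' is negation, '.' conjunction, '+' disjunction, X/G/F/U the temporal operators
    phi = new_LTL("~(aUb)")
    phi.convertNNF()                        # in-place rewrite, pushing negations to the leaves
    print(phi.root)                         # expected: R  (the dual of U)
    print([c.root for c in phi.children])   # expected: ['non', 'non']
    phi.export("nnf_demo")                  # renders the tree to nnf_demo.pdf
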
34 surete/tp_ltl/src/LTLchecker.py Normal file
@@ -0,0 +1,34 @@
from GBA import *
from kripke import *
from LTL import *
from LTLtoGBA import *
from product import *
from emptyTest import *


# Input: an LTL formula, a Kripke structure and a set of atomic propositions
# Output: returns True if all the traces of the structure satisfy the formula, False otherwise
def satisfait(formule: LTL, kripke: Kripke, /, *_):
    # negate the formula and put the negation in NNF (convertNNF works in place)
    neg = LTL("non", [formule])
    neg.convertNNF()
    propositions = kripke.ap
    gba1 = buildBuchi(neg, propositions)
    gba2 = kripke.toGBA()
    produit = product(gba1, gba2)
    return is_GBA_empty(produit)


if __name__ == "__main__":
    # washing machine
    propositions = ["b","f","d","v"]
    mlaver = Kripke([0,1,2,3,4], [0], ["b","f","d","v"], [(0,1),(0,2),(1,3),(1,0),(2,3),(3,2),(3,4),(4,4)], {0:set(), 1:{"f"}, 2:{"b"}, 3:{"b","f"}, 4:{"b","f","d","v"}})
    mlaver.export("mlaver")
    # Test with formule1, expected result: False
    formule1 = new_LTL("F(d)")
    print(satisfait(formule1, mlaver, ["b","f","d","v"]))
    # Test with formule2, expected result: True
    formule2 = new_LTL("G(~d+f)")
    print(satisfait(formule2, mlaver, ["b","f","d","v"]))
    # Test with formule3, expected result: True
    formule3 = new_LTL("G(~d+G(d))")
    print(satisfait(formule3, mlaver, ["b","f","d","v"]))
    # Test with formule4, expected result: True
    formule4 = new_LTL("G((~d.~v)+F(d.v))")
    print(satisfait(formule4, mlaver, ["b","f","d","v"]))

260 surete/tp_ltl/src/LTLtoGBA.py Normal file
@@ -0,0 +1,260 @@
# -*-coding:UTF-8 -*
from LTL import *
from GBA import *


def equals(formules1: list[LTL], formules2: list[LTL]):
    difference = formules1.copy()
    for form1 in formules2:
        equals = False
        for form2 in formules1:
            if form1.equals(form2):
                difference.remove(form2)
                equals = True
        if not equals:
            return False
    return not difference


def findall(val: list[tuple[list[LTL], int]], formule: list[LTL]):
    result = set()
    for couple in val:
        if equals(formule, couple[0]):
            result.add(couple[1])
    return result


def clean(formules: Iterable[LTL]):
    new = list[LTL]()
    for form1 in formules:
        toAdd = True
        for form2 in new:
            if form1.equals(form2):
                toAdd = False
        if toAdd:
            new.append(form1)
    return new


def copy(formules: Iterable[LTL]):
    result = list[LTL]()
    for formule in formules:
        result.append(formule.copy())
    return result


def is_in(formule, formules):
    for formule2 in formules:
        if formule.equals(formule2):
            return True
    return False


def duplicate(state: int, structure: GBA, todo: dict[int, list[LTL]], val: list[tuple[list[LTL], int]], toExpand: list[int]):
    statecp = structure.add_state()
    if state in structure.initial_states:
        structure.add_initial(statecp)
    update_val(state, statecp, val)
    todo[statecp] = copy(todo[state])
    todo[state] = copy(todo[state])
    toExpand.append(statecp)
    return statecp


def update_val(state, statecp, val):
    for couple in val:
        if couple[1] == state:
            val.append((couple[0], statecp))
    return val


def traitET(formule: LTL, i: int, structure: GBA, todo: dict[int, list[LTL]], state: int, val: list[tuple[list[LTL], int]], toExpand: list[int]):
    """
    Applies an 'AND' node of the LTL formula to the val.
    """
    child1 = formule.children[0]
    child2 = formule.children[1]
    formulecp = copy(todo[state])
    todo[state] = formulecp
    formulecp[i] = child1
    formulecp.append(child2)
    val.append((formulecp, state))
    return True


def traitG(formule: LTL, i: int, structure: GBA, todo: dict[int, list[LTL]], state: int, val: list[tuple[list[LTL], int]], toExpand: list[int]):
    child = formule.children[0]
    formulecp = todo[state].copy()
    todo[state] = formulecp
    formulecp.append(child)
    formulecp[i] = LTL("X", [formule])
    val.append((formulecp, state))
    return True


def traitOU(formule: LTL, i: int, structure: GBA, todo: dict[int, list[LTL]], state: int, val: list[tuple[list[LTL], int]], toExpand: list[int]):
    # duplicate the current state so that two transition choices are possible
    state2 = duplicate(state, structure, todo, val, toExpand)
    # fetch the formulas on the left and on the right of the OR
    child1 = formule.children[0]
    child2 = formule.children[1]
    # update the current formulas so that one state carries child1 and the other child2
    todo[state][i] = child1.copy()
    todo[state2][i] = child2.copy()
    # record that state and state2 satisfy the new sets of formulas
    val.append((todo[state], state))
    val.append((todo[state2], state2))
    return True


def traitF(formule: LTL, i: int, structure: GBA, todo: dict[int, list[LTL]], state: int, val: list[tuple[list[LTL], int]], toExpand: list[int]):
    # duplicate the current state so that two transition choices are possible
    state2 = duplicate(state, structure, todo, val, toExpand)
    # fetch the formula under the F
    child = formule.children[0]
    # build the formula XF...
    nextF = LTL("X", [formule])
    # update the current formulas so that one state carries child and the other XF(child)
    todo[state2][i] = child
    todo[state][i] = nextF
    # record that state and state2 satisfy the new sets of formulas
    val.append((todo[state], state))
    val.append((todo[state2], state2))
    return True


def traitU(formule: LTL, i: int, structure: GBA, todo: dict[int, list[LTL]], state: int, val: list[tuple[list[LTL], int]], toExpand: list[int]):
    # duplicate the current state so that two transition choices are possible
    state2 = duplicate(state, structure, todo, val, toExpand)
    # fetch the formulas on the left and on the right of the U
    child1 = formule.children[0]
    child2 = formule.children[1]
    # build the formula X(..U..)
    nextU = LTL("X", [formule])
    # update the current formulas
    # to complete
    todo[state2][i] = child2
    todo[state][i] = nextU
    todo[state].append(child1.copy())

    # record that state and state2 satisfy the new sets of formulas
    val.append((todo[state], state))
    val.append((todo[state2], state2))
    return True


def traitement(done: bool, i: int, structure: GBA, todo: dict[int, list[LTL]], state: int, val: list[tuple[list[LTL], int]], toExpand: list[int]):
    """
    Applies one step of the todo collection of LTL formulas.
    """
    formule = todo[state][i]
    if formule.root == "et":
        return traitET(formule, i, structure, todo, state, val, toExpand)
    elif formule.root == "ou":
        return traitOU(formule, i, structure, todo, state, val, toExpand)
    elif formule.root == "F":
        return traitF(formule, i, structure, todo, state, val, toExpand)
    elif formule.root == "G":
        return traitG(formule, i, structure, todo, state, val, toExpand)
    elif formule.root == "U":
        return traitU(formule, i, structure, todo, state, val, toExpand)
    else:
        return False


def buildBuchi(formule: LTL, propositions: list[str]):
    """
    Builds a Buchi automaton from the LTL formula and the possible propositions.
    """
    structure = GBA([0], [0], propositions, [], [])
    val = [([formule], 0)]
    todo = {0: [formule]}
    expandState(structure, todo, 0, val, [0])
    accept_conditions(structure, todo, val)
    return structure


def expandState(structure: GBA, todo: dict[int, list[LTL]], state: int, val: list[tuple[list[LTL], int]], toExpand: list[int]):
    """
    Creates all states from a collection of LTL transition formulas (in todo).
    """
    done = False
    i = 0
    while not done and i < len(todo[state]):
        done = traitement(done, i, structure, todo, state, val, toExpand)
        i = i+1
    if not done:
        toExpand = toExpand[1:]
        if toExpand:
            expandState(structure, todo, toExpand[0], val, toExpand)
        else:
            computeSucc(structure, todo, val)
    return val


def computeSucc(structure: GBA, todo: dict[int, list[LTL]], val: list[tuple[list[LTL], int]]):
    states = structure.all_states.copy()
    for state in states:
        succ = findall(val, next(todo[state]))
        if not succ:
            new = structure.add_state()
            val.append((next(todo[state]), new))
            todo[new] = next(todo[state])
            expandState(structure, todo, new, val, [new])
            succ = findall(val, next(todo[state]))
        label = now(todo[state])
        if is_consistent(label):
            for s in succ:
                structure.add_edge(state, label, s)


def accept_conditions(structure: GBA, todo: dict[int, list[LTL]], val: list[tuple[list[LTL], int]]):
    cond = list[LTL]()
    for couple in val:
        for formule in couple[0]:
            if formule.root in {"F","U"} and not is_in(formule, cond):
                cond.append(formule)
    sets = list[list[int]]()
    for i in range(len(cond)):
        formule = cond[i]
        sets.append([])
        for state in structure.all_states:
            if not is_in(LTL("X", [formule]), todo[state]):
                sets[i].append(state)
    structure.change_accept(sets)
    return structure


# Keeps the formulas guarded by an X operator, stripped of the X.
def next(formules: Iterable[LTL]):
    retour = list[LTL]()
    for formule in formules:
        if formule.root == "X":
            retour.append(formule.children[0])
    return retour


# Keeps the literals (propositions and negated propositions) of the collection.
def now(formules: Iterable[LTL]):
    retour = list[str]()
    for formule in formules:
        if formule.root == "non":
            retour.append("~"+formule.children[0].root)
        if not formule.children:
            assert type(formule.root) is str
            retour.append(formule.root)
    return retour


if __name__ == "__main__":
    formule = new_LTL("G(a).Fb")
    trad = buildBuchi(formule, ["a","b"])
    trad.export("trad")
    print(trad.accepting_states)

    formule = new_LTL("aUb")
    trad = buildBuchi(formule, ["a","b"])
    trad.export("trad_union")
    print(trad.accepting_states)

132 surete/tp_ltl/src/buchi.py Normal file
@@ -0,0 +1,132 @@
# -*-coding:UTF-8 -*
from graphviz import Digraph
from utils import edge


def fstr(formula):
    form_str = ""
    for l in formula:
        form_str = form_str+l+','
    return form_str[:-1]


def is_consistent(formula):
    for l in formula:
        if "~"+l in formula:
            return False
    return True


# Buchi automata
class Buchi:

    def __init__(self, all_states: list[int], initial_states: list[int], propositions: list[str], edges: list[edge], accepting: list[int]):
        # States: a set of integers
        self.all_states = set(all_states)
        # Initial states: a set of integers
        self.initial_states = set(initial_states).intersection(self.all_states)
        # There must be an initial state; if there isn't, an initial state 0
        # is added
        if not self.initial_states:
            self.initial_states.add(0)
            self.all_states.add(0)
        # Defining the set of accepting states
        self.accepting_states = set(accepting)
        # Defining the set of propositional variables
        self.ap = set(propositions)
        # We build the set of formulas appearing in transitions.
        # A formula is represented by a set of literals, i.e. a variable or its negation.
        if edges:
            self.formulas = {frozenset(formula) for (_, formula, _) in edges}
        else:
            self.formulas = set()
        self.next_states = {(state, formula): set()
                            for state in self.all_states
                            for formula in self.formulas}
        for edge in edges:
            if (edge[0] in self.all_states) and (edge[2] in self.all_states) and is_consistent(edge[1]):
                self.next_states[(edge[0], frozenset(edge[1]))].add(edge[2])
        self.labels = {state: str(state) for state in self.all_states}

    # Adds a new state to the structure
    def add_state(self):
        add_state = max(self.all_states) + 1
        self.all_states.add(add_state)
        for formula in self.formulas:
            self.next_states[add_state, formula] = set()
        self.labels[add_state] = str(add_state)
        return add_state

    def add_initial(self, state):
        self.initial_states.add(state)
        return state

    # # Removes the state 'state' from the structure if it exists.
    # def remove_state(self, state):
    #     if state in self.all_states:
    #         self.all_states.remove(state)
    #         if state in self.initial_states:
    #             self.initial_states.remove(state)
    #         del self.labels[state]
    #         del self.valuation[state]
    #         # Remove the successors of 'state', but also all the edges
    #         # leading to 'state'
    #         del self.next_states[state]
    #         for origin in self.all_states:
    #             if state in self.next_states[origin]:
    #                 self.next_states[origin].remove(state)

    # Adds a new edge from the state 'origin' to the state 'destination'
    def add_edge(self, origin, formula, destination):
        if origin in self.all_states:
            if destination in self.all_states:
                if not frozenset(formula) in self.formulas:
                    self.formulas.add(frozenset(formula))
                    for st in self.all_states:
                        self.next_states[(st, frozenset(formula))] = set()
                self.next_states[(origin, frozenset(formula))].add(destination)
            else:
                raise KeyError("The destination " + str(destination)
                               + " isn't a known state.")
        else:
            raise KeyError("The origin " + str(origin)
                           + " isn't a known state.")

    # Returns the set of all states reachable from the state 'origin'
    # with one transition
    def get_successors(self, origin, formula):
        if not origin in self.all_states:
            raise KeyError("The origin " + str(origin)
                           + " isn't a known state.")
        return self.next_states[(origin, formula)]

    # Exports a picture of the automaton to 'file'.pdf.
    def export(self, file):
        graph = Digraph(filename = file)
        graph.graph_attr['rankdir'] = 'LR'
        for state in self.all_states:
            node_shape = 'circle'
            if state in self.accepting_states:
                node_shape = 'doublecircle'
            graph.attr('node', shape = node_shape, style = 'filled',
                       fillcolor = 'white')
            graph.node(str(state), label = self.labels[state])
            if state in self.initial_states:
                graph.node(str(state) + "_i", style = "invisible")
                graph.edge(str(state) + "_i", str(state))
        for state in self.all_states:
            for formula in self.formulas:
                for next_state in self.next_states[(state, formula)]:
                    if not formula:
                        edge_label = "True"
                    else:
                        edge_label = fstr(formula)
                    graph.edge(str(state), str(next_state), label=edge_label,
                               color='black')
        graph.render()


# example
exempleCours = Buchi([0,1], [0,1], ["a","b"], [(0,["b"],0), (0,["a","b"],1), (1,["a","b"],1)], [1])
exempleCours.export("BuchiExemple")

45 surete/tp_ltl/src/emptyTest.py Normal file
@@ -0,0 +1,45 @@
from GBA import GBA


def dfs(gba: GBA, s1, s2, sets, eactuel, index, past):
    if index == len(sets):
        return eactuel == s2
    past.add((eactuel, index))
    # successors of 'eactuel', over all transition formulas of the automaton
    successors = set()
    for formula in gba.formulas:
        successors |= gba.next_states[(eactuel, formula)]
    for ns in successors:
        if ns in sets[index] and (ns, index + 1) not in past:
            if dfs(gba, s1, s2, sets, ns, index + 1, past):
                return True
        if (ns, index) not in past:
            if dfs(gba, s1, s2, sets, ns, index, past):
                return True
    return False


def path(gba: GBA, s1, s2, sets):
    past = set()
    return dfs(gba, s1, s2, sets, s1, 0, past)


# Input: a formula given as a list of literals (i.e. a variable or its negation)
# Checks that this formula is consistent, i.e. that it does not contain both a
# variable and its negation
def is_consistent(formula):
    for l in formula:
        if "~"+l in formula:
            return False
    return True


# Input: a Buchi automaton
# Output: returns True if this automaton admits no accepting execution, False otherwise
def is_Buchi_empty(buchi):
    # to complete
    return False


# Input: a generalized Buchi automaton
# Output: returns True if this automaton admits no accepting execution, False otherwise
def is_GBA_empty(gba):
    # to complete
    for init in gba.initial_states:
        for accept in gba.accepting_states:
            for acc in accept:
                if path(gba, init, acc, gba.accepting_states):
                    return False
    return True

101 surete/tp_ltl/src/kripke.py Executable file
@@ -0,0 +1,101 @@
# -*-coding:UTF-8 -*
from graphviz import Digraph
from GBA import *


KripkeEdge = tuple[int, int]


# Kripke structure
class Kripke:

    def __init__(self, all_states: list[int], initial_states: list[int], propositions: list[str], edges: list[KripkeEdge], valuations: dict[int, set[str]]):
        # States: a set of integers
        self.all_states = set(all_states)
        # Initial states: a set of integers
        self.initial_states = set(initial_states).intersection(self.all_states)
        # There must be an initial state; if there isn't, an initial state 0
        # is added
        if not self.initial_states:
            self.initial_states.add(0)
            self.all_states.add(0)
        self.ap = propositions
        self.next_states = {state: set[int]()
                            for state in self.all_states}
        for edge in set(edges):
            if (edge[0] in self.all_states) and (edge[1] in self.all_states):
                self.next_states[edge[0]].add(edge[1])
        self.valuation = valuations
        self.labels = {state: str(self.valuation[state]) for state in self.all_states}

    # Adds a new state to the structure
    def add_state(self, val):
        add_state = max(self.all_states) + 1
        self.all_states.add(add_state)
        self.next_states[add_state] = set()
        self.valuation[add_state] = val
        self.labels[add_state] = str(val)
        return add_state

    # Removes the state 'state' from the structure if it exists.
    def remove_state(self, state):
        if state in self.all_states:
            self.all_states.remove(state)
            if state in self.initial_states:
                self.initial_states.remove(state)
            del self.labels[state]
            del self.valuation[state]
            # Remove the successors of 'state', but also all the edges
            # leading to 'state'
            del self.next_states[state]
            for origin in self.all_states:
                if state in self.next_states[origin]:
                    self.next_states[origin].remove(state)

    # Returns the set of all states reachable from the state 'origin'
    # with one transition
    def get_successors(self, origin):
        if not origin in self.all_states:
            raise KeyError("The origin " + str(origin)
                           + " isn't a known state.")
        return self.next_states[origin]

    # Exports a picture of the automaton to 'file'.pdf.
    def export(self, file):
        graph = Digraph(filename = file)
        graph.graph_attr['rankdir'] = 'LR'
        for state in self.all_states:
            node_shape = 'circle'
            graph.attr('node', shape = node_shape, style = 'filled',
                       fillcolor = 'white')
            graph.node(str(state), label = self.labels[state])
            if state in self.initial_states:
                graph.node(str(state) + "_i", style = "invisible")
                graph.edge(str(state) + "_i", str(state))
        for state in self.all_states:
            for next_state in self.next_states[state]:
                edge_colour = 'black'
                edge_label = ""
                graph.edge(str(state), str(next_state), label=edge_label,
                           color=edge_colour)
        graph.render()

    def toGBA(self):
        edges = list[tuple[int, list[str], int]]()
        for state in self.all_states:
            val = list[str]()
            for prop in self.ap:
                if prop in self.valuation[state]:
                    val.append(prop)
                else:
                    val.append("~"+prop)
            for next_state in self.next_states[state]:
                edges.append((state, val, next_state))
        return GBA(self.all_states, self.initial_states, self.ap, edges, [self.all_states])


if __name__ == "__main__":
    mlaver = Kripke([0,1,2,3,4], [0], ["b","f","d","v"], [(0,1),(0,2),(1,3),(1,0),(2,3),(3,2),(3,4),(4,4)], {0:set(), 1:{"f"}, 2:{"b"}, 3:{"b","f"}, 4:{"b","f","d","v"}})
    mlaver.toGBA().export("GBAlaver")

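For reference, a small sketch of the Kripke-to-GBA translation above (illustrative only, not one of the committed files): every outgoing transition of a state is labelled with the full valuation of the source state, one literal per atomic proposition.

    # demo_kripke.py (hypothetical file, illustrative only)
    from kripke import Kripke

    k = Kripke([0, 1], [0], ["p"], [(0, 1), (1, 1)], {0: set(), 1: {"p"}})
    g = k.toGBA()
    print(g.formulas)                               # expected: {frozenset({'~p'}), frozenset({'p'})}
    print(g.get_successors(0, frozenset({"~p"})))   # expected: {1}
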
49 surete/tp_ltl/src/product.py Normal file
@@ -0,0 +1,49 @@
from GBA import *


# Input: a formula given as a list of literals (i.e. a variable or its negation)
# Checks that this formula is consistent, i.e. that it does not contain both a
# variable and its negation
def is_consistent(formula: Iterable[str]):
    for l in formula:
        if "~" + l in formula:
            return False
    return True


# Input: two generalized Buchi automata
# Output: the synchronous product of these automata
def product[T, U](gba1: GBA[T], gba2: GBA[U]):
    states = {(state1, state2) for state1 in gba1.all_states for state2 in gba2.all_states}
    initials = {(state1, state2) for state1 in gba1.initial_states for state2 in gba2.initial_states}
    formulas = {f1.union(f2) for f1 in gba1.formulas for f2 in gba2.formulas}
    ap = gba1.ap.union(gba2.ap)
    edges = list[edge]()

    # to complete
    product_impl(gba1, gba2, edges)

    accepting = list[frozenset[tuple[state[T], state[U]]]]()
    for acc in gba1.accepting_states:
        accepting.append(frozenset({(state1, state2) for state1 in acc for state2 in gba2.all_states}))
    for acc in gba2.accepting_states:
        accepting.append(frozenset({(state1, state2) for state1 in gba1.all_states for state2 in acc}))
    return GBA(states, initials, ap, edges, accepting)


def product_impl[T, U](gba_left: GBA[T], gba_right: GBA[U], edges: list[edge[tuple[state[T], state[U]]]]):
    for (d1, f1), dest1 in gba_left.next_states.items():
        for (d2, f2), dest2 in gba_right.next_states.items():
            for item1 in dest1:
                for item2 in dest2:
                    vari = f1.union(f2)
                    if is_consistent(vari):
                        edges.append(((d1, d2), vari, (item1, item2)))


if __name__ == "__main__":
    gba1 = GBA([0,1], [0,1], ["a"], [(0,[],0), (0,["a"],1), (1,["a"],1)], [[1]])
    gba2 = GBA([0,1], [0,1], ["b"], [(0,[],0), (0,["b"],1), (1,["b"],1)], [[1]])
    produitTest = product(gba1, gba2)
    produitTest.export("produitTest")
    produit = GBA([0,1,2,3], [0,1,2,3], ["a","b"], [(0,[],0), (0,["a","b"],3), (0,["a"],1), (1,["a"],1), (0,["b"],2), (1,["a","b"],3), (2,["b"],2), (2,["a","b"],3), (3,["a","b"],3)], [[1,3],[2,3]])
    produit.export("produit")

0 surete/tp_ltl/src/py.typed Normal file

9 surete/tp_ltl/src/utils.py Normal file
@@ -0,0 +1,9 @@
from typing import Iterable, TypeVar

T = TypeVar("T")
edge = tuple[T | int, Iterable[str], T | int]
del T

T = TypeVar("T")
state = T | int
del T

BIN surete/tp_ltl/sujet.pdf Normal file
Binary file not shown.

1 surete/tp_ltl/test/.gitignore vendored Normal file
@@ -0,0 +1 @@
/output

42 surete/tp_ltl/test/test.sh Executable file
@@ -0,0 +1,42 @@
#!/bin/bash
set -e
cd "$(dirname "$(realpath "$0")")"


PROJ="$(dirname "$PWD")"


if ! [ -f "$PROJ/venv/bin/activate" ]
then "$PROJ/setup.sh"
fi

. "$PROJ/venv/bin/activate"


rm -fr output
mkdir -p output
cd output


function run() {
    src="$1"
    name="$(basename "$src")"
    (
        set +e
        python "$src" > "$name.log" 2>&1
        if [ "$?" = "0" ]
        then echo "OK $name"
        else echo "ERR $name 'test/output/$name.log'"
        fi
        set -e
    ) || true
}


run "$PROJ/src/kripke.py"
run "$PROJ/src/GBA.py"
run "$PROJ/src/LTL.py"
run "$PROJ/src/LTLchecker.py"
run "$PROJ/src/LTLtoGBA.py"
run "$PROJ/src/product.py"
run "$PROJ/src/emptyTest.py"