author     Matthias Baumgartner <dev@igsor.net>    2022-12-22 20:35:15 +0100
committer  Matthias Baumgartner <dev@igsor.net>    2022-12-22 20:35:15 +0100
commit     9ab60f915fa53ae2ac2cf06b2f68138ffaa534d4 (patch)
tree       e6290053c00e06fda9e41ac0a602ff53d91a38ac /bsfs/triple_store/sparql
parent     e94368c75468e3e94382b12705e55d396249eaca (diff)
parent     ca7ee6c59d2eb3f4ec4d16e392d12d946cd85e4d (diff)
Merge branch 'mb/filter' into develop
Diffstat (limited to 'bsfs/triple_store/sparql')
-rw-r--r--  bsfs/triple_store/sparql/__init__.py       18
-rw-r--r--  bsfs/triple_store/sparql/parse_filter.py  307
-rw-r--r--  bsfs/triple_store/sparql/sparql.py        297
3 files changed, 622 insertions, 0 deletions
diff --git a/bsfs/triple_store/sparql/__init__.py b/bsfs/triple_store/sparql/__init__.py
new file mode 100644
index 0000000..285334a
--- /dev/null
+++ b/bsfs/triple_store/sparql/__init__.py
@@ -0,0 +1,18 @@
+"""
+
+Part of the BlackStar filesystem (bsfs) module.
+A copy of the license is provided with the project.
+Author: Matthias Baumgartner, 2022
+"""
+# imports
+import typing
+
+# inner-module imports
+from .sparql import SparqlStore
+
+# exports
+__all__: typing.Sequence[str] = (
+ 'SparqlStore',
+ )
+
+## EOF ##
diff --git a/bsfs/triple_store/sparql/parse_filter.py b/bsfs/triple_store/sparql/parse_filter.py
new file mode 100644
index 0000000..d4db0aa
--- /dev/null
+++ b/bsfs/triple_store/sparql/parse_filter.py
@@ -0,0 +1,307 @@
+"""
+
+Part of the BlackStar filesystem (bsfs) module.
+A copy of the license is provided with the project.
+Author: Matthias Baumgartner, 2022
+"""
+# imports
+import typing
+
+# bsfs imports
+from bsfs import schema as bsc
+from bsfs.namespace import ns
+from bsfs.query import ast
+from bsfs.utils import URI, errors
+
+# exports
+__all__: typing.Sequence[str] = (
+ 'Filter',
+ )
+
+class _GenHopName():
+ """Generator that produces a new unique symbol name with each iteration."""
+
+ # Symbol name prefix.
+ prefix: str
+
+ # Current counter.
+ curr: int
+
+ def __init__(self, prefix: str = '?hop', start: int = 0):
+ self.prefix = prefix
+ self.curr = start - 1
+
+ def __next__(self):
+ """Generate and return the next unique name."""
+ self.curr += 1
+ return self.prefix + str(self.curr)
+
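+# Usage sketch: a _GenHopName instance behaves like an infinite iterator of
+# Sparql variable names.
+#
+#     ngen = _GenHopName()
+#     next(ngen)  # returns '?hop0'
+#     next(ngen)  # returns '?hop1'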
+
+class Filter():
+ """Translate `bsfs.query.ast.filter` structures into Sparql queries."""
+
+ # Current schema to validate against.
+ schema: bsc.Schema
+
+ # Generator that produces unique symbol names.
+ ngen: _GenHopName
+
+ # Vertex type.
+ T_VERTEX = typing.Union[bsc.Node, bsc.Literal]
+
+ def __init__(self, schema):
+ self.schema = schema
+ self.ngen = _GenHopName()
+
+ def __call__(
+ self,
+ root_type: bsc.Node,
+ root: typing.Optional[ast.filter.FilterExpression] = None,
+ ) -> str:
+ """
+ """
+ # check root_type
+ if not isinstance(root_type, bsc.Node):
+ raise errors.BackendError(f'expected Node, found {root_type}')
+ if root_type not in self.schema.nodes():
+ raise errors.ConsistencyError(f'node {root_type} is not in the schema')
+ # parse root
+ if root is None:
+ cond = ''
+ else:
+ cond = self._parse_filter_expression(root_type, root, '?ent')
+ # assemble query
+ return f'''
+ SELECT ?ent
+ WHERE {{
+ ?ent <{ns.rdf.type}>/<{ns.rdfs.subClassOf}>* <{root_type.uri}> .
+ {cond}
+ }}
+ '''
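+    # For illustration, given a schema node class <http://example.com/Entity>
+    # (an assumed example uri), Filter(schema)(schema.node(URI('http://example.com/Entity')))
+    # produces roughly:
+    #
+    #     SELECT ?ent
+    #     WHERE {
+    #         ?ent <...rdf#type>/<...rdfs#subClassOf>* <http://example.com/Entity> .
+    #     }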
+
+ def _parse_filter_expression(self, type_: T_VERTEX, node: ast.filter.FilterExpression, head: str) -> str:
+ """Route *node* to the handler of the respective FilterExpression subclass."""
+ if isinstance(node, ast.filter.Is):
+ return self._is(type_, node, head)
+ if isinstance(node, ast.filter.Not):
+ return self._not(type_, node, head)
+ if isinstance(node, ast.filter.Has):
+ return self._has(type_, node, head)
+ if isinstance(node, ast.filter.Any):
+ return self._any(type_, node, head)
+ if isinstance(node, ast.filter.All):
+ return self._all(type_, node, head)
+ if isinstance(node, ast.filter.And):
+ return self._and(type_, node, head)
+ if isinstance(node, ast.filter.Or):
+ return self._or(type_, node, head)
+ if isinstance(node, ast.filter.Equals):
+ return self._equals(type_, node, head)
+ if isinstance(node, ast.filter.Substring):
+ return self._substring(type_, node, head)
+ if isinstance(node, ast.filter.StartsWith):
+ return self._starts_with(type_, node, head)
+ if isinstance(node, ast.filter.EndsWith):
+ return self._ends_with(type_, node, head)
+ if isinstance(node, ast.filter.LessThan):
+ return self._less_than(type_, node, head)
+ if isinstance(node, ast.filter.GreaterThan):
+ return self._greater_than(type_, node, head)
+ # invalid node
+ raise errors.BackendError(f'expected filter expression, found {node}')
+
+ def _parse_predicate_expression(
+ self,
+ type_: T_VERTEX,
+ node: ast.filter.PredicateExpression
+ ) -> typing.Tuple[str, T_VERTEX]:
+ """Route *node* to the handler of the respective PredicateExpression subclass."""
+ if isinstance(node, ast.filter.Predicate):
+ return self._predicate(type_, node)
+ if isinstance(node, ast.filter.OneOf):
+ return self._one_of(type_, node)
+ # invalid node
+ raise errors.BackendError(f'expected predicate expression, found {node}')
+
+ def _one_of(self, node_type: T_VERTEX, node: ast.filter.OneOf) -> typing.Tuple[str, T_VERTEX]:
+ """
+ """
+ if not isinstance(node_type, bsc.Node):
+ raise errors.BackendError(f'expected Node, found {node_type}')
+ # walk through predicates
+ suburi, rng = set(), None
+ for pred in node: # OneOf guarantees at least one expression
+ puri, subrng = self._parse_predicate_expression(node_type, pred)
+ # track predicate uris
+ suburi.add(puri)
+ try:
+ # check for more generic range
+ if rng is None or subrng > rng:
+ rng = subrng
+ # check range consistency
+ if not subrng <= rng and not subrng >= rng:
+ raise errors.ConsistencyError(f'ranges {subrng} and {rng} are not related')
+ except TypeError as err: # subrng and rng are not comparable
+ raise errors.ConsistencyError(f'ranges {subrng} and {rng} are not related') from err
+        if rng is None:
+            # for mypy to be certain of the rng type:
+            # OneOf guarantees at least one expression, so rng was assigned in the loop
+            raise errors.UnreachableError()
+ # return joint predicate expression and next range
+ return '|'.join(suburi), rng
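+    # e.g., OneOf over Predicate(p) and Predicate(q) yields ('<p>|<q>', R), where R is
+    # the more generic of the two ranges; the order of the alternatives may vary since
+    # predicate uris are collected in a set.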
+
+ def _predicate(self, node_type: T_VERTEX, node: ast.filter.Predicate) -> typing.Tuple[str, T_VERTEX]:
+ """
+ """
+ # check node_type
+ if not isinstance(node_type, bsc.Node):
+ raise errors.BackendError(f'expected Node, found {node_type}')
+ # fetch predicate and its uri
+ puri = node.predicate
+ # get and check predicate, domain, and range
+ if not self.schema.has_predicate(puri):
+ raise errors.ConsistencyError(f'predicate {puri} is not in the schema')
+ pred = self.schema.predicate(puri)
+ if pred.range is None:
+ # FIXME: It is a design error that Predicates can have a None range...
+ raise errors.BackendError(f'predicate {pred} has no range')
+ dom, rng = pred.domain, pred.range
+ # encapsulate predicate uri
+ puri = f'<{puri}>' # type: ignore [assignment] # variable re-use confuses mypy
+ # apply reverse flag
+ if node.reverse:
+ puri = URI('^' + puri)
+ dom, rng = rng, dom # type: ignore [assignment] # variable re-use confuses mypy
+ # check path consistency
+ if not node_type <= dom:
+ raise errors.ConsistencyError(f'expected type {dom} or subtype thereof, found {node_type}')
+ # return predicate URI and next node type
+ return puri, rng
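+    # e.g., with node.reverse set, a predicate <p> is emitted as the inverse path '^<p>'
+    # and domain/range swap roles, so the hop is traversed from object to subject.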
+
+ def _any(self, node_type: T_VERTEX, node: ast.filter.Any, head: str) -> str:
+ """
+ """
+ if not isinstance(node_type, bsc.Node):
+ raise errors.BackendError(f'expected Node, found {node_type}')
+ # parse predicate
+ pred, next_type = self._parse_predicate_expression(node_type, node.predicate)
+ # parse expression
+ nexthead = next(self.ngen)
+ expr = self._parse_filter_expression(next_type, node.expr, nexthead)
+ # combine results
+ return f'{head} {pred} {nexthead} . {expr}'
+
+ def _all(self, node_type: T_VERTEX, node: ast.filter.All, head: str) -> str:
+ """
+ """
+ # NOTE: All(P, E) := Not(Any(P, Not(E))) and EXISTS(P, ?)
+ if not isinstance(node_type, bsc.Node):
+ raise errors.BackendError(f'expected Node, found {node_type}')
+ # parse rewritten ast
+ expr = self._parse_filter_expression(node_type,
+ ast.filter.Not(
+ ast.filter.Any(node.predicate,
+ ast.filter.Not(node.expr))), head)
+ # parse predicate for existence constraint
+ pred, _ = self._parse_predicate_expression(node_type, node.predicate)
+ temphead = next(self.ngen)
+ # return existence and rewritten expression
+ return f'FILTER EXISTS {{ {head} {pred} {temphead} }} . ' + expr
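+    # For illustration, All(P, E) at head ?ent expands roughly to
+    #
+    #     FILTER EXISTS { ?ent <P> ?hopX } .
+    #     MINUS { ?ent <rdf:type>/<rdfs:subClassOf>* <T> . ?ent <P> ?hopY . MINUS { E(?hopY) } }
+    #
+    # i.e., at least one P-value exists and none violates E.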
+
+ def _and(self, node_type: T_VERTEX, node: ast.filter.And, head: str) -> str:
+ """
+ """
+ sub = [self._parse_filter_expression(node_type, expr, head) for expr in node]
+ return ' . '.join(sub)
+
+ def _or(self, node_type: T_VERTEX, node: ast.filter.Or, head: str) -> str:
+ """
+ """
+ # potential special case optimization:
+ # * ast: Or(Equals('foo'), Equals('bar'), ...)
+ # * query: VALUES ?head { "value1"^^<...> "value2"^^<...> "value3"^<...> ... }
+ sub = [self._parse_filter_expression(node_type, expr, head) for expr in node]
+ sub = ['{' + expr + '}' for expr in sub]
+ return ' UNION '.join(sub)
+
+ def _not(self, node_type: T_VERTEX, node: ast.filter.Not, head: str) -> str:
+ """
+ """
+ expr = self._parse_filter_expression(node_type, node.expr, head)
+ if isinstance(node_type, bsc.Literal):
+ return f'MINUS {{ {expr} }}'
+ # NOTE: for bsc.Node types, we must include at least one expression in the body of MINUS,
+ # otherwise the connection between the context and body of MINUS is lost.
+ # The simplest (and non-interfering) choice is a type statement.
+ return f'MINUS {{ {head} <{ns.rdf.type}>/<{ns.rdfs.subClassOf}>* <{node_type.uri}> . {expr} }}'
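+    # e.g., for a Node head ?hop1 of class <T>, Not(expr) renders as
+    #     MINUS { ?hop1 <rdf:type>/<rdfs:subClassOf>* <T> . expr }
+    # so the MINUS body always shares the ?hop1 binding with the outer pattern.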
+
+ def _has(self, node_type: T_VERTEX, node: ast.filter.Has, head: str) -> str:
+ """
+ """
+ if not isinstance(node_type, bsc.Node):
+ raise errors.BackendError(f'expected Node, found {node_type}')
+ # parse predicate
+ pred, _ = self._parse_predicate_expression(node_type, node.predicate)
+ # get new heads
+ inner = next(self.ngen)
+ outer = next(self.ngen)
+ # predicate count expression (fetch number of predicates at *head*)
+ num_preds = f'{{ SELECT (COUNT(distinct {inner}) as {outer}) WHERE {{ {head} {pred} {inner} }} }}'
+ # count expression
+ # FIXME: We have to ensure that ns.xsd.integer is always known in the schema!
+ count_bounds = self._parse_filter_expression(self.schema.literal(ns.xsd.integer), node.count, outer)
+ # combine
+ return num_preds + ' . ' + count_bounds
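+    # For illustration, Has(P, Equals(2)) at head ?ent becomes roughly
+    #
+    #     { SELECT (COUNT(distinct ?hopX) as ?hopY) WHERE { ?ent <P> ?hopX } } .
+    #     VALUES ?hopY { "2"^^<...XMLSchema#integer> }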
+
+ def _is(self, node_type: T_VERTEX, node: ast.filter.Is, head: str) -> str:
+ """
+ """
+ if not isinstance(node_type, bsc.Node):
+ raise errors.BackendError(f'expected Node, found {node_type}')
+ return f'VALUES {head} {{ <{node.value}> }}'
+
+ def _equals(self, node_type: T_VERTEX, node: ast.filter.Equals, head: str) -> str:
+ """
+ """
+ if not isinstance(node_type, bsc.Literal):
+ raise errors.BackendError(f'expected Literal, found {node}')
+ return f'VALUES {head} {{ "{node.value}"^^<{node_type.uri}> }}'
+
+ def _substring(self, node_type: T_VERTEX, node: ast.filter.Substring, head: str) -> str:
+ """
+ """
+ if not isinstance(node_type, bsc.Literal):
+ raise errors.BackendError(f'expected Literal, found {node_type}')
+ return f'FILTER contains(str({head}), "{node.value}")'
+
+ def _starts_with(self, node_type: T_VERTEX, node: ast.filter.StartsWith, head: str) -> str:
+ """
+ """
+ if not isinstance(node_type, bsc.Literal):
+ raise errors.BackendError(f'expected Literal, found {node_type}')
+ return f'FILTER strstarts(str({head}), "{node.value}")'
+
+ def _ends_with(self, node_type: T_VERTEX, node: ast.filter.EndsWith, head: str) -> str:
+ """
+ """
+ if not isinstance(node_type, bsc.Literal):
+ raise errors.BackendError(f'expected Literal, found {node_type}')
+ return f'FILTER strends(str({head}), "{node.value}")'
+
+ def _less_than(self, node_type: T_VERTEX, node: ast.filter.LessThan, head: str) -> str:
+ """
+ """
+ if not isinstance(node_type, bsc.Literal):
+ raise errors.BackendError(f'expected Literal, found {node_type}')
+ equality = '=' if not node.strict else ''
+ return f'FILTER ({head} <{equality} {float(node.threshold)})'
+
+ def _greater_than(self, node_type: T_VERTEX, node: ast.filter.GreaterThan, head: str) -> str:
+ """
+ """
+ if not isinstance(node_type, bsc.Literal):
+ raise errors.BackendError(f'expected Literal, found {node_type}')
+ equality = '=' if not node.strict else ''
+ return f'FILTER ({head} >{equality} {float(node.threshold)})'
+
+## EOF ##
diff --git a/bsfs/triple_store/sparql/sparql.py b/bsfs/triple_store/sparql/sparql.py
new file mode 100644
index 0000000..c3cbff6
--- /dev/null
+++ b/bsfs/triple_store/sparql/sparql.py
@@ -0,0 +1,297 @@
+"""
+
+Part of the BlackStar filesystem (bsfs) module.
+A copy of the license is provided with the project.
+Author: Matthias Baumgartner, 2022
+"""
+# imports
+import itertools
+import typing
+import rdflib
+
+# bsfs imports
+from bsfs import schema as bsc
+from bsfs.query import ast
+from bsfs.utils import errors, URI
+
+# inner-module imports
+from . import parse_filter
+from .. import base
+
+
+# exports
+__all__: typing.Sequence[str] = (
+ 'SparqlStore',
+ )
+
+
+## code ##
+
+class _Transaction():
+ """Lightweight rdflib transactions for in-memory databases."""
+
+ # graph instance.
+ _graph: rdflib.Graph
+
+ # current log of added triples.
+ _added: typing.List[typing.Any]
+
+ # current log of removed triples.
+ _removed: typing.List[typing.Any]
+
+ def __init__(self, graph: rdflib.Graph):
+ self._graph = graph
+ # initialize internal structures
+ self.commit()
+
+ def commit(self):
+ """Commit temporary changes."""
+ self._added = []
+ self._removed = []
+
+ def rollback(self):
+ """Undo changes since the last commit."""
+ for triple in self._added:
+ self._graph.remove(triple)
+ for triple in self._removed:
+ self._graph.add(triple)
+
+ def add(self, triple: typing.Any):
+ """Add a triple to the graph."""
+ if triple not in self._graph:
+ self._added.append(triple)
+ self._graph.add(triple)
+
+ def remove(self, triple: typing.Any):
+ """Remove a triple from the graph."""
+ if triple in self._graph:
+ self._removed.append(triple)
+ self._graph.remove(triple)
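+
+# Usage sketch (illustrative uris): a transaction wraps a graph and can undo
+# any changes made since the last commit.
+#
+#     graph = rdflib.Graph()
+#     txn = _Transaction(graph)
+#     txn.add((rdflib.URIRef('http://example.com/a'),
+#              rdflib.RDF.type,
+#              rdflib.URIRef('http://example.com/T')))
+#     txn.rollback()  # graph no longer contains the triple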
+
+
+class SparqlStore(base.TripleStoreBase):
+ """Sparql-based triple store.
+
+ The sparql triple store uses a third-party backend
+ (currently rdflib) to store triples and manages them via
+ the Sparql query language.
+
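+    A minimal usage sketch (node class and guid uris are illustrative):
+
+        store = SparqlStore.Open()
+        store.schema = my_schema
+        entity = my_schema.node(URI('http://example.com/Entity'))
+        store.create(entity, [URI('http://example.com/entity#1')])
+        store.commit()
+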
+ """
+
+ # The rdflib graph.
+ _graph: rdflib.Graph
+
+ # Current transaction.
+ _transaction: _Transaction
+
+ # The local schema.
+ _schema: bsc.Schema
+
+ # Filter parser
+ _filter_parser: parse_filter.Filter
+
+ def __init__(self):
+ super().__init__(None)
+ self._graph = rdflib.Graph()
+ self._transaction = _Transaction(self._graph)
+ self._schema = bsc.Schema.Empty()
+ self._filter_parser = parse_filter.Filter(self._schema)
+
+ # NOTE: mypy and pylint complain about the **kwargs not being listed (contrasting super)
+ # However, not having it here is clearer since it's explicit that there are no arguments.
+ @classmethod
+ def Open(cls) -> 'SparqlStore': # type: ignore [override] # pylint: disable=arguments-differ
+ return cls()
+
+ def commit(self):
+ self._transaction.commit()
+
+ def rollback(self):
+ self._transaction.rollback()
+
+ @property
+ def schema(self) -> bsc.Schema:
+ return self._schema
+
+ @schema.setter
+ def schema(self, schema: bsc.Schema):
+        # check args: Schema instance
+ if not isinstance(schema, bsc.Schema):
+ raise TypeError(schema)
+ # check compatibility: No contradicting definitions
+ if not self.schema.consistent_with(schema):
+ raise errors.ConsistencyError(f'{schema} is inconsistent with {self.schema}')
+
+ # commit the current transaction
+ self.commit()
+
+ # adjust instances:
+ # nothing to do for added classes
+ # delete instances of removed classes
+
+ # get deleted classes
+ sub = self.schema - schema
+
+        for pred in sub.predicates():
+ # remove predicate instances
+ for src, trg in self._graph.subject_objects(rdflib.URIRef(pred.uri)):
+ self._transaction.remove((src, rdflib.URIRef(pred.uri), trg))
+ # remove predicate definition
+ if pred.parent is not None:
+ self._transaction.remove((
+ rdflib.URIRef(pred.uri),
+ rdflib.RDFS.subClassOf,
+ rdflib.URIRef(pred.parent.uri),
+ ))
+
+ # remove node instances
+        for node in sub.nodes():
+ # iterate through node instances
+ for inst in self._graph.subjects(rdflib.RDF.type, rdflib.URIRef(node.uri)):
+ # remove triples where the instance is in the object position
+ for src, pred in self._graph.subject_predicates(inst):
+ self._transaction.remove((src, pred, inst))
+ # remove triples where the instance is in the subject position
+ for pred, trg in self._graph.predicate_objects(inst):
+ self._transaction.remove((inst, pred, trg))
+ # remove instance
+ self._transaction.remove((inst, rdflib.RDF.type, rdflib.URIRef(node.uri)))
+ # remove node definition
+ if node.parent is not None:
+ self._transaction.remove((
+ rdflib.URIRef(node.uri),
+ rdflib.RDFS.subClassOf,
+ rdflib.URIRef(node.parent.uri),
+ ))
+
+        for lit in sub.literals():
+ # remove literal definition
+ if lit.parent is not None:
+ self._transaction.remove((
+ rdflib.URIRef(lit.uri),
+ rdflib.RDFS.subClassOf,
+ rdflib.URIRef(lit.parent.uri),
+ ))
+
+ # add predicate, node, and literal hierarchies to the graph
+ for itm in itertools.chain(schema.predicates(), schema.nodes(), schema.literals()):
+ if itm.parent is not None:
+ self._transaction.add((rdflib.URIRef(itm.uri), rdflib.RDFS.subClassOf, rdflib.URIRef(itm.parent.uri)))
+
+ # commit instance changes
+ self.commit()
+
+ # migrate schema
+ self._schema = schema
+ self._filter_parser.schema = schema
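+
+    # Migration sketch: assigning a new schema prunes first, then extends.
+    #
+    #     store.schema = new_schema
+    #     # instances of classes in (old schema - new_schema) have been deleted;
+    #     # class hierarchies of new_schema are now present in the graph.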
+
+ def get(
+ self,
+ node_type: bsc.Node,
+ query: typing.Optional[ast.filter.FilterExpression] = None,
+ ) -> typing.Iterator[URI]:
+ if node_type not in self.schema.nodes():
+ raise errors.ConsistencyError(f'{node_type} is not defined in the schema')
+        if query is not None and not isinstance(query, ast.filter.FilterExpression):
+            raise TypeError(query)
+ for guid, in self._graph.query(self._filter_parser(node_type, query)):
+ yield URI(guid)
+
+ def _has_type(self, subject: URI, node_type: bsc.Node) -> bool:
+ """Return True if *subject* is a node of class *node_type* or a subclass thereof."""
+ if node_type not in self.schema.nodes():
+ raise errors.ConsistencyError(f'{node_type} is not defined in the schema')
+
+        subject_types = list(self._graph.objects(rdflib.URIRef(subject), rdflib.RDF.type))
+        if len(subject_types) == 0:
+            return False
+        if len(subject_types) > 1:
+            # nodes are created with exactly one rdf:type (see *create*);
+            # multiple types would indicate a corrupted graph
+            raise errors.UnreachableError()
+        node = self.schema.node(URI(subject_types[0])) # type: ignore [arg-type] # URI is a subtype of str
+        return node == node_type or node_type in node.parents()
+
+ def exists(
+ self,
+ node_type: bsc.Node,
+ guids: typing.Iterable[URI],
+ ) -> typing.Iterable[URI]:
+ return (subj for subj in guids if self._has_type(subj, node_type))
+
+ def create(
+ self,
+ node_type: bsc.Node,
+ guids: typing.Iterable[URI],
+ ):
+ # check node_type
+ if node_type not in self.schema.nodes():
+ raise errors.ConsistencyError(f'{node_type} is not defined in the schema')
+ # check and create guids
+ for guid in guids:
+ subject = rdflib.URIRef(guid)
+ # check node existence
+ if (subject, rdflib.RDF.type, None) in self._graph:
+ # FIXME: node exists and may have a different type! ignore? raise? report?
+ continue
+ # add node
+ self._transaction.add((subject, rdflib.RDF.type, rdflib.URIRef(node_type.uri)))
+
+ def set(
+ self,
+ node_type: bsc.Node,
+ guids: typing.Iterable[URI],
+ predicate: bsc.Predicate,
+ values: typing.Iterable[typing.Any],
+ ):
+ # check node_type
+ if node_type not in self.schema.nodes():
+ raise errors.ConsistencyError(f'{node_type} is not defined in the schema')
+ # check predicate
+ if predicate not in self.schema.predicates():
+ raise errors.ConsistencyError(f'{predicate} is not defined in the schema')
+ if not node_type <= predicate.domain:
+ raise errors.ConsistencyError(f'{node_type} must be a subclass of {predicate.domain}')
+ # NOTE: predicate.range is in the schema since predicate is in the schema.
+ # materialize values
+ values = set(values)
+ # check values
+ if len(values) == 0:
+ return
+ if predicate.unique and len(values) != 1:
+ raise ValueError(values)
+        if isinstance(predicate.range, bsc.Node):
+            # values was already materialized above
+            inconsistent = {val for val in values if not self._has_type(val, predicate.range)}
+ # catches nodes that don't exist and nodes that have an inconsistent type
+ if len(inconsistent) > 0:
+ raise errors.InstanceError(inconsistent)
+ # check guids
+ # FIXME: Fail or skip inexistent nodes?
+ guids = set(guids)
+ inconsistent = {guid for guid in guids if not self._has_type(guid, node_type)}
+ if len(inconsistent) > 0:
+ raise errors.InstanceError(inconsistent)
+
+ # add triples
+ pred = rdflib.URIRef(predicate.uri)
+ for guid, value in itertools.product(guids, values):
+ guid = rdflib.URIRef(guid)
+ # convert value
+ if isinstance(predicate.range, bsc.Literal):
+ value = rdflib.Literal(value, datatype=rdflib.URIRef(predicate.range.uri))
+ elif isinstance(predicate.range, bsc.Node):
+ value = rdflib.URIRef(value)
+ else:
+ raise errors.UnreachableError()
+ # clear triples for unique predicates
+ if predicate.unique:
+ for obj in self._graph.objects(guid, pred):
+ if obj != value:
+ self._transaction.remove((guid, pred, obj))
+ # add triple
+ self._transaction.add((guid, pred, value))
+
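+# Usage sketch for SparqlStore.set (illustrative uris):
+#
+#     store.set(
+#         node_type=store.schema.node(URI('http://example.com/Entity')),
+#         guids=[URI('http://example.com/entity#1')],
+#         predicate=store.schema.predicate(URI('http://example.com/filename')),
+#         values=['hello.txt'],
+#     )
+#     # for a unique predicate, previously set values are replaced
+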
+## EOF ##