style: ignore flake8 name errors for existing names
This patch sets up some flake8 ignores for names that will come up
more in the future now that we have flakeheaven running.

One case where this is relevant:
- #1944
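
For reference, the pep8-naming codes silenced in this diff cover (meanings assumed from the plugin's documentation): N801 class names, N802 function names, N803 argument names, N806 local variable names, N812 lowercase imported as non-lowercase, N815 mixedCase names in class scope, and N816 mixedCase names in global scope. A minimal, hypothetical sketch of the per-line suppression style used below (not code taken from rdflib):

    # Hypothetical example: the camelCase name would normally trigger N802
    # (function name should be lowercase); the inline comment tells
    # flake8/flakeheaven to skip only that check on this line.
    def toPython(value: object) -> str:  # noqa: N802
        # A mixedCase local would normally trigger N806.
        clsName = type(value).__name__  # noqa: N806
        return "%s(%r)" % (clsName, value)

The [tool.flakeheaven.exceptions] tables in pyproject.toml achieve the same effect per path pattern rather than per line.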
aucampia committed May 16, 2022
1 parent a2866cd commit 35bdde3
Showing 3 changed files with 49 additions and 44 deletions.
17 changes: 9 additions & 8 deletions pyproject.toml
@@ -16,14 +16,15 @@ pyflakes = [
pep8-naming = ["+*"]

[tool.flakeheaven.exceptions."rdflib/plugins/sparql/*"]
pep8-naming = [
"-N802",
"-N803",
"-N806",
"-N812",
"-N816",
"-N801",
]
pep8-naming = ["-N802", "-N803", "-N806", "-N812", "-N816", "-N801"]
[tool.flakeheaven.exceptions."rdflib/namespace/_*"]
pep8-naming = ["-N815"]
[tool.flakeheaven.exceptions."rdflib/extras/infixowl.py"]
pep8-naming = ["-N802", "-N803", "-N806", "-N815"]
[tool.flakeheaven.exceptions."rdflib/plugins/parsers/notation3.py"]
pep8-naming = ["-N802", "-N803", "-N806", "-N816"]
[tool.flakeheaven.exceptions."rdflib/plugins/serializers/turtle.py"]
pep8-naming = ["-N802", "-N806", "-N815"]


[tool.black]
54 changes: 28 additions & 26 deletions rdflib/term.py
@@ -238,7 +238,7 @@ class IdentifiedNode(Identifier):
def __getnewargs__(self) -> Tuple[str]:
return (str(self),)

def toPython(self) -> str:
def toPython(self) -> str: # noqa: N802
return str(self)


@@ -317,9 +317,9 @@ def __reduce__(self) -> Tuple[Type["URIRef"], Tuple[str]]:

def __repr__(self) -> str:
if self.__class__ is URIRef:
clsName = "rdflib.term.URIRef"
clsName = "rdflib.term.URIRef" # noqa: N806
else:
clsName = self.__class__.__name__
clsName = self.__class__.__name__ # noqa: N806

return """%s(%s)""" % (clsName, super(URIRef, self).__repr__())

@@ -468,9 +468,9 @@ def __reduce__(self) -> Tuple[Type["BNode"], Tuple[str]]:

def __repr__(self) -> str:
if self.__class__ is BNode:
clsName = "rdflib.term.BNode"
clsName = "rdflib.term.BNode" # noqa: N806
else:
clsName = self.__class__.__name__
clsName = self.__class__.__name__ # noqa: N806
return """%s('%s')""" % (clsName, str(self))

def skolemize(
@@ -1603,12 +1603,12 @@ def __repr__(self) -> str:
if self.datatype is not None:
args.append("datatype=%s" % repr(self.datatype))
if self.__class__ == Literal:
clsName = "rdflib.term.Literal"
clsName = "rdflib.term.Literal" # noqa: N806
else:
clsName = self.__class__.__name__
clsName = self.__class__.__name__ # noqa: N806
return """%s(%s)""" % (clsName, ", ".join(args))

def toPython(self) -> Any:
def toPython(self) -> Any: # noqa: N802
"""
Returns an appropriate python datatype derived from this RDF Literal
"""
@@ -1618,15 +1618,15 @@ def toPython(self) -> Any:
return self


def _parseXML(xmlstring: str) -> xml.dom.minidom.Document:
def _parseXML(xmlstring: str) -> xml.dom.minidom.Document: # noqa: N802
retval = xml.dom.minidom.parseString(
"<rdflibtoplevelelement>%s</rdflibtoplevelelement>" % xmlstring
)
retval.normalize()
return retval


def _parseHTML(htmltext: str) -> xml.dom.minidom.DocumentFragment:
def _parseHTML(htmltext: str) -> xml.dom.minidom.DocumentFragment: # noqa: N802
try:
import html5lib

@@ -1641,7 +1641,7 @@ def _parseHTML(htmltext: str) -> xml.dom.minidom.DocumentFragment:
)


def _writeXML(
def _writeXML( # noqa: N802
xmlnode: Union[xml.dom.minidom.Document, xml.dom.minidom.DocumentFragment]
) -> bytes:
if isinstance(xmlnode, xml.dom.minidom.DocumentFragment):
@@ -1668,7 +1668,7 @@ def _unhexlify(value: Union[str, bytes, Literal]) -> bytes:
return unhexlify(value)


def _parseBoolean(value: Union[str, bytes]) -> bool:
def _parseBoolean(value: Union[str, bytes]) -> bool: # noqa: N802
"""
Boolean is a datatype with value space {true,false},
lexical space {"true", "false","1","0"} and
@@ -1895,7 +1895,7 @@ def _well_formed_negative_integer(lexical: Union[str, bytes], value: Any) -> boo

def _py2literal(
obj: Any,
pType: Any,
pType: Any, # noqa: N803
castFunc: Optional[Callable[[Any], Any]],
dType: Optional[str],
) -> Tuple[Any, Optional[str]]:
@@ -1907,20 +1907,20 @@ def _py2literal(
return obj, None


def _castPythonToLiteral(
def _castPythonToLiteral( # noqa: N802
obj: Any, datatype: Optional[str]
) -> Tuple[Any, Optional[str]]:
"""
Casts a tuple of a python type and a special datatype URI to a tuple of the lexical value and a
datatype URI (or None)
"""
castFunc: Optional[Callable[[Any], Union[str, bytes]]]
dType: Optional[str]
for (pType, dType), castFunc in _SpecificPythonToXSDRules:
castFunc: Optional[Callable[[Any], Union[str, bytes]]] # noqa: N806
dType: Optional[str] # noqa: N806
for (pType, dType), castFunc in _SpecificPythonToXSDRules: # noqa: N806
if isinstance(obj, pType) and dType == datatype:
return _py2literal(obj, pType, castFunc, dType)

for pType, (castFunc, dType) in _GenericPythonToXSDRules:
for pType, (castFunc, dType) in _GenericPythonToXSDRules: # noqa: N806
if isinstance(obj, pType):
return _py2literal(obj, pType, castFunc, dType)
return obj, None # TODO: is this right for the fall through case?
@@ -2024,17 +2024,19 @@ def _castPythonToLiteral(
URIRef(_XSD_PFX + "unsignedByte"): _well_formed_unsignedbyte,
}

_toPythonMapping: Dict[Optional[str], Optional[Callable[[str], Any]]] = {}
_toPythonMapping: Dict[Optional[str], Optional[Callable[[str], Any]]] = {} # noqa: N816

_toPythonMapping.update(XSDToPython)


def _castLexicalToPython(lexical: Union[str, bytes], datatype: Optional[str]) -> Any:
def _castLexicalToPython( # noqa: N802
lexical: Union[str, bytes], datatype: Optional[str]
) -> Any:
"""
Map a lexical form to the value-space for the given datatype
:returns: a python object for the value or ``None``
"""
convFunc = _toPythonMapping.get(datatype, False)
convFunc = _toPythonMapping.get(datatype, False) # noqa: N806
if convFunc:
if TYPE_CHECKING:
# NOTE: This is here because convFunc is seen as
@@ -2069,7 +2071,7 @@ def _castLexicalToPython(lexical: Union[str, bytes], datatype: Optional[str]) ->
_AnyT = TypeVar("_AnyT", bound=Any)


def _normalise_XSD_STRING(lexical_or_value: _AnyT) -> _AnyT:
def _normalise_XSD_STRING(lexical_or_value: _AnyT) -> _AnyT: # noqa: N802
"""
Replaces \t, \n, \r (#x9 (tab), #xA (linefeed), and #xD (carriage return)) with space without any whitespace collapsing
"""
@@ -2142,13 +2144,13 @@ def __new__(cls, value: str) -> "Variable":

def __repr__(self) -> str:
if self.__class__ is Variable:
clsName = "rdflib.term.Variable"
clsName = "rdflib.term.Variable" # noqa: N806
else:
clsName = self.__class__.__name__
clsName = self.__class__.__name__ # noqa: N806

return """%s(%s)""" % (clsName, super(Variable, self).__repr__())

def toPython(self) -> str:
def toPython(self) -> str: # noqa: N802
return "?%s" % self

def n3(self, namespace_manager: Optional["NamespaceManager"] = None) -> str:
@@ -2166,7 +2168,7 @@ def __reduce__(self) -> Tuple[Type["Variable"], Tuple[str]]:
_ORDERING.update({BNode: 10, Variable: 20, URIRef: 30, Literal: 40})


def _isEqualXMLNode(
def _isEqualXMLNode( # noqa: N802
node: Union[
None,
xml.dom.minidom.Attr,
22 changes: 12 additions & 10 deletions rdflib/void.py
@@ -4,7 +4,9 @@
from rdflib.namespace import RDF, VOID


def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
def generateVoID( # noqa: N802
g, dataset=None, res=None, distinctForPartitions=True # noqa: N803
):
"""
Returns a new graph with a VoID description of the passed dataset
@@ -24,8 +26,8 @@ def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
"""

typeMap = collections.defaultdict(set)
classes = collections.defaultdict(set)
typeMap = collections.defaultdict(set) # noqa: N806
classes = collections.defaultdict(set) # noqa: N806
for e, c in g.subject_objects(RDF.type):
classes[c].add(e)
typeMap[e].add(c)
@@ -34,13 +36,13 @@ def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
subjects = set()
objects = set()
properties = set()
classCount = collections.defaultdict(int)
propCount = collections.defaultdict(int)
classCount = collections.defaultdict(int) # noqa: N806
propCount = collections.defaultdict(int) # noqa: N806

classProps = collections.defaultdict(set)
classObjects = collections.defaultdict(set)
propSubjects = collections.defaultdict(set)
propObjects = collections.defaultdict(set)
classProps = collections.defaultdict(set) # noqa: N806
classObjects = collections.defaultdict(set) # noqa: N806
propSubjects = collections.defaultdict(set) # noqa: N806
propObjects = collections.defaultdict(set) # noqa: N806

for s, p, o in g:

@@ -109,7 +111,7 @@ def generateVoID(g, dataset=None, res=None, distinctForPartitions=True):
if distinctForPartitions:

entities = 0
propClasses = set()
propClasses = set() # noqa: N806
for s in propSubjects[p]:
if s in typeMap:
entities += 1
