Reduce flake8 exceptions in setup.cfg
UlrichB22 committed Jul 8, 2022
1 parent 20b340e commit 576d9ed
Showing 3 changed files with 20 additions and 20 deletions.
2 changes: 0 additions & 2 deletions setup.cfg
@@ -60,6 +60,4 @@ exclude =
     src/moin/config/default.py, # some formatting issues expected there
     src/moin/constants/chartypes.py, # auto-generated, long lines
     src/moin/datastructures/__init__.py # import on package level
-    src/moin/scripts/migration/moin19/_logfile19.py, # legacy code "as is"
-    src/moin/scripts/migration/moin19/_utils19.py, # legacy code "as is"
     src/moin/utils/SubProcess.py, # 3rd party stuff, patched stdlib code
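
Dropping _logfile19.py and _utils19.py from the exclude list means flake8 now checks these two legacy migration modules, which is why the remaining hunks below clean up their style. As a rough, hypothetical sketch (not taken from the commit; names are made up), the pycodestyle rules being satisfied look like this:

# Hypothetical illustration of the rule categories addressed below.
offsets = []
offsets.append(0)  # E261: at least two spaces before an inline comment
for i in range(10 + 1):  # E226: spaces around arithmetic operators
    offsets.append(i)  # E501 is avoided by wrapping long calls over several lines


class Example:  # E302: two blank lines before a top-level definition
    pass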
30 changes: 16 additions & 14 deletions src/moin/scripts/migration/moin19/_logfile19.py
@@ -21,6 +21,7 @@
 class LogError(Exception):
     """ Base class for log errors """
 
+
 class LogMissing(LogError):
     """ Raised when the log is missing """
 
@@ -70,11 +71,11 @@ def __init__(self, file, offset, size, forward=True):
 
         # now calculate the file offsets of all read lines
         offsets = [len(line) for line in lines]
-        offsets.append(0) # later this element will have the file offset after the last read line
+        offsets.append(0)  # later this element will have the file offset after the last read line
 
         lengthpreviousline = 0
         offset = begin
-        for i in range(linecount+1):
+        for i in range(linecount + 1):
             offset += lengthpreviousline
             lengthpreviousline = offsets[i]
             offsets[i] = offset
@@ -100,7 +101,7 @@ def __init__(self, filename, buffer_size=4096):
         """
         self.loglevel = logging.NOTSET
         self.__filename = filename
-        self.__buffer = None # currently used buffer, points to one of the following:
+        self.__buffer = None  # currently used buffer, points to one of the following:
         self.__buffer1 = None
         self.__buffer2 = None
         self.buffer_size = buffer_size
@@ -137,7 +138,7 @@ def __getattr__(self, name):
         """
         generate some attributes when needed
         """
-        if name == "_LogFile__rel_index": # Python black magic: this is the real name of the __rel_index attribute
+        if name == "_LogFile__rel_index":  # Python black magic: this is the real name of the __rel_index attribute
             # starting iteration from begin
             self.__buffer1 = LineBuffer(self._input, 0, self.buffer_size)
             self.__buffer2 = LineBuffer(self._input,
@@ -152,7 +153,7 @@ def __getattr__(self, name):
                 # Use binary mode in order to retain \r - otherwise the offset calculation would fail.
                 self._input = open(self.__filename, "rb", )
             except IOError as err:
-                if err.errno == errno.ENOENT: # "file not found"
+                if err.errno == errno.ENOENT:  # "file not found"
                     # XXX workaround if edit-log does not exist: just create it empty
                     # if this workaround raises another error, we don't catch
                     # it, so the admin will see it.
@@ -161,7 +162,8 @@
                     f.close()
                     self._input = open(self.__filename, "rb", )
                 else:
-                    logging.error("logfile: {0!r} IOERROR errno {1} ({2})".format(self.__filename, err.errno, os.strerror(err.errno)))
+                    logging.error("logfile: {0!r} IOERROR errno {1} ({2})".format(self.__filename,
+                                                                                  err.errno, os.strerror(err.errno)))
                     raise
             return self._input
         elif name == "_output":
@@ -233,7 +235,7 @@ def peek(self, lines):
                     # change to buffer 1
                     self.__buffer = self.__buffer1
                     self.__rel_index += self.__buffer.len
-            else: # self.__buffer is self.__buffer1
+            else:  # self.__buffer is self.__buffer1
                 if self.__buffer.offsets[0] == 0:
                     # already at the beginning of the file
                     self.__rel_index = 0
@@ -254,17 +256,16 @@ def peek(self, lines):
                     # change to buffer 2
                     self.__rel_index -= self.__buffer.len
                     self.__buffer = self.__buffer2
-            else: # self.__buffer is self.__buffer2
+            else:  # self.__buffer is self.__buffer2
                 # try to load next buffer
                 tmpbuff = LineBuffer(self._input,
                                      self.__buffer.offsets[-1],
                                      self.buffer_size)
                 if tmpbuff.len == 0:
                     # end of file
                     if self.__lineno is not None:
-                        self.__lineno += (lines -
-                                          (self.__rel_index - self.__buffer.len))
-                    self.__rel_index = self.__buffer.len # point to after last read line
+                        self.__lineno += (lines - (self.__rel_index - self.__buffer.len))
+                    self.__rel_index = self.__buffer.len  # point to after last read line
                     return True
                 # shift buffers
                 self.__rel_index -= self.__buffer.len
@@ -337,7 +338,7 @@ def to_begin(self):
     def to_end(self):
         """moves file position to the end"""
         logging.log(self.loglevel, "LogFile.to_end {0}".format(self.__filename))
-        self._input.seek(0, 2) # to end of file
+        self._input.seek(0, 2)  # to end of file
         size = self._input.tell()
         if self.__buffer2 is None or size > self.__buffer2.offsets[-1]:
             self.__buffer2 = LineBuffer(self._input,
@@ -454,5 +455,6 @@ def _add(self, line):
         if line[-1] != '\n':
             line += '\n'
         self._output.write(line)
-        self._output.close() # does this maybe help against the sporadic fedora wikis 160 \0 bytes in the edit-log?
-        del self._output # re-open the output file automagically
+        self._output.close()  # does this maybe help against the
+        # sporadic fedora wikis 160 \0 bytes in the edit-log?
+        del self._output  # re-open the output file automagically
8 changes: 4 additions & 4 deletions src/moin/scripts/migration/moin19/_utils19.py
@@ -29,7 +29,7 @@ def split_body(body):
     comments = []
     while body.startswith('#'):
         try:
-            line, body = body.split('\n', 1) # extract first line
+            line, body = body.split('\n', 1)  # extract first line
             line = line.rstrip('\r')
         except ValueError:
             line = body
@@ -43,7 +43,7 @@
             if line[1] == '#':  # two hash marks are a comment
                 comments.append(line + '\n')
             else:
-                verb, args = (line[1:] + ' ').split(' ', 1) # split at the first blank
+                verb, args = (line[1:] + ' ').split(' ', 1)  # split at the first blank
                 pi.setdefault(verb.lower(), []).append(args.strip())
 
     for key, value in pi.items():
@@ -52,7 +52,7 @@ def split_body(body):
             pi[key] = ' '.join(value)
         else:
             # for keys that can't occur multiple times, don't use a list:
-            pi[key] = value[-1] # use the last value to copy 1.9 parsing behaviour
+            pi[key] = value[-1]  # use the last value to copy 1.9 parsing behaviour
 
     if comments:
         body = ''.join(comments) + body
@@ -139,7 +139,7 @@ def unquoteWikiname(filename, charset=CHARSET19):
                 raise InvalidFileNameError(filename)
             try:
                 for i in range(0, len(group), 2):
-                    byte = group[i:i+2]
+                    byte = group[i:i + 2]
                     parts.append(bytes.fromhex(byte))
             except ValueError:
                 # byte not in hex, e.g 'xy'
