Mirror of https://github.com/Sei-Lisa/LSL-PyOptimizer, synced 2025-07-01 07:38:21 +00:00
Misc small non-user-visible fixes
Some are Unicode vs str fixes, others are style adjustments, others are in unused functions, and so on.
parent 59c3f9fc71
commit d9938f1a37
5 changed files with 33 additions and 32 deletions
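Most of the "Unicode vs str" changes below come down to one Python 2/3 difference: a bare '...' literal is bytes on Python 2 but text on Python 3, so code that must behave the same on both has to spell out u'...' or b'...'. A minimal illustrative sketch (not code from this patch):

# Illustrative only; shows why the commit makes string literals explicit.
text = u'"'        # text on both Python 2 and Python 3
data = b'"'        # bytes on both

print(text == data)    # False on Python 3: str never compares equal to bytes

try:
    text + data        # TypeError on Python 3; silently works on Python 2
except TypeError as e:
    print('cannot mix str and bytes:', e)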
@@ -18,9 +18,9 @@
 # Expand inlined functions. This could perhaps be made at parse time, but that
 # would obfuscate the source too much.

-from lslcommon import nr
+from lslopt.lslcommon import nr

-# Statement-level nodes that have at most 1 child and is of type expression
+# Statement-level nodes that have at most 1 child and are of type expression
 SINGLE_OPT_EXPR_CHILD_NODES = frozenset({'DECL', 'EXPR', 'RETURN',
                                          '@', 'STSW', 'JUMP', ';', 'LAMBDA'})

@@ -3007,7 +3007,7 @@ list lazy_list_set(list L, integer i, list v)
         del self.scopestack

         if self.enable_inline:
-            import lslinliner
+            from lslopt import lslinliner
             lslinliner.inliner().inline(self.tree, self.symtab)

         treesymtab = self.tree, self.symtab

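The two import changes above are Python 3 compatibility fixes: Python 3 dropped implicit relative imports, so sibling modules inside the lslopt package have to be imported through the package name (or with an explicit relative import). A small self-contained sketch using a throwaway hypothetical package, not the repo's layout:

import os, sys, tempfile

# Build a tiny package on the fly to demonstrate the import style.
root = tempfile.mkdtemp()
os.mkdir(os.path.join(root, 'pkg'))
open(os.path.join(root, 'pkg', '__init__.py'), 'w').close()
with open(os.path.join(root, 'pkg', 'sibling.py'), 'w') as f:
    f.write('nr = 42\n')

sys.path.insert(0, root)

# Python 2 let a module inside pkg write "from sibling import nr";
# on Python 3 only the package-qualified (or explicit relative) form works:
from pkg.sibling import nr
print(nr)   # 42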
main.py (41 changed lines)

@@ -96,7 +96,7 @@ def PreparePreproc(script):
     line up to the point where the string was closed. That will place the next
     token in the same line and column it previously was.
     """
-    s = ''
+    s = u''
     nlines = 0
     col = 0

@@ -120,29 +120,29 @@ def PreparePreproc(script):
     # least surprise seems to suggest to accept valid LSL strings as LSL
     # instead of reproducing that C quirk. This also matches what FS is doing
     # currently, so it's good for compatibility.
-    tok = re.compile(str2u(
+    tok = re.compile(str2u(  # Python 3.5 does not recognize ur'...' literals
         r'(?:'
             r'/(?:\?\?/\n|\\\n)*\*.*?\*(?:\?\?/\n|\\\n)*/'
             r'|/(?:\?\?/\n|\\\n)*/(?:\?\?/\n|\\\n|[^\n])*\n'
             r'|[^"]'
         r')+'
         r'|"'
-        ), re.S)
+        , 'utf8'), re.S)
     # RE used inside strings.
     tok2 = re.compile(str2u(
         r'(?:'
             r"\?\?[='()!<>-]"               # valid trigraph except ??/ (backslash)
             r"|(?:\?\?/|\\)(?:\?\?[/='()!<>-]|[^\n])"
                                             # backslash trigraph or actual backslash,
                                             # followed by any trigraph or non-newline
             r'|(?!\?\?/\n|\\\n|"|\n).'
                                             # any character that doesn't start a trigraph/
                                             # backslash escape followed by a newline
                                             # or is a newline or double quote, as we're
                                             # interested in all those individually.
-        r')+'                               # as many of those as possible
+        r')'                                # as many of those as possible
         r'|\?\?/\n|\\\n|\n|"'               # or any of those individually
-        ))
+        , 'utf8'))

     pos = 0
     match = tok.search(script, pos)

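str2u is one of the project's string helpers and is not part of this diff; the hunk only shows that the regex source is now passed through it with an explicit 'utf8' argument, because a ur'...' literal (raw and Unicode at once) no longer exists in Python 3, so the pattern is written as a plain raw str and converted at run time. A rough sketch of what such a helper presumably does, under the hypothetical name str2u_sketch:

import re, sys

def str2u_sketch(s, enc=None):
    """Hypothetical stand-in for the repo's str2u: return s as Unicode text
    on both Python 2 and 3. The real implementation lives in the project's
    string utilities; this only illustrates the idea."""
    if sys.version_info[0] >= 3:
        return s                          # Python 3: str literals are already text
    encoding = getattr(enc, 'encoding', enc) or 'utf8'
    return s.decode(encoding)             # Python 2: decode the native str

# The pattern stays a plain r'...' literal and becomes Unicode at run time:
tok_demo = re.compile(str2u_sketch(r'[^"]+|"'), re.S)
print(tok_demo.findall(u'ab"cd'))         # ['ab', '"', 'cd']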
@@ -157,24 +157,24 @@ def PreparePreproc(script):
             matched2 = match2.group(0)
             pos += len(matched2)

-            if matched2 == b'\\\n' or matched2 == b'??/\n':
+            if matched2 == u'\\\n' or matched2 == u'??/\n':
                 nlines += 1
                 col = 0
                 match2 = tok2.search(script, pos)
                 continue
-            if matched2 == b'"':
+            if matched2 == u'"':
                 if nlines:
-                    if script[pos:pos+1] == b'\n':
+                    if script[pos:pos+1] == u'\n':
                         col = -1  # don't add spaces if not necessary
                     # col misses the quote added here, so add 1
-                    s += b'"' + b'\n'*nlines + b' '*(col+1)
+                    s += u'"' + u'\n'*nlines + u' '*(col+1)
                 else:
-                    s += b'"'
+                    s += u'"'
                 break
-            if matched2 == b'\n':
+            if matched2 == u'\n':
                 nlines += 1
                 col = 0
-                s += b'\\n'
+                s += u'\\n'
             else:
                 col += len(matched2)
                 s += matched2

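The b'...' to u'...' changes above matter because PreparePreproc works on text: on Python 3 a str coming out of a regex match never compares equal to a bytes literal, and appending bytes to a Unicode accumulator raises TypeError. Illustrative only, not repo code:

import re

script = u'say("hi")\n'
matched2 = re.search(u'"', script).group(0)   # a str (Unicode) on Python 3

print(matched2 == b'"')    # False on Python 3, True on Python 2
print(matched2 == u'"')    # True on both

s = u''
# s += b'"'                # would raise TypeError on Python 3
s += u'"'                  # the patched code keeps everything as text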
@@ -453,7 +453,7 @@ def main(argv):
                 if chgfix[1:] not in validoptions:
                     Usage(argv[0], 'optimizer-options')
                     werr(u"\nError: Unrecognized"
                          u" optimizer option: %s\n" % chg.decode('utf8'))
                     return 1
                 if chgfix[0] == '-':
                     options.discard(chgfix[1:])

@@ -642,6 +642,7 @@ def main(argv):
     # Append user arguments at the end to allow them to override defaults
     preproc_cmdline += preproc_user_postargs

+    # Transform to bytes and check Unicode validity
     if type(script) is unicode:
         script = script.encode('utf8')
     else:

run-tests.py (10 changed lines)

@@ -698,7 +698,7 @@ def generateScriptTests():
         # Create a closure with the test data
         def makeTestFunction(fbase, suite):
             def TestFunction(self):
-                stdin = tryRead(fbase + '.lsl') or ''
+                stdin = tryRead(fbase + '.lsl') or b''
                 expected_stdout = tryRead(fbase + '.out') or b''
                 expected_stderr = tryRead(fbase + '.err') or b''
                 runargs = (parseArgs(tryRead(fbase + '.run', Binary=False))

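tryRead is a helper defined elsewhere in run-tests.py; assuming it returns the raw bytes of a fixture file, or None when the file is missing, the fallback for stdin has to be bytes as well so it can be combined with the other byte strings. A minimal sketch under that assumption:

def tryRead_sketch(path):
    try:
        with open(path, 'rb') as f:
            return f.read()           # bytes when the fixture exists
    except IOError:
        return None                   # missing test fixture

stdin = tryRead_sketch('no_such_file.lsl') or b''   # fallback must be bytes too
print(stdin + b'extra input')         # works; '' + b'...' raises TypeError on Python 3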
@@ -734,8 +734,9 @@ def generateScriptTests():
                 werr(expected_stderr)
                 werr(u'\n************ actual stderr:\n')
                 werr(actual_stderr)
-                if difflib and expected_stderr and actual_stderr:
-                    sys.stderr.write(u'\n************ diff:\n'
+                if difflib and expected_stderr and actual_stderr \
+                        and not expected_stderr.startswith(b'REGEX\n'):
+                    werr(u'\n************ diff:\n'
                         + u'\n'.join(difflib.unified_diff(
                             b2u(expected_stderr).split(u'\n'),
                             b2u(actual_stderr).split(u'\n'),

@@ -755,7 +756,8 @@ def generateScriptTests():
                 werr(expected_stdout)
                 werr(u'\n************ actual stdout:\n')
                 werr(actual_stdout)
-                if difflib and expected_stdout and actual_stdout:
+                if difflib and expected_stdout and actual_stdout \
+                        and not expected_stdout.startswith(b'REGEX\n'):
                     werr(u'\n************ diff:\n'
                         + u'\n'.join(difflib.unified_diff(
                             b2u(expected_stdout).split('\n'),

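The startswith(b'REGEX\n') guards added in the two hunks above presumably correspond to expectation files whose first line is REGEX, which the test runner matches as a regular expression instead of literally; a line-by-line diff against such a pattern would only add noise. A hedged sketch of the idea, with illustrative names rather than the suite's own:

import difflib

def show_diff(expected, actual):
    if expected.startswith(b'REGEX\n'):
        return                        # regex expectation: a literal diff is meaningless
    diff = difflib.unified_diff(
        expected.decode('utf8').split('\n'),
        actual.decode('utf8').split('\n'))
    print('\n'.join(diff))

show_diff(b'REGEX\nllSay\\(0, ".*"\\);\n', b'llSay(0, "hi");\n')   # prints nothing
show_diff(b'llOwnerSay("hi");\n', b'llSay(0, "hi");\n')            # prints a diff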
@@ -31,8 +31,7 @@ if sys.version_info.major >= 3:

     def str2b(s, enc=None):
         """Convert a native Python3 str to bytes, with the given encoding."""
-        return s.encode(enc if type(enc) == str
-                        else getattr(enc, 'encoding', 'utf8'),
+        return s.encode(getattr(enc, 'encoding', enc) or 'utf8',
                         'backslashreplace')

     def u2str(s, enc=None):

@@ -56,8 +55,7 @@ else:

     def u2str(s, enc=None):
         """Convert a Unicode string to native Python 2 str."""
-        return s.encode(enc if type(enc) == str
-                        else getattr(enc, 'encoding', 'utf8'),
+        return s.encode(getattr(enc, 'encoding', enc) or 'utf8',
                         'backslashreplace')

     def b2str(s, enc=None):

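In str2b and u2str the enc argument can be None, an encoding name, or a stream-like object carrying an .encoding attribute, and that attribute can itself be None (for example when output is piped). The new expression getattr(enc, 'encoding', enc) or 'utf8' resolves all of those to a usable encoding, whereas the old one could hand None to encode(). A small demonstration with a stand-in stream class (illustrative, not the repo's):

class FakeStream(object):
    def __init__(self, encoding):
        self.encoding = encoding

def resolve(enc):
    return getattr(enc, 'encoding', enc) or 'utf8'

print(resolve(None))                    # 'utf8'
print(resolve('latin-1'))               # 'latin-1' (a str has no .encoding attribute)
print(resolve(FakeStream('cp1252')))    # 'cp1252'
print(resolve(FakeStream(None)))        # 'utf8'  (the old expression yielded None here)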
@@ -70,7 +68,7 @@ def b2u(s, enc=None):

 def u2b(s, enc=None):
     """Unicode to Bytes"""
-    return u2str(str2b(s, enc), enc)
+    return str2b(u2str(s, enc), enc)

 def any2b(s, enc=None):
     """Bytes or Unicode to Bytes"""

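u2b composes the two helpers; the fix makes it convert Unicode to the native str type first and only then to bytes. The per-version bodies of str2b and u2str are only partly visible in this diff, so the sketch below uses simplified stand-ins to show the intended direction of the conversion:

import sys

def u2str_sketch(s, enc=None):
    """Unicode -> native str (encode on Python 2, pass through on Python 3)."""
    return s if sys.version_info[0] >= 3 else s.encode(enc or 'utf8')

def str2b_sketch(s, enc=None):
    """Native str -> bytes (encode on Python 3, pass through on Python 2)."""
    return s.encode(enc or 'utf8') if sys.version_info[0] >= 3 else s

def u2b_sketch(s, enc=None):
    # Unicode -> bytes: go through the native str type first, then to bytes,
    # which is the order the fix above restores.
    return str2b_sketch(u2str_sketch(s, enc), enc)

print(u2b_sketch(u'\xe9'))   # b'\xc3\xa9'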