Skip to content

Loading…

Generate proper line endings on Windows #1749

Merged
merged 6 commits into from

2 participants

@twpayne

This is an attempt to fix the problem reported by @bentrm in #1747.

@bentrm

This looks very promising with the following output running build target lint:

2014-02-23 15:08:21,934 build/check-whitespace-timestamp: build/src/external/externs/types.js: trailing whitespace at en
d of file
2014-02-23 15:08:21,940 build/check-whitespace-timestamp: build/src/external/src/exports.js: trailing whitespace at end
of file
2014-02-23 15:08:21,946 build/check-whitespace-timestamp: build/src/internal/src/types.js: trailing whitespace at end of
 file
2014-02-23 15:08:22,186 pake: build/check-whitespace-timestamp: 3 whitespace errors

So only whitespace errors at the ends of files are left over.
I'm using core.autocrlf=input to make sure no newline characters are converted on checkout but LF is honored when adding changes to the repo.

@bentrm

This pull request in combination with https://github.com/twpayne/pake/pull/2 allows linting of the codebase with no errors in a Windows environment.

@twpayne

Awesome, thanks very much for the investigative work @bentrm. I've just added a commit that updates pake in ol3 and I'll merge as soon as Travis passes.

@twpayne twpayne merged commit 69addbc into openlayers:master

1 check passed

Details default The Travis CI build passed
@twpayne twpayne deleted the twpayne:windows-newlines branch
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Showing with 155 additions and 32 deletions.
  1. +6 −6 bin/combine-examples.py
  2. +1 −1 bin/generate-exports.py
  3. +3 −3 bin/pyglslunit.py
  4. +14 −14 build.py
  5. +131 −8 pake.py
View
12 bin/combine-examples.py
@@ -8,19 +8,19 @@ def main(argv):
examples = {}
requires = set()
for filename in argv[1:]:
- lines = open(filename).readlines()
+ lines = open(filename, 'rU').readlines()
if len(lines) > 0 and lines[0].startswith('// NOCOMPILE'):
continue
requires.update(line for line in lines if line.startswith('goog.require'))
examples[filename] = [line for line in lines if not line.startswith('goog.require')]
for require in sorted(requires):
- print require,
+ sys.stdout.write(require)
for filename in sorted(examples.keys()):
- print '// ', filename
- print '(function(){'
+ sys.stdout.write('// ' + filename + '\n')
+ sys.stdout.write('(function(){\n')
for line in examples[filename]:
- print line,
- print '})();'
+ sys.stdout.write(line)
+ sys.stdout.write('})();\n')
if __name__ == '__main__':
View
2 bin/generate-exports.py
@@ -113,7 +113,7 @@ def main(argv):
for arg in args:
in_comment = False
object_literal = None
- for line in open(arg):
+ for line in open(arg, 'rU'):
line = line.strip()
if not line:
continue
View
6 bin/pyglslunit.py
@@ -52,7 +52,7 @@ def main(argv):
common, vertex, fragment = [], [], []
attributes, uniforms, varyings = {}, {}, {}
block = None
- for line in open(options.input):
+ for line in open(options.input, 'rU'):
if line.startswith('//!'):
m = re.match(r'//!\s+NAMESPACE=(\S+)\s*\Z', line)
if m:
@@ -111,10 +111,10 @@ def main(argv):
context['getUniforms'] = [uniforms[u] for u in sorted(uniforms.keys())]
if options.output and options.output != '-':
- output = open(options.output, 'w')
+ output = open(options.output, 'wb')
else:
output = sys.stdout
- output.write(pystache.render(open(options.template).read(), context))
+ output.write(pystache.render(open(options.template, 'rb').read(), context))
if __name__ == '__main__':
View
28 build.py
@@ -153,7 +153,7 @@
def report_sizes(t):
stringio = StringIO()
gzipfile = gzip.GzipFile(t.name, 'w', 9, stringio)
- with open(t.name) as f:
+ with open(t.name, 'rb') as f:
shutil.copyfileobj(f, gzipfile)
gzipfile.close()
rawsize = os.stat(t.name).st_size
@@ -251,11 +251,11 @@ def shader_src(t):
def _build_require_list(dependencies, output_file_name):
requires = set()
for dependency in dependencies:
- for line in open(dependency):
+ for line in open(dependency, 'rU'):
match = re.match(r'goog\.provide\(\'(.*)\'\);', line)
if match:
requires.add(match.group(1))
- with open(output_file_name, 'w') as f:
+ with open(output_file_name, 'wb') as f:
for require in sorted(requires):
f.write('goog.require(\'%s\');\n' % (require,))
@@ -334,7 +334,7 @@ def action(t):
'../externs/vbarray.js',
],
})
- with open(t.name, 'w') as f:
+ with open(t.name, 'wb') as f:
f.write(content)
dependencies = [__file__, 'buildcfg/base.json']
return Target(name, action=action, dependencies=dependencies)
@@ -449,7 +449,7 @@ def build_check_requires_timestamp(t):
# the generated regular expression to exceed Python's limits
if zi.filename.startswith('closure/goog/i18n/'):
continue
- for line in zf.open(zi):
+ for line in zf.open(zi, 'rU'):
m = re.match(r'goog.provide\(\'(.*)\'\);', line)
if m:
all_provides.add(m.group(1))
@@ -458,7 +458,7 @@ def build_check_requires_timestamp(t):
continue
require_linenos = {}
uses = set()
- lines = open(filename).readlines()
+ lines = open(filename, 'rU').readlines()
for lineno, line in _strip_comments(lines):
m = re.match(r'goog.provide\(\'(.*)\'\);', line)
if m:
@@ -541,7 +541,7 @@ def build_re(self, key):
requires = set()
uses = set()
uses_linenos = {}
- for lineno, line in _strip_comments(open(filename)):
+ for lineno, line in _strip_comments(open(filename, 'rU')):
m = re.match(r'goog.provide\(\'(.*)\'\);', line)
if m:
provides.add(m.group(1))
@@ -590,7 +590,7 @@ def build_check_whitespace_timestamp(t):
errors = 0
for filename in sorted(t.newer(t.dependencies)):
whitespace = False
- for lineno, line in enumerate(open(filename)):
+ for lineno, line in enumerate(open(filename, 'rU')):
if CR_RE.search(line):
t.info('%s:%d: carriage return character in line', filename, lineno + 1)
errors += 1
@@ -624,8 +624,8 @@ def plovr_jar(t):
@target('build/jsdoc-%(BRANCH)s-timestamp' % vars(variables), 'host-resources',
- 'build/src/external/src/exports.js', 'build/src/external/src/types.js',
- SRC, SHADER_SRC, ifind('apidoc/template'))
+ 'build/src/external/src/exports.js', SRC, SHADER_SRC,
+ ifind('apidoc/template'))
def jsdoc_BRANCH_timestamp(t):
t.run('%(JSDOC)s', '-c', 'apidoc/conf.json', 'src', 'apidoc/index.md',
'-d', 'build/hosted/%(BRANCH)s/apidoc')
@@ -633,7 +633,7 @@ def jsdoc_BRANCH_timestamp(t):
def split_example_file(example, dst_dir):
- lines = open(example).readlines()
+ lines = open(example, 'rU').readlines()
target_lines = []
target_require_lines = []
@@ -651,11 +651,11 @@ def split_example_file(example, dst_dir):
target_lines.append(line)
target = open(
- os.path.join(dst_dir, os.path.basename(example)), 'w')
+ os.path.join(dst_dir, os.path.basename(example)), 'wb')
target_require = open(
os.path.join(dst_dir, os.path.basename(example)
.replace('.js', '-require.js')),
- 'w')
+ 'wb')
target.writelines(target_lines)
target.close()
@@ -709,7 +709,7 @@ def host_examples(t):
def check_examples(t):
examples = ['build/hosted/%(BRANCH)s/' + e
for e in EXAMPLES
- if not open(e.replace('.html', '.js')).readline().startswith('// NOCOMPILE')]
+ if not open(e.replace('.html', '.js'), 'rU').readline().startswith('// NOCOMPILE')]
all_examples = \
[e + '?mode=advanced' for e in examples]
for example in all_examples:
View
139 pake.py
@@ -71,14 +71,31 @@ def __str__(self):
return 'duplicate target %r' % (self.target.name,)
+class UnknownTargetError(PakeError):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return 'unknown target %r' % (self.name,)
+
+
class Target(object):
+ """Target is the core object of pake. It includes all of the target's name
+ (which may or may not correspond to a real file in the filesystem, see the
+ comments in virtual and TargetCollection below), the action to be performed
+ when this target is to be rebuilt, its dependencies, and various other
+ metadata."""
def __init__(self, name, action=None, clean=True, dependencies=(),
- makedirs=True, phony=False, precious=False):
+ help=None, help_group=None, makedirs=True, phony=False,
+ precious=False):
self.name = name
self.action = action
self._clean = clean
self.dependencies = list(flatten(dependencies))
+ self.help = help
+ self.help_group = help_group
self._makedirs = makedirs
self.phony = phony
self.precious = precious
@@ -184,17 +201,26 @@ def newer(self, *args):
if targets.get(arg).timestamp > self.timestamp]
def output(self, *args, **kwargs):
+ """output runs the command passed to it, saving the output of the
+ command to the contents of the target. For example:
+ @target('ofile')
+ def ofile(t):
+ t.output('echo', '123')
+ After this target's action is executed, ofile will contain the string
+ "123"."""
args = flatten_expand_list(args)
self.info(' '.join(args))
try:
output = check_output(args, **kwargs)
- with open(self.name, 'w') as f:
+ with open(self.name, 'wb') as f:
f.write(output)
except subprocess.CalledProcessError as e:
self.clean(recurse=False)
self.error(e)
def rm_rf(self, *args):
+ """rm_rf recursively deletes the files and/or directories passed to
+ it."""
args = flatten_expand_list(args)
for arg in args:
self.info('rm -rf %s', arg)
@@ -211,6 +237,15 @@ def run(self, *args, **kwargs):
@contextlib.contextmanager
def tempdir(self):
+ """tempdir creates a temporary directory, changes to it, and runs the
+ nested block of code. However the nested block of code exits, tempdir
+ will delete the temporary directory permanently, before pake exits. For
+ example:
+ with t.tempdir():
+ # copy various files to $PWD (the temporary directory)
+ # zip up the contents of $PWD, or copy them somewhere else
+ However the above code exits (e.g. copy error or zip error), the
+ temporary directory will be cleaned up."""
tempdir = tempfile.mkdtemp()
self.info('mkdir -p %s', tempdir)
try:
@@ -220,20 +255,32 @@ def tempdir(self):
shutil.rmtree(tempdir, ignore_errors=True)
def touch(self):
+ """touch updates the timestamp of the target. If the target already
+ exists as a file in the filesystem its timestamp is updated, otherwise
+ a new file is created with the current timestamp."""
if os.path.exists(self.name):
os.utime(self.name, None)
else:
- with open(self.name, 'w'):
+ with open(self.name, 'wb'):
pass
class TargetCollection(object):
+ """TargetCollection implements a namespace for looking up build targets.
+ TargetCollection will first look for rules that match exactly, and then
+ - if no match is found - search through a list of regular expression-based
+ rules. As soon as a regular expression match is found, that rule is added
+ to the list of rules that match exactly. Typically, an invocation of pake
+ will only create a single TargetCollection."""
def __init__(self):
self.default = None
self.targets = {}
def add(self, target):
+ """add adds a concrete target to self, raising an error if the target
+ already exists. If target is the first target to be added, it becomes
+ the default for this TargetCollection."""
if target.name in self.targets:
raise DuplicateTargetError(target)
self.targets[target.name] = target
@@ -241,6 +288,11 @@ def add(self, target):
self.default = target
def get(self, name):
+ """get searches for a target. If it already exists, it is returned.
+ Otherwise, get searches through the defined rules, trying to find a
+ rule that matches. If it finds a matching rule, a concrete target is
+ instantiated, cached, and returned. If no match is found, a virtual
+ precious target is instantiated and returned."""
if name in self.targets:
return self.targets[name]
target = None
@@ -252,28 +304,74 @@ def get(self, name):
raise AmbiguousRuleError(name)
target = f(name, match)
if target is None:
- target = Target(name, precious=True)
+ if os.path.exists(name):
+ target = Target(name, precious=True)
+ else:
+ raise UnknownTargetError(name)
self.targets[name] = target
return target
+ def format_epilog(self, formatter):
+ helps_by_help_group = collections.defaultdict(dict)
+ max_name_len = 0
+ for name in sorted(self.targets):
+ target = self.targets[name]
+ if target.help is not None:
+ helps_by_help_group[target.help_group][name] = target.help
+ max_name_len = max(max_name_len, len(name))
+ lines = []
+ lines.append('Targets:\n')
+ format = ' %%-%ds %%s\n' % (max_name_len,)
+ for help_group in sorted(helps_by_help_group.keys()):
+ helps = helps_by_help_group[help_group]
+ if help_group is not None:
+ lines.append('%s targets:\n' % (help_group,))
+ for name in sorted(helps.keys()):
+ lines.append(format % (name, helps[name]))
+ return ''.join(lines)
+
class VariableCollection(object):
+ """VariableCollection implements an object with properties where the first
+ set of a property wins, and all further sets are ignored. For example:
+ vc = VariableCollection()
+ vc.FOO = 1 # First set of the property FOO
+ vc.FOO = 2 # Further sets of the property FOO are ignored, and do
+ # not raise an error. After this statement, vc.FOO is
+ # still 1.
+ print vc.FOO # Prints "1" """
def __init__(self, **kwargs):
for key, value in kwargs.iteritems():
setattr(self, key, value)
def __setattr__(self, key, value):
+ """Only set an attribute if it has not already been set. First to set
+ the value is the winner."""
if not hasattr(self, key):
object.__setattr__(self, key, value)
+# targets is the single TargetCollection instance created for this invokation
+# of pake
targets = TargetCollection()
+# rules is a dict of regular expressions to @rules where dynamically created
+# rules are registered.
rules = {}
+# variables is the global set of substitution variables, where the first setter
+# takes priority. The priority order is:
+# 1. Environment variables
+# 2. Command line arguments
+# 3. Internal Python settings in build.py
variables = VariableCollection(**os.environ)
def flatten(*args):
+ """flatten takes a variable number of arguments, each of which may or may
+ be not be a collection.Iterable, and yields the elements of each in
+ depth-first order. In short, it flattens nested iterables into a single
+ collection. For example, flatten(1, [2, (3, 4), 5], 6) yields 1, 2, 3, 4,
+ 5, 6."""
for arg in args:
if (isinstance(arg, collections.Iterable) and
not isinstance(arg, basestring)):
@@ -284,17 +382,21 @@ def flatten(*args):
def flatten_expand_list(*args):
+ """flatten_expand_list applies flatten, treats each element as a string,
+ and formats each string according to the global value of variables."""
return list(arg % vars(variables) for arg in flatten(args))
def ifind(*paths):
+ """ifind is an iterative version of os.walk, yielding all walked paths and
+ normalizing paths to use forward slashes."""
for path in paths:
for dirpath, dirnames, names in os.walk(path):
for name in names:
- if sys.platform == 'win32':
- yield '/'.join(dirpath.split('\\') + [name])
- else:
+ if os.sep == '/':
yield os.path.join(dirpath, name)
+ else:
+ yield '/'.join(dirpath.split(os.sep) + [name])
def main(argv=sys.argv):
@@ -310,6 +412,7 @@ def main(argv=sys.argv):
option_parser.add_option('-v', '--verbose',
action='count', dest='logging_level')
option_parser.set_defaults(logging_level=0)
+ option_parser.format_epilog = targets.format_epilog
options, args = option_parser.parse_args(argv[1:])
logging.basicConfig(format='%(asctime)s %(name)s: %(message)s',
level=logging.INFO - 10 * options.logging_level)
@@ -343,6 +446,11 @@ def main(argv=sys.argv):
def output(*args):
+ """output captures the output of a single command. It is typically used to
+ set variables that only need to be set once. For example:
+ UNAME_A = output('uname', '-a')
+ If you need to capture the output of a command in a target, you should use
+ t.output."""
args = flatten_expand_list(args)
logger.debug(' '.join(args))
return check_output(args)
@@ -355,6 +463,14 @@ def f(targetmaker):
def target(name, *dependencies, **kwargs):
+ """The @target decorator describes the action needed to build a single
+ target file when its dependencies are out of date. For example:
+ @target('hello', 'hello.c')
+ def hello(t):
+ t.run('gcc', '-o', t.name, t.dependencies)
+ # the above line will run gcc -o hello hello.c
+ See the documentation for Target to see the properties provide by the
+ target t."""
def f(action):
target = Target(name, action=action, dependencies=dependencies,
**kwargs)
@@ -363,6 +479,12 @@ def f(action):
def virtual(name, *dependencies, **kwargs):
+ """virtual targets are metatargets. They do not correspond to any real
+ file in the filesystem, even if a file with the same name already exists.
+ Virtual targets can be thought of as only existing for the duration of the
+ build. Their up-to-dateness or otherwise is independent of any existence
+ or up-to-dateness of any actual file in the filesystem. Typically they are
+ used to group actions such as "all", "build", or "test"."""
target = Target(name, dependencies=dependencies, clean=False, phony=True,
**kwargs)
targets.add(target)
@@ -370,7 +492,8 @@ def virtual(name, *dependencies, **kwargs):
def which(program):
"""Returns the full path of a given argument or `None`.
- See: http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python"""
+ See:
+ http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python"""
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
Something went wrong with that request. Please try again.