mirror of https://github.com/cookiengineer/audacity (synced 2025-11-06 17:13:49 +01:00)
Update local LV2 libraries to latest versions
lilv-0.24.6 lv2-1.16.0 serd-0.30.2 sord-0.16.4 sratom-0.6.4 suil-0.10.6
@@ -1,97 +1,296 @@
-#! /usr/bin/env python
+#!/usr/bin/env python
 # encoding: utf-8
-# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
+# Carlos Rafael Giani, 2006
+# Thomas Nagy, 2010-2018 (ita)
+
+"""
+Unit testing system for C/C++/D and interpreted languages providing test execution:
+
+* in parallel, by using ``waf -j``
+* partial (only the tests that have changed) or full (by using ``waf --alltests``)
+
+The tests are declared by adding the **test** feature to programs::
+
+	def options(opt):
+		opt.load('compiler_cxx waf_unit_test')
+	def configure(conf):
+		conf.load('compiler_cxx waf_unit_test')
+	def build(bld):
+		bld(features='cxx cxxprogram test', source='main.cpp', target='app')
+		# or
+		bld.program(features='test', source='main2.cpp', target='app2')
+
+When the build is executed, the program 'test' will be built and executed without arguments.
+The success/failure is detected by looking at the return code. The status and the standard output/error
+are stored on the build context.
+
+The results can be displayed by registering a callback function. Here is how to call
+the predefined callback::
+
+	def build(bld):
+		bld(features='cxx cxxprogram test', source='main.c', target='app')
+		from waflib.Tools import waf_unit_test
+		bld.add_post_fun(waf_unit_test.summary)
+
+By passing --dump-test-scripts the build outputs corresponding python files
+(with extension _run.py) that are useful for debugging purposes.
+"""
+
+import os, shlex, sys
+from waflib.TaskGen import feature, after_method, taskgen_method
+from waflib import Utils, Task, Logs, Options
+from waflib.Tools import ccroot
+testlock = Utils.threading.Lock()
+
+SCRIPT_TEMPLATE = """#! %(python)s
+import subprocess, sys
+cmd = %(cmd)r
+# if you want to debug with gdb:
+#cmd = ['gdb', '-args'] + cmd
+env = %(env)r
+status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str))
+sys.exit(status)
+"""
+
+@taskgen_method
+def handle_ut_cwd(self, key):
+	"""
+	Task generator method, used internally to limit code duplication.
+	This method may disappear anytime.
+	"""
+	cwd = getattr(self, key, None)
+	if cwd:
+		if isinstance(cwd, str):
+			# we want a Node instance
+			if os.path.isabs(cwd):
+				self.ut_cwd = self.bld.root.make_node(cwd)
+			else:
+				self.ut_cwd = self.path.make_node(cwd)
+
+@feature('test_scripts')
+def make_interpreted_test(self):
+	"""Create interpreted unit tests."""
+	for x in ['test_scripts_source', 'test_scripts_template']:
+		if not hasattr(self, x):
+			Logs.warn('a test_scripts taskgen is missing %s' % x)
+			return
+
+	self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False))
+
+	script_nodes = self.to_nodes(self.test_scripts_source)
+	for script_node in script_nodes:
+		tsk = self.create_task('utest', [script_node])
+		tsk.vars = lst + tsk.vars
+		tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd())
+
+	self.handle_ut_cwd('test_scripts_cwd')
+
+	env = getattr(self, 'test_scripts_env', None)
+	if env:
+		self.ut_env = env
+	else:
+		self.ut_env = dict(os.environ)
+
+	paths = getattr(self, 'test_scripts_paths', {})
+	for (k, v) in paths.items():
+		p = self.ut_env.get(k, '').split(os.pathsep)
+		if isinstance(v, str):
+			v = v.split(os.pathsep)
+		self.ut_env[k] = os.pathsep.join(p + v)
+
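Note: the test_scripts feature above is driven entirely by task generator attributes. A minimal wscript sketch, in which the script name, the template and the PYTHONPATH entry are invented for illustration, could look like this::

	def options(opt):
		opt.load('waf_unit_test')
	def configure(conf):
		conf.load('waf_unit_test')
	def build(bld):
		bld(features='test_scripts',
			test_scripts_source='tests/check_basic.py',  # hypothetical test script
			test_scripts_template='python ${SCRIPT}',    # ${SCRIPT} is filled in by make_interpreted_test
			test_scripts_paths={'PYTHONPATH': 'src'})    # appended to the variable, as in the loop above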
-import os,sys
-from waflib.TaskGen import feature,after_method
-from waflib import Utils,Task,Logs,Options
-testlock=Utils.threading.Lock()
 @feature('test')
-@after_method('apply_link')
+@after_method('apply_link', 'process_use')
 def make_test(self):
-	if getattr(self,'link_task',None):
-		self.create_task('utest',self.link_task.outputs)
+	"""Create the unit test task. There can be only one unit test task by task generator."""
+	if not getattr(self, 'link_task', None):
+		return
+
+	tsk = self.create_task('utest', self.link_task.outputs)
+	if getattr(self, 'ut_str', None):
+		self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False))
+		tsk.vars = lst + tsk.vars
+
+	self.handle_ut_cwd('ut_cwd')
+
+	if not hasattr(self, 'ut_paths'):
+		paths = []
+		for x in self.tmp_use_sorted:
+			try:
+				y = self.bld.get_tgen_by_name(x).link_task
+			except AttributeError:
+				pass
+			else:
+				if not isinstance(y, ccroot.stlink_task):
+					paths.append(y.outputs[0].parent.abspath())
+		self.ut_paths = os.pathsep.join(paths) + os.pathsep
+
+	if not hasattr(self, 'ut_env'):
+		self.ut_env = dct = dict(os.environ)
+		def add_path(var):
+			dct[var] = self.ut_paths + dct.get(var, '')
+		if Utils.is_win32:
+			add_path('PATH')
+		elif Utils.unversioned_sys_platform() == 'darwin':
+			add_path('DYLD_LIBRARY_PATH')
+			add_path('LD_LIBRARY_PATH')
+		else:
+			add_path('LD_LIBRARY_PATH')
+
+	if not hasattr(self, 'ut_cmd'):
+		self.ut_cmd = getattr(Options.options, 'testcmd', False)
+
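Note: a sketch of how the ut_str, ut_cwd and ut_shell attributes consumed by make_test can be set from a wscript; the source, target and directory names are illustrative only::

	def build(bld):
		bld.program(features='test',
			source='main2.cpp',        # illustrative
			target='app2',
			ut_str='${SRC} --verbose', # custom test command line, compiled via Task.compile_fun
			ut_cwd='tests')            # relative path, turned into a Node by handle_ut_cwd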
+@taskgen_method
+def add_test_results(self, tup):
+	"""Override and return tup[1] to interrupt the build immediately if a test does not run"""
+	Logs.debug("ut: %r", tup)
+	try:
+		self.utest_results.append(tup)
+	except AttributeError:
+		self.utest_results = [tup]
+	try:
+		self.bld.utest_results.append(tup)
+	except AttributeError:
+		self.bld.utest_results = [tup]
+
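Note: the docstring above invites overriding add_test_results to stop the build as soon as a test fails. A hedged sketch of such an override, assuming it is declared after waf_unit_test is loaded (it mirrors the stock bookkeeping and only adds the return value)::

	from waflib import Logs
	from waflib.TaskGen import taskgen_method

	@taskgen_method
	def add_test_results(self, tup):
		Logs.debug("ut: %r", tup)
		self.utest_results = getattr(self, 'utest_results', []) + [tup]
		self.bld.utest_results = getattr(self.bld, 'utest_results', []) + [tup]
		return tup[1]  # a non-zero return code marks the utest task as failed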
+@Task.deep_inputs
 class utest(Task.Task):
-	color='PINK'
-	after=['vnum','inst']
-	vars=[]
+	"""
+	Execute a unit test
+	"""
+	color = 'PINK'
+	after = ['vnum', 'inst']
+	vars = []
+
 	def runnable_status(self):
-		if getattr(Options.options,'no_tests',False):
+		"""
+		Always execute the task if ``waf --alltests`` was used, and skip all
+		tests if ``waf --notests`` was used
+		"""
+		if getattr(Options.options, 'no_tests', False):
 			return Task.SKIP_ME
-		ret=super(utest,self).runnable_status()
-		if ret==Task.SKIP_ME:
-			if getattr(Options.options,'all_tests',False):
+
+		ret = super(utest, self).runnable_status()
+		if ret == Task.SKIP_ME:
+			if getattr(Options.options, 'all_tests', False):
 				return Task.RUN_ME
 		return ret
+
+	def get_test_env(self):
+		"""
+		In general, tests may require any library built anywhere in the project.
+		Override this method if fewer paths are needed
+		"""
+		return self.generator.ut_env
+
+	def post_run(self):
+		super(utest, self).post_run()
+		if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]:
+			self.generator.bld.task_sigs[self.uid()] = None
+
 	def run(self):
-		filename=self.inputs[0].abspath()
-		self.ut_exec=getattr(self.generator,'ut_exec',[filename])
-		if getattr(self.generator,'ut_fun',None):
-			self.generator.ut_fun(self)
-		try:
-			fu=getattr(self.generator.bld,'all_test_paths')
-		except AttributeError:
-			fu=os.environ.copy()
-			lst=[]
-			for g in self.generator.bld.groups:
-				for tg in g:
-					if getattr(tg,'link_task',None):
-						s=tg.link_task.outputs[0].parent.abspath()
-						if s not in lst:
-							lst.append(s)
-			def add_path(dct,path,var):
-				dct[var]=os.pathsep.join(Utils.to_list(path)+[os.environ.get(var,'')])
-			if Utils.is_win32:
-				add_path(fu,lst,'PATH')
-			elif Utils.unversioned_sys_platform()=='darwin':
-				add_path(fu,lst,'DYLD_LIBRARY_PATH')
-				add_path(fu,lst,'LD_LIBRARY_PATH')
-			else:
-				add_path(fu,lst,'LD_LIBRARY_PATH')
-			self.generator.bld.all_test_paths=fu
-		cwd=getattr(self.generator,'ut_cwd','')or self.inputs[0].parent.abspath()
-		testcmd=getattr(Options.options,'testcmd',False)
-		if testcmd:
-			self.ut_exec=(testcmd%self.ut_exec[0]).split(' ')
-		proc=Utils.subprocess.Popen(self.ut_exec,cwd=cwd,env=fu,stderr=Utils.subprocess.PIPE,stdout=Utils.subprocess.PIPE)
-		(stdout,stderr)=proc.communicate()
-		tup=(filename,proc.returncode,stdout,stderr)
-		self.generator.utest_result=tup
+		"""
+		Execute the test. The execution is always successful, and the results
+		are stored on ``self.generator.bld.utest_results`` for postprocessing.
+
+		Override ``add_test_results`` to interrupt the build
+		"""
+		if hasattr(self.generator, 'ut_run'):
+			return self.generator.ut_run(self)
+
+		self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()])
+		ut_cmd = getattr(self.generator, 'ut_cmd', False)
+		if ut_cmd:
+			self.ut_exec = shlex.split(ut_cmd % ' '.join(self.ut_exec))
+
+		return self.exec_command(self.ut_exec)
+
+	def exec_command(self, cmd, **kw):
+		self.generator.bld.log_command(cmd, kw)
+		if getattr(Options.options, 'dump_test_scripts', False):
+			script_code = SCRIPT_TEMPLATE % {
+				'python': sys.executable,
+				'env': self.get_test_env(),
+				'cwd': self.get_cwd().abspath(),
+				'cmd': cmd
+			}
+			script_file = self.inputs[0].abspath() + '_run.py'
+			Utils.writef(script_file, script_code, encoding='utf-8')
+			os.chmod(script_file, Utils.O755)
+			if Logs.verbose > 1:
+				Logs.info('Test debug file written as %r' % script_file)
+
+		proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(),
+			stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd, str))
+		(stdout, stderr) = proc.communicate()
+		self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr)
 		testlock.acquire()
 		try:
-			bld=self.generator.bld
-			Logs.debug("ut: %r",tup)
-			try:
-				bld.utest_results.append(tup)
-			except AttributeError:
-				bld.utest_results=[tup]
+			return self.generator.add_test_results(tup)
 		finally:
 			testlock.release()
+
+	def get_cwd(self):
+		return getattr(self.generator, 'ut_cwd', self.inputs[0].parent)
+
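Note: as the docstring of get_test_env in the utest class above says, the method can be overridden when fewer (or extra) paths are needed. One possible monkey-patch sketch; the TEST_DATA_DIR variable is purely illustrative::

	from waflib.Tools import waf_unit_test

	def get_test_env(self):
		env = dict(self.generator.ut_env)
		env['TEST_DATA_DIR'] = self.generator.bld.path.abspath()  # illustrative extra variable
		return env

	waf_unit_test.utest.get_test_env = get_test_env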
 def summary(bld):
-	lst=getattr(bld,'utest_results',[])
+	"""
+	Display an execution summary::
+
+		def build(bld):
+			bld(features='cxx cxxprogram test', source='main.c', target='app')
+			from waflib.Tools import waf_unit_test
+			bld.add_post_fun(waf_unit_test.summary)
+	"""
+	lst = getattr(bld, 'utest_results', [])
 	if lst:
-		Logs.pprint('CYAN','execution summary')
-		total=len(lst)
-		tfail=len([x for x in lst if x[1]])
-		Logs.pprint('CYAN','  tests that pass %d/%d'%(total-tfail,total))
-		for(f,code,out,err)in lst:
+		Logs.pprint('CYAN', 'execution summary')
+
+		total = len(lst)
+		tfail = len([x for x in lst if x[1]])
+
+		Logs.pprint('GREEN', '  tests that pass %d/%d' % (total-tfail, total))
+		for (f, code, out, err) in lst:
 			if not code:
-				Logs.pprint('CYAN','    %s'%f)
-		Logs.pprint('CYAN','  tests that fail %d/%d'%(tfail,total))
-		for(f,code,out,err)in lst:
+				Logs.pprint('GREEN', '    %s' % f)
+
+		Logs.pprint('GREEN' if tfail == 0 else 'RED', '  tests that fail %d/%d' % (tfail, total))
+		for (f, code, out, err) in lst:
 			if code:
-				Logs.pprint('CYAN','    %s'%f)
+				Logs.pprint('RED', '    %s' % f)
+
 def set_exit_code(bld):
-	lst=getattr(bld,'utest_results',[])
-	for(f,code,out,err)in lst:
+	"""
+	If any of the tests fail waf will exit with that exit code.
+	This is useful if you have an automated build system which needs
+	to report on errors from the tests.
+	You may use it like this:
+
+		def build(bld):
+			bld(features='cxx cxxprogram test', source='main.c', target='app')
+			from waflib.Tools import waf_unit_test
+			bld.add_post_fun(waf_unit_test.set_exit_code)
+	"""
+	lst = getattr(bld, 'utest_results', [])
+	for (f, code, out, err) in lst:
 		if code:
-			msg=[]
+			msg = []
 			if out:
-				msg.append('stdout:%s%s'%(os.linesep,out.decode('utf-8')))
+				msg.append('stdout:%s%s' % (os.linesep, out.decode('utf-8')))
 			if err:
-				msg.append('stderr:%s%s'%(os.linesep,err.decode('utf-8')))
+				msg.append('stderr:%s%s' % (os.linesep, err.decode('utf-8')))
 			bld.fatal(os.linesep.join(msg))
+
 def options(opt):
-	opt.add_option('--notests',action='store_true',default=False,help='Exec no unit tests',dest='no_tests')
-	opt.add_option('--alltests',action='store_true',default=False,help='Exec all unit tests',dest='all_tests')
-	opt.add_option('--testcmd',action='store',default=False,help='Run the unit tests using the test-cmd string'' example "--test-cmd="valgrind --error-exitcode=1'' %s" to run under valgrind',dest='testcmd')
+	"""
+	Provide the ``--alltests``, ``--notests`` and ``--testcmd`` command-line options.
+	"""
+	opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests')
+	opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests')
+	opt.add_option('--clear-failed', action='store_true', default=False,
+		help='Force failed unit tests to run again next time', dest='clear_failed_tests')
+	opt.add_option('--testcmd', action='store', default=False, dest='testcmd',
+		help='Run the unit tests using the test-cmd string; example: --testcmd="valgrind --error-exitcode=1 %s" to run under valgrind')
+	opt.add_option('--dump-test-scripts', action='store_true', default=False,
+		help='Create python scripts to help debug tests', dest='dump_test_scripts')