`:
in general, a ``nogil`` block may contain only "white" code.
See examples in :ref:`determining_where_to_add_types` or
:ref:`primes`.
    :param compiler_directives: Allows you to set compiler directives in the ``setup.py`` like this:
                                ``compiler_directives={'embedsignature': True}``.
                                See :ref:`compiler-directives`.
    :param depfile: Produce depfiles (make-style dependency files) for the sources if True.
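
    Example -- a minimal ``setup.py`` driving this function (the source glob,
    directive values and thread count below are illustrative)::

        from setuptools import setup
        from Cython.Build import cythonize

        setup(
            ext_modules=cythonize(
                "src/*.pyx",
                compiler_directives={'language_level': 3},
                nthreads=4,
            ),
        )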
"""
if exclude is None:
exclude = []
if 'include_path' not in options:
options['include_path'] = ['.']
if 'common_utility_include_dir' in options:
safe_makedirs(options['common_utility_include_dir'])
depfile = options.pop('depfile', None)
if pythran is None:
pythran_options = None
else:
pythran_options = CompilationOptions(**options)
pythran_options.cplus = True
pythran_options.np_pythran = True
c_options = CompilationOptions(**options)
    cpp_options = CompilationOptions(**options)
    cpp_options.cplus = True
ctx = c_options.create_context()
options = c_options
module_list, module_metadata = create_extension_list(
module_list,
exclude=exclude,
ctx=ctx,
quiet=quiet,
exclude_failures=exclude_failures,
language=language,
aliases=aliases)
deps = create_dependency_tree(ctx, quiet=quiet)
build_dir = getattr(options, 'build_dir', None)
def copy_to_build_dir(filepath, root=os.getcwd()):
filepath_abs = os.path.abspath(filepath)
if os.path.isabs(filepath):
filepath = filepath_abs
if filepath_abs.startswith(root):
            # distutils Extension 'depends' entries are interpreted relative to the cwd
mod_dir = join_path(build_dir,
os.path.dirname(_relpath(filepath, root)))
copy_once_if_newer(filepath_abs, mod_dir)
modules_by_cfile = collections.defaultdict(list)
to_compile = []
for m in module_list:
if build_dir:
for dep in m.depends:
copy_to_build_dir(dep)
cy_sources = [
source for source in m.sources
if os.path.splitext(source)[1] in ('.pyx', '.py')]
if len(cy_sources) == 1:
# normal "special" case: believe the Extension module name to allow user overrides
full_module_name = m.name
else:
# infer FQMN from source files
full_module_name = None
new_sources = []
for source in m.sources:
base, ext = os.path.splitext(source)
if ext in ('.pyx', '.py'):
if m.np_pythran:
c_file = base + '.cpp'
options = pythran_options
elif m.language == 'c++':
c_file = base + '.cpp'
options = cpp_options
else:
c_file = base + '.c'
options = c_options
# setup for out of place build directory if enabled
if build_dir:
if os.path.isabs(c_file):
warnings.warn("build_dir has no effect for absolute source paths")
c_file = os.path.join(build_dir, c_file)
dir = os.path.dirname(c_file)
safe_makedirs_once(dir)
# write out the depfile, if requested
if depfile:
dependencies = deps.all_dependencies(source)
src_base_dir, _ = os.path.split(source)
if not src_base_dir.endswith(os.sep):
src_base_dir += os.sep
# paths below the base_dir are relative, otherwise absolute
paths = []
for fname in dependencies:
if (fname.startswith(src_base_dir) or
fname.startswith('.' + os.path.sep)):
paths.append(os.path.relpath(fname, src_base_dir))
else:
paths.append(os.path.abspath(fname))
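                    # The code below emits a Makefile-style dependency line, e.g.:
                    #
                    #   foo.c: \
                    #    foo.pyx \
                    #    include/foo.pxd
                    #
                    # (file names here are illustrative)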
depline = os.path.split(c_file)[1] + ": \\\n "
depline += " \\\n ".join(paths) + "\n"
with open(c_file+'.dep', 'w') as outfile:
outfile.write(depline)
if os.path.exists(c_file):
c_timestamp = os.path.getmtime(c_file)
else:
c_timestamp = -1
# Priority goes first to modified files, second to direct
# dependents, and finally to indirect dependents.
if c_timestamp < deps.timestamp(source):
dep_timestamp, dep = deps.timestamp(source), source
priority = 0
else:
dep_timestamp, dep = deps.newest_dependency(source)
priority = 2 - (dep in deps.immediate_dependencies(source))
if force or c_timestamp < dep_timestamp:
if not quiet and not force:
if source == dep:
print("Compiling %s because it changed." % source)
else:
print("Compiling %s because it depends on %s." % (source, dep))
if not force and options.cache:
fingerprint = deps.transitive_fingerprint(source, m, options)
else:
fingerprint = None
to_compile.append((
priority, source, c_file, fingerprint, quiet,
options, not exclude_failures, module_metadata.get(m.name),
full_module_name))
new_sources.append(c_file)
modules_by_cfile[c_file].append(m)
else:
new_sources.append(source)
if build_dir:
copy_to_build_dir(source)
m.sources = new_sources
if options.cache:
if not os.path.exists(options.cache):
os.makedirs(options.cache)
to_compile.sort()
# Drop "priority" component of "to_compile" entries and add a
# simple progress indicator.
N = len(to_compile)
progress_fmt = "[{0:%d}/{1}] " % len(str(N))
for i in range(N):
progress = progress_fmt.format(i+1, N)
to_compile[i] = to_compile[i][1:] + (progress,)
if N <= 1:
nthreads = 0
if nthreads:
# Requires multiprocessing (or Python >= 2.6)
try:
import multiprocessing
pool = multiprocessing.Pool(
nthreads, initializer=_init_multiprocessing_helper)
except (ImportError, OSError):
print("multiprocessing required for parallel cythonization")
nthreads = 0
else:
# This is a bit more involved than it should be, because KeyboardInterrupts
# break the multiprocessing workers when using a normal pool.map().
# See, for example:
# http://noswap.com/blog/python-multiprocessing-keyboardinterrupt
try:
result = pool.map_async(cythonize_one_helper, to_compile, chunksize=1)
pool.close()
while not result.ready():
try:
result.get(99999) # seconds
except multiprocessing.TimeoutError:
pass
except KeyboardInterrupt:
pool.terminate()
raise
pool.join()
if not nthreads:
for args in to_compile:
cythonize_one(*args)
if exclude_failures:
failed_modules = set()
for c_file, modules in modules_by_cfile.items():
if not os.path.exists(c_file):
failed_modules.update(modules)
elif os.path.getsize(c_file) < 200:
f = io_open(c_file, 'r', encoding='iso8859-1')
try:
if f.read(len('#error ')) == '#error ':
# dead compilation result
failed_modules.update(modules)
finally:
f.close()
if failed_modules:
for module in failed_modules:
module_list.remove(module)
print("Failed compilations: %s" % ', '.join(sorted([
module.name for module in failed_modules])))
if options.cache:
cleanup_cache(options.cache, getattr(options, 'cache_size', 1024 * 1024 * 100))
# cythonize() is often followed by the (non-Python-buffered)
# compiler output, flush now to avoid interleaving output.
sys.stdout.flush()
return module_list
if os.environ.get('XML_RESULTS'):
compile_result_dir = os.environ['XML_RESULTS']
def record_results(func):
def with_record(*args):
t = time.time()
success = True
try:
try:
func(*args)
except:
success = False
finally:
t = time.time() - t
module = fully_qualified_name(args[0])
name = "cythonize." + module
failures = 1 - success
if success:
failure_item = ""
else:
failure_item = "failure"
output = open(os.path.join(compile_result_dir, name + ".xml"), "w")
output.write("""
%(failure_item)s
""".strip() % locals())
output.close()
return with_record
else:
def record_results(func):
return func
# TODO: Share context? Issue: pyx processing leaks into pxd module
@record_results
def cythonize_one(pyx_file, c_file, fingerprint, quiet, options=None,
raise_on_failure=True, embedded_metadata=None, full_module_name=None,
progress=""):
from ..Compiler.Main import compile_single, default_options
from ..Compiler.Errors import CompileError, PyrexError
if fingerprint:
if not os.path.exists(options.cache):
safe_makedirs(options.cache)
# Cython-generated c files are highly compressible.
# (E.g. a compression ratio of about 10 for Sage).
fingerprint_file_base = join_path(
options.cache, "%s-%s" % (os.path.basename(c_file), fingerprint))
gz_fingerprint_file = fingerprint_file_base + gzip_ext
zip_fingerprint_file = fingerprint_file_base + '.zip'
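        # Cache entries pair the output file name with the content fingerprint,
        # e.g. "foo.c-<fingerprint>.gz" for a single generated file, or
        # "foo.c-<fingerprint>.zip" when the build produced several artifacts
        # (.h / _api.h files); the names here are illustrative.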
if os.path.exists(gz_fingerprint_file) or os.path.exists(zip_fingerprint_file):
if not quiet:
print("%sFound compiled %s in cache" % (progress, pyx_file))
if os.path.exists(gz_fingerprint_file):
os.utime(gz_fingerprint_file, None)
with contextlib.closing(gzip_open(gz_fingerprint_file, 'rb')) as g:
with contextlib.closing(open(c_file, 'wb')) as f:
shutil.copyfileobj(g, f)
else:
os.utime(zip_fingerprint_file, None)
dirname = os.path.dirname(c_file)
with contextlib.closing(zipfile.ZipFile(zip_fingerprint_file)) as z:
for artifact in z.namelist():
z.extract(artifact, os.path.join(dirname, artifact))
return
if not quiet:
print("%sCythonizing %s" % (progress, pyx_file))
if options is None:
options = CompilationOptions(default_options)
options.output_file = c_file
options.embedded_metadata = embedded_metadata
any_failures = 0
try:
result = compile_single(pyx_file, options, full_module_name=full_module_name)
if result.num_errors > 0:
any_failures = 1
except (EnvironmentError, PyrexError) as e:
sys.stderr.write('%s\n' % e)
any_failures = 1
# XXX
import traceback
traceback.print_exc()
except Exception:
if raise_on_failure:
raise
import traceback
traceback.print_exc()
any_failures = 1
if any_failures:
if raise_on_failure:
raise CompileError(None, pyx_file)
elif os.path.exists(c_file):
os.remove(c_file)
elif fingerprint:
artifacts = list(filter(None, [
getattr(result, attr, None)
for attr in ('c_file', 'h_file', 'api_file', 'i_file')]))
if len(artifacts) == 1:
fingerprint_file = gz_fingerprint_file
with contextlib.closing(open(c_file, 'rb')) as f:
with contextlib.closing(gzip_open(fingerprint_file + '.tmp', 'wb')) as g:
shutil.copyfileobj(f, g)
else:
fingerprint_file = zip_fingerprint_file
with contextlib.closing(zipfile.ZipFile(
fingerprint_file + '.tmp', 'w', zipfile_compression_mode)) as zip:
for artifact in artifacts:
zip.write(artifact, os.path.basename(artifact))
os.rename(fingerprint_file + '.tmp', fingerprint_file)
def cythonize_one_helper(m):
import traceback
try:
return cythonize_one(*m)
except Exception:
traceback.print_exc()
raise
def _init_multiprocessing_helper():
# KeyboardInterrupt kills workers, so don't let them get it
import signal
signal.signal(signal.SIGINT, signal.SIG_IGN)
def cleanup_cache(cache, target_size, ratio=.85):
try:
p = subprocess.Popen(['du', '-s', '-k', os.path.abspath(cache)], stdout=subprocess.PIPE)
res = p.wait()
if res == 0:
total_size = 1024 * int(p.stdout.read().strip().split()[0])
if total_size < target_size:
return
except (OSError, ValueError):
pass
total_size = 0
all = []
for file in os.listdir(cache):
path = join_path(cache, file)
s = os.stat(path)
total_size += s.st_size
all.append((s.st_atime, s.st_size, path))
if total_size > target_size:
for time, size, file in reversed(sorted(all)):
os.unlink(file)
total_size -= size
if total_size < target_size * ratio:
break
# File: Cython-0.29.28/Cython/Build/Distutils.py

from Cython.Distutils.build_ext import build_ext
# File: Cython-0.29.28/Cython/Build/Inline.py

from __future__ import absolute_import
import hashlib
import inspect
import os
import re
import sys
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
import Cython
from ..Compiler.Main import Context, default_options
from ..Compiler.Visitor import CythonTransform, EnvTransform
from ..Compiler.ParseTreeTransforms import SkipDeclarations
from ..Compiler.TreeFragment import parse_from_strings
from ..Compiler.StringEncoding import _unicode
from .Dependencies import strip_string_literals, cythonize, cached_function
from ..Compiler import Pipeline
from ..Utils import get_cython_cache_dir
import cython as cython_module
IS_PY3 = sys.version_info >= (3,)
# A utility function to convert user-supplied ASCII strings to unicode.
if not IS_PY3:
def to_unicode(s):
if isinstance(s, bytes):
return s.decode('ascii')
else:
return s
else:
to_unicode = lambda x: x
if sys.version_info < (3, 5):
import imp
def load_dynamic(name, module_path):
return imp.load_dynamic(name, module_path)
else:
import importlib.util as _importlib_util
def load_dynamic(name, module_path):
spec = _importlib_util.spec_from_file_location(name, module_path)
module = _importlib_util.module_from_spec(spec)
# sys.modules[name] = module
spec.loader.exec_module(module)
return module
class UnboundSymbols(EnvTransform, SkipDeclarations):
def __init__(self):
CythonTransform.__init__(self, None)
self.unbound = set()
def visit_NameNode(self, node):
if not self.current_env().lookup(node.name):
self.unbound.add(node.name)
return node
def __call__(self, node):
super(UnboundSymbols, self).__call__(node)
return self.unbound
@cached_function
def unbound_symbols(code, context=None):
code = to_unicode(code)
if context is None:
context = Context([], default_options)
from ..Compiler.ParseTreeTransforms import AnalyseDeclarationsTransform
tree = parse_from_strings('(tree fragment)', code)
for phase in Pipeline.create_pipeline(context, 'pyx'):
if phase is None:
continue
tree = phase(tree)
if isinstance(phase, AnalyseDeclarationsTransform):
break
try:
import builtins
except ImportError:
import __builtin__ as builtins
return tuple(UnboundSymbols()(tree) - set(dir(builtins)))
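# Illustrative behaviour: for code like "x = a + b", unbound_symbols() reports
# ('a', 'b') (in arbitrary order, as it comes from a set) -- 'x' is bound by
# the assignment itself, and builtins are filtered out above.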
def unsafe_type(arg, context=None):
py_type = type(arg)
if py_type is int:
return 'long'
else:
return safe_type(arg, context)
def safe_type(arg, context=None):
py_type = type(arg)
if py_type in (list, tuple, dict, str):
return py_type.__name__
elif py_type is complex:
return 'double complex'
elif py_type is float:
return 'double'
elif py_type is bool:
return 'bint'
elif 'numpy' in sys.modules and isinstance(arg, sys.modules['numpy'].ndarray):
return 'numpy.ndarray[numpy.%s_t, ndim=%s]' % (arg.dtype.name, arg.ndim)
else:
for base_type in py_type.__mro__:
if base_type.__module__ in ('__builtin__', 'builtins'):
return 'object'
module = context.find_module(base_type.__module__, need_pxd=False)
if module:
entry = module.lookup(base_type.__name__)
if entry.is_type:
return '%s.%s' % (base_type.__module__, base_type.__name__)
return 'object'
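# Illustrative mappings implied by the branches above:
#   safe_type(1.0)                  -> 'double'
#   safe_type(True)                 -> 'bint'
#   safe_type([1, 2])               -> 'list'
#   safe_type(numpy.zeros((2, 3)))  -> 'numpy.ndarray[numpy.float64_t, ndim=2]'
#   safe_type(object())             -> 'object'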
def _get_build_extension():
dist = Distribution()
# Ensure the build respects distutils configuration by parsing
# the configuration files
config_files = dist.find_config_files()
dist.parse_config_files(config_files)
build_extension = build_ext(dist)
build_extension.finalize_options()
return build_extension
@cached_function
def _create_context(cython_include_dirs):
return Context(list(cython_include_dirs), default_options)
_cython_inline_cache = {}
_cython_inline_default_context = _create_context(('.',))
def _populate_unbound(kwds, unbound_symbols, locals=None, globals=None):
for symbol in unbound_symbols:
if symbol not in kwds:
if locals is None or globals is None:
calling_frame = inspect.currentframe().f_back.f_back.f_back
if locals is None:
locals = calling_frame.f_locals
if globals is None:
globals = calling_frame.f_globals
if symbol in locals:
kwds[symbol] = locals[symbol]
elif symbol in globals:
kwds[symbol] = globals[symbol]
else:
print("Couldn't find %r" % symbol)
def _inline_key(orig_code, arg_sigs, language_level):
key = orig_code, arg_sigs, sys.version_info, sys.executable, language_level, Cython.__version__
return hashlib.sha1(_unicode(key).encode('utf-8')).hexdigest()
def cython_inline(code, get_type=unsafe_type,
lib_dir=os.path.join(get_cython_cache_dir(), 'inline'),
cython_include_dirs=None, cython_compiler_directives=None,
force=False, quiet=False, locals=None, globals=None, language_level=None, **kwds):
if get_type is None:
get_type = lambda x: 'object'
ctx = _create_context(tuple(cython_include_dirs)) if cython_include_dirs else _cython_inline_default_context
cython_compiler_directives = dict(cython_compiler_directives) if cython_compiler_directives else {}
if language_level is None and 'language_level' not in cython_compiler_directives:
language_level = '3str'
if language_level is not None:
cython_compiler_directives['language_level'] = language_level
# Fast path if this has been called in this session.
_unbound_symbols = _cython_inline_cache.get(code)
if _unbound_symbols is not None:
_populate_unbound(kwds, _unbound_symbols, locals, globals)
args = sorted(kwds.items())
arg_sigs = tuple([(get_type(value, ctx), arg) for arg, value in args])
key_hash = _inline_key(code, arg_sigs, language_level)
invoke = _cython_inline_cache.get((code, arg_sigs, key_hash))
if invoke is not None:
arg_list = [arg[1] for arg in args]
return invoke(*arg_list)
orig_code = code
code = to_unicode(code)
code, literals = strip_string_literals(code)
code = strip_common_indent(code)
if locals is None:
locals = inspect.currentframe().f_back.f_back.f_locals
if globals is None:
globals = inspect.currentframe().f_back.f_back.f_globals
try:
_cython_inline_cache[orig_code] = _unbound_symbols = unbound_symbols(code)
_populate_unbound(kwds, _unbound_symbols, locals, globals)
except AssertionError:
if not quiet:
# Parsing from strings not fully supported (e.g. cimports).
print("Could not parse code as a string (to extract unbound symbols).")
cimports = []
for name, arg in list(kwds.items()):
if arg is cython_module:
cimports.append('\ncimport cython as %s' % name)
del kwds[name]
arg_names = sorted(kwds)
arg_sigs = tuple([(get_type(kwds[arg], ctx), arg) for arg in arg_names])
key_hash = _inline_key(orig_code, arg_sigs, language_level)
module_name = "_cython_inline_" + key_hash
if module_name in sys.modules:
module = sys.modules[module_name]
else:
build_extension = None
if cython_inline.so_ext is None:
# Figure out and cache current extension suffix
build_extension = _get_build_extension()
cython_inline.so_ext = build_extension.get_ext_filename('')
module_path = os.path.join(lib_dir, module_name + cython_inline.so_ext)
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
if force or not os.path.isfile(module_path):
cflags = []
c_include_dirs = []
qualified = re.compile(r'([.\w]+)[.]')
for type, _ in arg_sigs:
m = qualified.match(type)
if m:
cimports.append('\ncimport %s' % m.groups()[0])
# one special case
if m.groups()[0] == 'numpy':
import numpy
c_include_dirs.append(numpy.get_include())
# cflags.append('-Wno-unused')
module_body, func_body = extract_func_code(code)
params = ', '.join(['%s %s' % a for a in arg_sigs])
module_code = """
%(module_body)s
%(cimports)s
def __invoke(%(params)s):
%(func_body)s
return locals()
""" % {'cimports': '\n'.join(cimports),
'module_body': module_body,
'params': params,
'func_body': func_body }
for key, value in literals.items():
module_code = module_code.replace(key, value)
pyx_file = os.path.join(lib_dir, module_name + '.pyx')
fh = open(pyx_file, 'w')
try:
fh.write(module_code)
finally:
fh.close()
extension = Extension(
name = module_name,
sources = [pyx_file],
include_dirs = c_include_dirs,
extra_compile_args = cflags)
if build_extension is None:
build_extension = _get_build_extension()
build_extension.extensions = cythonize(
[extension],
include_path=cython_include_dirs or ['.'],
compiler_directives=cython_compiler_directives,
quiet=quiet)
build_extension.build_temp = os.path.dirname(pyx_file)
build_extension.build_lib = lib_dir
build_extension.run()
module = load_dynamic(module_name, module_path)
_cython_inline_cache[orig_code, arg_sigs, key_hash] = module.__invoke
arg_list = [kwds[arg] for arg in arg_names]
return module.__invoke(*arg_list)
# Cached extension suffix used by cython_inline above. The None is replaced
# with the actual suffix on the first cython_inline() invocation.
cython_inline.so_ext = None
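# Minimal illustrative use of cython_inline(); unbound names such as 'a' and
# 'b' below are resolved from the caller's frame:
#
#     from Cython.Build.Inline import cython_inline
#     a, b = 10, 20
#     cython_inline("return a + b")   # -> 30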
_find_non_space = re.compile('[^ ]').search
def strip_common_indent(code):
min_indent = None
lines = code.splitlines()
for line in lines:
match = _find_non_space(line)
if not match:
continue # blank
indent = match.start()
if line[indent] == '#':
continue # comment
if min_indent is None or min_indent > indent:
min_indent = indent
    for ix, line in enumerate(lines):
        match = _find_non_space(line)
        if not match or not line:
            continue  # blank
        # check the line's own first non-space column for a comment marker
        # (not a stale 'indent' value left over from the first loop)
        if line[match.start()] == '#':
            continue  # comment
        lines[ix] = line[min_indent:]
return '\n'.join(lines)
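# e.g. strip_common_indent("    a = 1\n    b = 2") -> "a = 1\nb = 2"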
module_statement = re.compile(r'^((cdef +(extern|class))|cimport|(from .+ cimport)|(from .+ import +[*]))')
def extract_func_code(code):
module = []
function = []
current = function
code = code.replace('\t', ' ')
lines = code.split('\n')
for line in lines:
if not line.startswith(' '):
if module_statement.match(line):
current = module
else:
current = function
current.append(line)
return '\n'.join(module), ' ' + '\n '.join(function)
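# Illustrative split: for inline code such as
#
#     cimport libc.math
#     x = libc.math.sin(0.5)
#
# the 'cimport' line is routed to the module scope, while the assignment ends
# up in the body of the generated __invoke() function.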
try:
from inspect import getcallargs
except ImportError:
def getcallargs(func, *arg_values, **kwd_values):
all = {}
args, varargs, kwds, defaults = inspect.getargspec(func)
if varargs is not None:
all[varargs] = arg_values[len(args):]
for name, value in zip(args, arg_values):
all[name] = value
for name, value in list(kwd_values.items()):
if name in args:
if name in all:
raise TypeError("Duplicate argument %s" % name)
all[name] = kwd_values.pop(name)
if kwds is not None:
all[kwds] = kwd_values
elif kwd_values:
raise TypeError("Unexpected keyword arguments: %s" % list(kwd_values))
if defaults is None:
defaults = ()
first_default = len(args) - len(defaults)
for ix, name in enumerate(args):
if name not in all:
if ix >= first_default:
all[name] = defaults[ix - first_default]
else:
raise TypeError("Missing argument: %s" % name)
return all
def get_body(source):
ix = source.index(':')
    if source[:6] == 'lambda':  # 'lambda' is six characters; [:5] would never match
return "return %s" % source[ix+1:]
else:
return source[ix+1:]
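# e.g. get_body("def f(x): return x")  -> " return x"
#      get_body("lambda x: x + 1")     -> "return  x + 1"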
# Lots to be done here... It would be especially cool if compiled functions
# could invoke each other quickly.
class RuntimeCompiledFunction(object):
def __init__(self, f):
self._f = f
self._body = get_body(inspect.getsource(f))
def __call__(self, *args, **kwds):
all = getcallargs(self._f, *args, **kwds)
if IS_PY3:
return cython_inline(self._body, locals=self._f.__globals__, globals=self._f.__globals__, **all)
else:
return cython_inline(self._body, locals=self._f.func_globals, globals=self._f.func_globals, **all)
# File: Cython-0.29.28/Cython/Build/IpythonMagic.py

# -*- coding: utf-8 -*-
"""
=====================
Cython related magics
=====================
Magic command interface for interactive work with Cython
.. note::
The ``Cython`` package needs to be installed separately. It
can be obtained using ``easy_install`` or ``pip``.
Usage
=====
To enable the magics below, execute ``%load_ext cython``.
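For example (an illustrative session)::

    In [1]: %load_ext cython

    In [2]: %%cython
       ...: def f(int x):
       ...:     return 2 * x

    In [3]: f(21)
    Out[3]: 42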
``%%cython``
{CYTHON_DOC}
``%%cython_inline``
{CYTHON_INLINE_DOC}
``%%cython_pyximport``
{CYTHON_PYXIMPORT_DOC}
Author:
* Brian Granger
Code moved from IPython and adapted by:
* Martín Gaitán
Parts of this code were taken from Cython.inline.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2010-2011, IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file ipython-COPYING.rst, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import imp
import io
import os
import re
import sys
import time
import copy
import distutils.log
import textwrap
IO_ENCODING = sys.getfilesystemencoding()
IS_PY2 = sys.version_info[0] < 3
try:
reload
except NameError: # Python 3
from imp import reload
try:
import hashlib
except ImportError:
import md5 as hashlib
from distutils.core import Distribution, Extension
from distutils.command.build_ext import build_ext
from IPython.core import display
from IPython.core import magic_arguments
from IPython.core.magic import Magics, magics_class, cell_magic
try:
from IPython.paths import get_ipython_cache_dir
except ImportError:
# older IPython version
from IPython.utils.path import get_ipython_cache_dir
from IPython.utils.text import dedent
from ..Shadow import __version__ as cython_version
from ..Compiler.Errors import CompileError
from .Inline import cython_inline
from .Dependencies import cythonize
PGO_CONFIG = {
'gcc': {
'gen': ['-fprofile-generate', '-fprofile-dir={TEMPDIR}'],
'use': ['-fprofile-use', '-fprofile-correction', '-fprofile-dir={TEMPDIR}'],
},
# blind copy from 'configure' script in CPython 3.7
'icc': {
'gen': ['-prof-gen'],
'use': ['-prof-use'],
}
}
PGO_CONFIG['mingw32'] = PGO_CONFIG['gcc']
if IS_PY2:
def encode_fs(name):
return name if isinstance(name, bytes) else name.encode(IO_ENCODING)
else:
def encode_fs(name):
return name
@magics_class
class CythonMagics(Magics):
def __init__(self, shell):
super(CythonMagics, self).__init__(shell)
self._reloads = {}
self._code_cache = {}
self._pyximport_installed = False
def _import_all(self, module):
mdict = module.__dict__
if '__all__' in mdict:
keys = mdict['__all__']
else:
keys = [k for k in mdict if not k.startswith('_')]
for k in keys:
try:
self.shell.push({k: mdict[k]})
except KeyError:
msg = "'module' object has no attribute '%s'" % k
raise AttributeError(msg)
@cell_magic
def cython_inline(self, line, cell):
"""Compile and run a Cython code cell using Cython.inline.
This magic simply passes the body of the cell to Cython.inline
and returns the result. If the variables `a` and `b` are defined
in the user's namespace, here is a simple example that returns
their sum::
%%cython_inline
return a+b
For most purposes, we recommend the usage of the `%%cython` magic.
"""
locs = self.shell.user_global_ns
globs = self.shell.user_ns
return cython_inline(cell, locals=locs, globals=globs)
@cell_magic
def cython_pyximport(self, line, cell):
"""Compile and import a Cython code cell using pyximport.
The contents of the cell are written to a `.pyx` file in the current
working directory, which is then imported using `pyximport`. This
magic requires a module name to be passed::
%%cython_pyximport modulename
def f(x):
return 2.0*x
The compiled module is then imported and all of its symbols are
injected into the user's namespace. For most purposes, we recommend
the usage of the `%%cython` magic.
"""
module_name = line.strip()
if not module_name:
raise ValueError('module name must be given')
fname = module_name + '.pyx'
with io.open(fname, 'w', encoding='utf-8') as f:
f.write(cell)
if 'pyximport' not in sys.modules or not self._pyximport_installed:
import pyximport
pyximport.install()
self._pyximport_installed = True
if module_name in self._reloads:
module = self._reloads[module_name]
# Note: reloading extension modules is not actually supported
# (requires PEP-489 reinitialisation support).
# Don't know why this should ever have worked as it reads here.
# All we really need to do is to update the globals below.
#reload(module)
else:
__import__(module_name)
module = sys.modules[module_name]
self._reloads[module_name] = module
self._import_all(module)
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-a', '--annotate', action='store_true', default=False,
help="Produce a colorized HTML version of the source."
)
@magic_arguments.argument(
'-+', '--cplus', action='store_true', default=False,
help="Output a C++ rather than C file."
)
@magic_arguments.argument(
'-3', dest='language_level', action='store_const', const=3, default=None,
help="Select Python 3 syntax."
)
@magic_arguments.argument(
'-2', dest='language_level', action='store_const', const=2, default=None,
help="Select Python 2 syntax."
)
@magic_arguments.argument(
'-f', '--force', action='store_true', default=False,
help="Force the compilation of a new module, even if the source has been "
"previously compiled."
)
@magic_arguments.argument(
'-c', '--compile-args', action='append', default=[],
help="Extra flags to pass to compiler via the `extra_compile_args` "
"Extension flag (can be specified multiple times)."
)
@magic_arguments.argument(
'--link-args', action='append', default=[],
help="Extra flags to pass to linker via the `extra_link_args` "
"Extension flag (can be specified multiple times)."
)
@magic_arguments.argument(
'-l', '--lib', action='append', default=[],
help="Add a library to link the extension against (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-n', '--name',
help="Specify a name for the Cython module."
)
@magic_arguments.argument(
'-L', dest='library_dirs', metavar='dir', action='append', default=[],
help="Add a path to the list of library directories (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-I', '--include', action='append', default=[],
help="Add a path to the list of include directories (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'-S', '--src', action='append', default=[],
help="Add a path to the list of src files (can be specified "
"multiple times)."
)
@magic_arguments.argument(
'--pgo', dest='pgo', action='store_true', default=False,
help=("Enable profile guided optimisation in the C compiler. "
"Compiles the cell twice and executes it in between to generate a runtime profile.")
)
@magic_arguments.argument(
'--verbose', dest='quiet', action='store_false', default=True,
help=("Print debug information like generated .c/.cpp file location "
"and exact gcc/g++ command invoked.")
)
@cell_magic
def cython(self, line, cell):
"""Compile and import everything from a Cython code cell.
The contents of the cell are written to a `.pyx` file in the
directory `IPYTHONDIR/cython` using a filename with the hash of the
code. This file is then cythonized and compiled. The resulting module
is imported and all of its symbols are injected into the user's
namespace. The usage is similar to that of `%%cython_pyximport` but
you don't have to pass a module name::
%%cython
def f(x):
return 2.0*x
To compile OpenMP codes, pass the required `--compile-args`
and `--link-args`. For example with gcc::
%%cython --compile-args=-fopenmp --link-args=-fopenmp
...
To enable profile guided optimisation, pass the ``--pgo`` option.
Note that the cell itself needs to take care of establishing a suitable
profile when executed. This can be done by implementing the functions to
optimise, and then calling them directly in the same cell on some realistic
training data like this::
%%cython --pgo
def critical_function(data):
for item in data:
...
# execute function several times to build profile
from somewhere import some_typical_data
for _ in range(100):
critical_function(some_typical_data)
In Python 3.5 and later, you can distinguish between the profile and
non-profile runs as follows::
if "_pgo_" in __name__:
... # execute critical code here
"""
args = magic_arguments.parse_argstring(self.cython, line)
code = cell if cell.endswith('\n') else cell + '\n'
lib_dir = os.path.join(get_ipython_cache_dir(), 'cython')
key = (code, line, sys.version_info, sys.executable, cython_version)
if not os.path.exists(lib_dir):
os.makedirs(lib_dir)
if args.pgo:
key += ('pgo',)
if args.force:
# Force a new module name by adding the current time to the
# key which is hashed to determine the module name.
key += (time.time(),)
if args.name:
module_name = str(args.name) # no-op in Py3
else:
module_name = "_cython_magic_" + hashlib.md5(str(key).encode('utf-8')).hexdigest()
html_file = os.path.join(lib_dir, module_name + '.html')
module_path = os.path.join(lib_dir, module_name + self.so_ext)
have_module = os.path.isfile(module_path)
need_cythonize = args.pgo or not have_module
if args.annotate:
if not os.path.isfile(html_file):
need_cythonize = True
extension = None
if need_cythonize:
extensions = self._cythonize(module_name, code, lib_dir, args, quiet=args.quiet)
if extensions is None:
# Compilation failed and printed error message
return None
assert len(extensions) == 1
extension = extensions[0]
self._code_cache[key] = module_name
if args.pgo:
self._profile_pgo_wrapper(extension, lib_dir)
try:
self._build_extension(extension, lib_dir, pgo_step_name='use' if args.pgo else None,
quiet=args.quiet)
except distutils.errors.CompileError:
# Build failed and printed error message
return None
module = imp.load_dynamic(module_name, module_path)
self._import_all(module)
if args.annotate:
try:
with io.open(html_file, encoding='utf-8') as f:
annotated_html = f.read()
except IOError as e:
# File could not be opened. Most likely the user has a version
# of Cython before 0.15.1 (when `cythonize` learned the
# `force` keyword argument) and has already compiled this
# exact source without annotation.
print('Cython completed successfully but the annotated '
'source could not be read.', file=sys.stderr)
print(e, file=sys.stderr)
else:
return display.HTML(self.clean_annotated_html(annotated_html))
def _profile_pgo_wrapper(self, extension, lib_dir):
"""
Generate a .c file for a separate extension module that calls the
module init function of the original module. This makes sure that the
PGO profiler sees the correct .o file of the final module, but it still
allows us to import the module under a different name for profiling,
before recompiling it into the PGO optimised module. Overwriting and
reimporting the same shared library is not portable.
"""
extension = copy.copy(extension) # shallow copy, do not modify sources in place!
module_name = extension.name
pgo_module_name = '_pgo_' + module_name
pgo_wrapper_c_file = os.path.join(lib_dir, pgo_module_name + '.c')
with io.open(pgo_wrapper_c_file, 'w', encoding='utf-8') as f:
f.write(textwrap.dedent(u"""
#include "Python.h"
#if PY_MAJOR_VERSION < 3
extern PyMODINIT_FUNC init%(module_name)s(void);
PyMODINIT_FUNC init%(pgo_module_name)s(void); /*proto*/
PyMODINIT_FUNC init%(pgo_module_name)s(void) {
PyObject *sys_modules;
init%(module_name)s(); if (PyErr_Occurred()) return;
sys_modules = PyImport_GetModuleDict(); /* borrowed, no exception, "never" fails */
if (sys_modules) {
PyObject *module = PyDict_GetItemString(sys_modules, "%(module_name)s"); if (!module) return;
PyDict_SetItemString(sys_modules, "%(pgo_module_name)s", module);
Py_DECREF(module);
}
}
#else
extern PyMODINIT_FUNC PyInit_%(module_name)s(void);
PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void); /*proto*/
PyMODINIT_FUNC PyInit_%(pgo_module_name)s(void) {
return PyInit_%(module_name)s();
}
#endif
""" % {'module_name': module_name, 'pgo_module_name': pgo_module_name}))
extension.sources = extension.sources + [pgo_wrapper_c_file] # do not modify in place!
extension.name = pgo_module_name
self._build_extension(extension, lib_dir, pgo_step_name='gen')
# import and execute module code to generate profile
so_module_path = os.path.join(lib_dir, pgo_module_name + self.so_ext)
imp.load_dynamic(pgo_module_name, so_module_path)
def _cythonize(self, module_name, code, lib_dir, args, quiet=True):
pyx_file = os.path.join(lib_dir, module_name + '.pyx')
pyx_file = encode_fs(pyx_file)
c_include_dirs = args.include
c_src_files = list(map(str, args.src))
if 'numpy' in code:
import numpy
c_include_dirs.append(numpy.get_include())
with io.open(pyx_file, 'w', encoding='utf-8') as f:
f.write(code)
extension = Extension(
name=module_name,
sources=[pyx_file] + c_src_files,
include_dirs=c_include_dirs,
library_dirs=args.library_dirs,
extra_compile_args=args.compile_args,
extra_link_args=args.link_args,
libraries=args.lib,
language='c++' if args.cplus else 'c',
)
try:
opts = dict(
quiet=quiet,
annotate=args.annotate,
force=True,
)
if args.language_level is not None:
assert args.language_level in (2, 3)
opts['language_level'] = args.language_level
elif sys.version_info[0] >= 3:
opts['language_level'] = 3
return cythonize([extension], **opts)
except CompileError:
return None
def _build_extension(self, extension, lib_dir, temp_dir=None, pgo_step_name=None, quiet=True):
build_extension = self._get_build_extension(
extension, lib_dir=lib_dir, temp_dir=temp_dir, pgo_step_name=pgo_step_name)
old_threshold = None
try:
if not quiet:
old_threshold = distutils.log.set_threshold(distutils.log.DEBUG)
build_extension.run()
finally:
if not quiet and old_threshold is not None:
distutils.log.set_threshold(old_threshold)
def _add_pgo_flags(self, build_extension, step_name, temp_dir):
compiler_type = build_extension.compiler.compiler_type
if compiler_type == 'unix':
compiler_cmd = build_extension.compiler.compiler_so
# TODO: we could try to call "[cmd] --version" for better insights
if not compiler_cmd:
pass
elif 'clang' in compiler_cmd or 'clang' in compiler_cmd[0]:
compiler_type = 'clang'
elif 'icc' in compiler_cmd or 'icc' in compiler_cmd[0]:
compiler_type = 'icc'
elif 'gcc' in compiler_cmd or 'gcc' in compiler_cmd[0]:
compiler_type = 'gcc'
elif 'g++' in compiler_cmd or 'g++' in compiler_cmd[0]:
compiler_type = 'gcc'
config = PGO_CONFIG.get(compiler_type)
orig_flags = []
if config and step_name in config:
flags = [f.format(TEMPDIR=temp_dir) for f in config[step_name]]
for extension in build_extension.extensions:
orig_flags.append((extension.extra_compile_args, extension.extra_link_args))
extension.extra_compile_args = extension.extra_compile_args + flags
extension.extra_link_args = extension.extra_link_args + flags
else:
print("No PGO %s configuration known for C compiler type '%s'" % (step_name, compiler_type),
file=sys.stderr)
return orig_flags
@property
def so_ext(self):
"""The extension suffix for compiled modules."""
try:
return self._so_ext
except AttributeError:
self._so_ext = self._get_build_extension().get_ext_filename('')
return self._so_ext
def _clear_distutils_mkpath_cache(self):
"""clear distutils mkpath cache
prevents distutils from skipping re-creation of dirs that have been removed
"""
try:
from distutils.dir_util import _path_created
except ImportError:
pass
else:
_path_created.clear()
def _get_build_extension(self, extension=None, lib_dir=None, temp_dir=None,
pgo_step_name=None, _build_ext=build_ext):
self._clear_distutils_mkpath_cache()
dist = Distribution()
config_files = dist.find_config_files()
try:
config_files.remove('setup.cfg')
except ValueError:
pass
dist.parse_config_files(config_files)
if not temp_dir:
temp_dir = lib_dir
add_pgo_flags = self._add_pgo_flags
if pgo_step_name:
base_build_ext = _build_ext
class _build_ext(_build_ext):
def build_extensions(self):
add_pgo_flags(self, pgo_step_name, temp_dir)
base_build_ext.build_extensions(self)
build_extension = _build_ext(dist)
build_extension.finalize_options()
if temp_dir:
temp_dir = encode_fs(temp_dir)
build_extension.build_temp = temp_dir
if lib_dir:
lib_dir = encode_fs(lib_dir)
build_extension.build_lib = lib_dir
if extension is not None:
build_extension.extensions = [extension]
return build_extension
@staticmethod
def clean_annotated_html(html):
"""Clean up the annotated HTML source.
Strips the link to the generated C or C++ file, which we do not
present to the user.
"""
        r = re.compile('<p>Raw output: <a href="(.*)">(.*)</a>')
html = '\n'.join(l for l in html.splitlines() if not r.match(l))
return html
__doc__ = __doc__.format(
# rST doesn't see the -+ flag as part of an option list, so we
# hide it from the module-level docstring.
CYTHON_DOC=dedent(CythonMagics.cython.__doc__\
.replace('-+, --cplus', '--cplus ')),
CYTHON_INLINE_DOC=dedent(CythonMagics.cython_inline.__doc__),
CYTHON_PYXIMPORT_DOC=dedent(CythonMagics.cython_pyximport.__doc__),
)
# File: Cython-0.29.28/Cython/Build/Tests/TestCyCache.py

import difflib
import glob
import gzip
import os
import tempfile
import Cython.Build.Dependencies
import Cython.Utils
from Cython.TestUtils import CythonTest
class TestCyCache(CythonTest):
def setUp(self):
CythonTest.setUp(self)
self.temp_dir = tempfile.mkdtemp(
prefix='cycache-test',
dir='TEST_TMP' if os.path.isdir('TEST_TMP') else None)
self.src_dir = tempfile.mkdtemp(prefix='src', dir=self.temp_dir)
self.cache_dir = tempfile.mkdtemp(prefix='cache', dir=self.temp_dir)
def cache_files(self, file_glob):
return glob.glob(os.path.join(self.cache_dir, file_glob))
def fresh_cythonize(self, *args, **kwargs):
Cython.Utils.clear_function_caches()
Cython.Build.Dependencies._dep_tree = None # discard method caches
Cython.Build.Dependencies.cythonize(*args, **kwargs)
def test_cycache_switch(self):
content1 = 'value = 1\n'
content2 = 'value = 2\n'
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
open(a_pyx, 'w').write(content1)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
self.assertEqual(1, len(self.cache_files('a.c*')))
a_contents1 = open(a_c).read()
os.unlink(a_c)
open(a_pyx, 'w').write(content2)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
a_contents2 = open(a_c).read()
os.unlink(a_c)
self.assertNotEqual(a_contents1, a_contents2, 'C file not changed!')
self.assertEqual(2, len(self.cache_files('a.c*')))
open(a_pyx, 'w').write(content1)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
self.assertEqual(2, len(self.cache_files('a.c*')))
a_contents = open(a_c).read()
self.assertEqual(
a_contents, a_contents1,
msg='\n'.join(list(difflib.unified_diff(
a_contents.split('\n'), a_contents1.split('\n')))[:10]))
def test_cycache_uses_cache(self):
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
open(a_pyx, 'w').write('pass')
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
a_cache = os.path.join(self.cache_dir, os.listdir(self.cache_dir)[0])
gzip.GzipFile(a_cache, 'wb').write('fake stuff'.encode('ascii'))
os.unlink(a_c)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
a_contents = open(a_c).read()
self.assertEqual(a_contents, 'fake stuff',
'Unexpected contents: %s...' % a_contents[:100])
def test_multi_file_output(self):
a_pyx = os.path.join(self.src_dir, 'a.pyx')
a_c = a_pyx[:-4] + '.c'
a_h = a_pyx[:-4] + '.h'
a_api_h = a_pyx[:-4] + '_api.h'
open(a_pyx, 'w').write('cdef public api int foo(int x): return x\n')
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
expected = [a_c, a_h, a_api_h]
for output in expected:
self.assertTrue(os.path.exists(output), output)
os.unlink(output)
self.fresh_cythonize(a_pyx, cache=self.cache_dir)
for output in expected:
self.assertTrue(os.path.exists(output), output)
def test_options_invalidation(self):
hash_pyx = os.path.join(self.src_dir, 'options.pyx')
hash_c = hash_pyx[:-len('.pyx')] + '.c'
open(hash_pyx, 'w').write('pass')
self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False)
self.assertEqual(1, len(self.cache_files('options.c*')))
os.unlink(hash_c)
self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=True)
self.assertEqual(2, len(self.cache_files('options.c*')))
os.unlink(hash_c)
self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=False)
self.assertEqual(2, len(self.cache_files('options.c*')))
os.unlink(hash_c)
self.fresh_cythonize(hash_pyx, cache=self.cache_dir, cplus=False, show_version=True)
self.assertEqual(2, len(self.cache_files('options.c*')))
# File: Cython-0.29.28/Cython/Build/Tests/TestInline.py

import os, tempfile
from Cython.Shadow import inline
from Cython.Build.Inline import safe_type
from Cython.TestUtils import CythonTest
try:
import numpy
has_numpy = True
except:
has_numpy = False
test_kwds = dict(force=True, quiet=True)
global_value = 100
class TestInline(CythonTest):
def setUp(self):
CythonTest.setUp(self)
self.test_kwds = dict(test_kwds)
if os.path.isdir('TEST_TMP'):
lib_dir = os.path.join('TEST_TMP','inline')
else:
lib_dir = tempfile.mkdtemp(prefix='cython_inline_')
self.test_kwds['lib_dir'] = lib_dir
def test_simple(self):
self.assertEqual(inline("return 1+2", **self.test_kwds), 3)
def test_types(self):
self.assertEqual(inline("""
cimport cython
return cython.typeof(a), cython.typeof(b)
""", a=1.0, b=[], **self.test_kwds), ('double', 'list object'))
def test_locals(self):
a = 1
b = 2
self.assertEqual(inline("return a+b", **self.test_kwds), 3)
def test_globals(self):
self.assertEqual(inline("return global_value + 1", **self.test_kwds), global_value + 1)
def test_no_return(self):
self.assertEqual(inline("""
a = 1
cdef double b = 2
cdef c = []
""", **self.test_kwds), dict(a=1, b=2.0, c=[]))
def test_def_node(self):
foo = inline("def foo(x): return x * x", **self.test_kwds)['foo']
self.assertEqual(foo(7), 49)
def test_class_ref(self):
class Type(object):
pass
tp = inline("Type")['Type']
self.assertEqual(tp, Type)
def test_pure(self):
import cython as cy
b = inline("""
b = cy.declare(float, a)
c = cy.declare(cy.pointer(cy.float), &b)
return b
""", a=3, **self.test_kwds)
self.assertEqual(type(b), float)
def test_compiler_directives(self):
self.assertEqual(
inline('return sum(x)',
x=[1, 2, 3],
cython_compiler_directives={'boundscheck': False}),
6
)
def test_lang_version(self):
# GH-3419. Caching for inline code didn't always respect compiler directives.
inline_divcode = "def f(int a, int b): return a/b"
self.assertEqual(
inline(inline_divcode, language_level=2)['f'](5,2),
2
)
self.assertEqual(
inline(inline_divcode, language_level=3)['f'](5,2),
2.5
)
if has_numpy:
def test_numpy(self):
import numpy
a = numpy.ndarray((10, 20))
a[0,0] = 10
self.assertEqual(safe_type(a), 'numpy.ndarray[numpy.float64_t, ndim=2]')
self.assertEqual(inline("return a[0,0]", a=a, **self.test_kwds), 10.0)
# File: Cython-0.29.28/Cython/Build/Tests/TestIpythonMagic.py

# -*- coding: utf-8 -*-
# tag: ipython
"""Tests for the Cython magics extension."""
from __future__ import absolute_import
import os
import sys
from contextlib import contextmanager
from Cython.Build import IpythonMagic
from Cython.TestUtils import CythonTest
try:
import IPython.testing.globalipapp
except ImportError:
# Disable tests and fake helpers for initialisation below.
def skip_if_not_installed(_):
return None
else:
def skip_if_not_installed(c):
return c
try:
# disable IPython history thread before it gets started to avoid having to clean it up
from IPython.core.history import HistoryManager
HistoryManager.enabled = False
except ImportError:
pass
code = u"""\
def f(x):
return 2*x
"""
cython3_code = u"""\
def f(int x):
return 2 / x
def call(x):
return f(*(x,))
"""
pgo_cython3_code = cython3_code + u"""\
def main():
for _ in range(100): call(5)
main()
"""
if sys.platform == 'win32':
# not using IPython's decorators here because they depend on "nose"
try:
from unittest import skip as skip_win32
except ImportError:
# poor dev's silent @unittest.skip()
def skip_win32(dummy):
def _skip_win32(func):
return None
return _skip_win32
else:
def skip_win32(dummy):
def _skip_win32(func):
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return wrapper
return _skip_win32
@skip_if_not_installed
class TestIPythonMagic(CythonTest):
@classmethod
def setUpClass(cls):
CythonTest.setUpClass()
cls._ip = IPython.testing.globalipapp.get_ipython()
def setUp(self):
CythonTest.setUp(self)
self._ip.extension_manager.load_extension('cython')
def test_cython_inline(self):
ip = self._ip
ip.ex('a=10; b=20')
result = ip.run_cell_magic('cython_inline', '', 'return a+b')
self.assertEqual(result, 30)
@skip_win32('Skip on Windows')
def test_cython_pyximport(self):
ip = self._ip
module_name = '_test_cython_pyximport'
ip.run_cell_magic('cython_pyximport', module_name, code)
ip.ex('g = f(10)')
self.assertEqual(ip.user_ns['g'], 20.0)
ip.run_cell_magic('cython_pyximport', module_name, code)
ip.ex('h = f(-10)')
self.assertEqual(ip.user_ns['h'], -20.0)
try:
os.remove(module_name + '.pyx')
except OSError:
pass
def test_cython(self):
ip = self._ip
ip.run_cell_magic('cython', '', code)
ip.ex('g = f(10)')
self.assertEqual(ip.user_ns['g'], 20.0)
def test_cython_name(self):
# The Cython module named 'mymodule' defines the function f.
ip = self._ip
ip.run_cell_magic('cython', '--name=mymodule', code)
# This module can now be imported in the interactive namespace.
ip.ex('import mymodule; g = mymodule.f(10)')
self.assertEqual(ip.user_ns['g'], 20.0)
def test_cython_language_level(self):
# The Cython cell defines the functions f() and call().
ip = self._ip
ip.run_cell_magic('cython', '', cython3_code)
ip.ex('g = f(10); h = call(10)')
if sys.version_info[0] < 3:
self.assertEqual(ip.user_ns['g'], 2 // 10)
self.assertEqual(ip.user_ns['h'], 2 // 10)
else:
self.assertEqual(ip.user_ns['g'], 2.0 / 10.0)
self.assertEqual(ip.user_ns['h'], 2.0 / 10.0)
def test_cython3(self):
# The Cython cell defines the functions f() and call().
ip = self._ip
ip.run_cell_magic('cython', '-3', cython3_code)
ip.ex('g = f(10); h = call(10)')
self.assertEqual(ip.user_ns['g'], 2.0 / 10.0)
self.assertEqual(ip.user_ns['h'], 2.0 / 10.0)
def test_cython2(self):
# The Cython cell defines the functions f() and call().
ip = self._ip
ip.run_cell_magic('cython', '-2', cython3_code)
ip.ex('g = f(10); h = call(10)')
self.assertEqual(ip.user_ns['g'], 2 // 10)
self.assertEqual(ip.user_ns['h'], 2 // 10)
@skip_win32('Skip on Windows')
def test_cython3_pgo(self):
# The Cython cell defines the functions f() and call().
ip = self._ip
ip.run_cell_magic('cython', '-3 --pgo', pgo_cython3_code)
ip.ex('g = f(10); h = call(10); main()')
self.assertEqual(ip.user_ns['g'], 2.0 / 10.0)
self.assertEqual(ip.user_ns['h'], 2.0 / 10.0)
@skip_win32('Skip on Windows')
def test_extlibs(self):
ip = self._ip
code = u"""
from libc.math cimport sin
x = sin(0.0)
"""
ip.user_ns['x'] = 1
ip.run_cell_magic('cython', '-l m', code)
self.assertEqual(ip.user_ns['x'], 0)
def test_cython_verbose(self):
ip = self._ip
ip.run_cell_magic('cython', '--verbose', code)
ip.ex('g = f(10)')
self.assertEqual(ip.user_ns['g'], 20.0)
def test_cython_verbose_thresholds(self):
@contextmanager
def mock_distutils():
class MockLog:
DEBUG = 1
INFO = 2
thresholds = [INFO]
def set_threshold(self, val):
self.thresholds.append(val)
return self.thresholds[-2]
new_log = MockLog()
old_log = IpythonMagic.distutils.log
try:
IpythonMagic.distutils.log = new_log
yield new_log
finally:
IpythonMagic.distutils.log = old_log
ip = self._ip
with mock_distutils() as verbose_log:
ip.run_cell_magic('cython', '--verbose', code)
ip.ex('g = f(10)')
self.assertEqual(ip.user_ns['g'], 20.0)
self.assertEqual([verbose_log.INFO, verbose_log.DEBUG, verbose_log.INFO],
verbose_log.thresholds)
with mock_distutils() as normal_log:
ip.run_cell_magic('cython', '', code)
ip.ex('g = f(10)')
self.assertEqual(ip.user_ns['g'], 20.0)
self.assertEqual([normal_log.INFO], normal_log.thresholds)
# File: Cython-0.29.28/Cython/Build/Tests/TestStripLiterals.py

from Cython.Build.Dependencies import strip_string_literals
from Cython.TestUtils import CythonTest
class TestStripLiterals(CythonTest):
def t(self, before, expected):
actual, literals = strip_string_literals(before, prefix="_L")
self.assertEqual(expected, actual)
for key, value in literals.items():
actual = actual.replace(key, value)
self.assertEqual(before, actual)
def test_empty(self):
self.t("", "")
def test_single_quote(self):
self.t("'x'", "'_L1_'")
def test_double_quote(self):
self.t('"x"', '"_L1_"')
def test_nested_quotes(self):
self.t(""" '"' "'" """, """ '_L1_' "_L2_" """)
def test_triple_quote(self):
self.t(" '''a\n''' ", " '''_L1_''' ")
def test_backslash(self):
self.t(r"'a\'b'", "'_L1_'")
self.t(r"'a\\'", "'_L1_'")
self.t(r"'a\\\'b'", "'_L1_'")
def test_unicode(self):
self.t("u'abc'", "u'_L1_'")
def test_raw(self):
self.t(r"r'abc\\'", "r'_L1_'")
def test_raw_unicode(self):
self.t(r"ru'abc\\'", "ru'_L1_'")
def test_comment(self):
self.t("abc # foo", "abc #_L1_")
def test_comment_and_quote(self):
self.t("abc # 'x'", "abc #_L1_")
self.t("'abc#'", "'_L1_'")
def test_include(self):
self.t("include 'a.pxi' # something here",
"include '_L1_' #_L2_")
def test_extern(self):
self.t("cdef extern from 'a.h': # comment",
"cdef extern from '_L1_': #_L2_")
# File: Cython-0.29.28/Cython/Build/Tests/__init__.py

# empty file
# File: Cython-0.29.28/Cython/Build/__init__.py

from .Dependencies import cythonize
from .Distutils import build_ext
# File: Cython-0.29.28/Cython/CodeWriter.py

"""
Serializes a Cython code tree to Cython code. This is primarily useful for
debugging and testing purposes.
The output is in a strict format: no whitespace or comments from the input
are preserved (nor could they be, as they are not present in the code tree).
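
For example (a minimal sketch; assumes a parse tree ``tree`` obtained
elsewhere, e.g. via Cython.Compiler.TreeFragment)::

    from Cython.CodeWriter import CodeWriter

    result = CodeWriter().write(tree)
    print(u'\n'.join(result.lines))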
"""
from __future__ import absolute_import, print_function
from .Compiler.Visitor import TreeVisitor
from .Compiler.ExprNodes import *
class LinesResult(object):
def __init__(self):
self.lines = []
self.s = u""
def put(self, s):
self.s += s
def newline(self):
self.lines.append(self.s)
self.s = u""
def putline(self, s):
self.put(s)
self.newline()
class DeclarationWriter(TreeVisitor):
indent_string = u" "
def __init__(self, result=None):
super(DeclarationWriter, self).__init__()
if result is None:
result = LinesResult()
self.result = result
self.numindents = 0
self.tempnames = {}
self.tempblockindex = 0
def write(self, tree):
self.visit(tree)
return self.result
def indent(self):
self.numindents += 1
def dedent(self):
self.numindents -= 1
def startline(self, s=u""):
self.result.put(self.indent_string * self.numindents + s)
def put(self, s):
self.result.put(s)
def putline(self, s):
self.result.putline(self.indent_string * self.numindents + s)
def endline(self, s=u""):
self.result.putline(s)
def line(self, s):
self.startline(s)
self.endline()
def comma_separated_list(self, items, output_rhs=False):
if len(items) > 0:
for item in items[:-1]:
self.visit(item)
if output_rhs and item.default is not None:
self.put(u" = ")
self.visit(item.default)
self.put(u", ")
self.visit(items[-1])
def visit_Node(self, node):
raise AssertionError("Node not handled by serializer: %r" % node)
def visit_ModuleNode(self, node):
self.visitchildren(node)
def visit_StatListNode(self, node):
self.visitchildren(node)
def visit_CDefExternNode(self, node):
if node.include_file is None:
file = u'*'
else:
file = u'"%s"' % node.include_file
self.putline(u"cdef extern from %s:" % file)
self.indent()
self.visit(node.body)
self.dedent()
def visit_CPtrDeclaratorNode(self, node):
self.put('*')
self.visit(node.base)
def visit_CReferenceDeclaratorNode(self, node):
self.put('&')
self.visit(node.base)
def visit_CArrayDeclaratorNode(self, node):
self.visit(node.base)
self.put(u'[')
if node.dimension is not None:
self.visit(node.dimension)
self.put(u']')
def visit_CFuncDeclaratorNode(self, node):
# TODO: except, gil, etc.
self.visit(node.base)
self.put(u'(')
self.comma_separated_list(node.args)
self.endline(u')')
def visit_CNameDeclaratorNode(self, node):
self.put(node.name)
def visit_CSimpleBaseTypeNode(self, node):
# See Parsing.p_sign_and_longness
if node.is_basic_c_type:
self.put(("unsigned ", "", "signed ")[node.signed])
if node.longness < 0:
self.put("short " * -node.longness)
elif node.longness > 0:
self.put("long " * node.longness)
self.put(node.name)
def visit_CComplexBaseTypeNode(self, node):
self.put(u'(')
self.visit(node.base_type)
self.visit(node.declarator)
self.put(u')')
def visit_CNestedBaseTypeNode(self, node):
self.visit(node.base_type)
self.put(u'.')
self.put(node.name)
def visit_TemplatedTypeNode(self, node):
self.visit(node.base_type_node)
self.put(u'[')
self.comma_separated_list(node.positional_args + node.keyword_args.key_value_pairs)
self.put(u']')
def visit_CVarDefNode(self, node):
self.startline(u"cdef ")
self.visit(node.base_type)
self.put(u" ")
self.comma_separated_list(node.declarators, output_rhs=True)
self.endline()
def visit_container_node(self, node, decl, extras, attributes):
# TODO: visibility
self.startline(decl)
if node.name:
self.put(u' ')
self.put(node.name)
if node.cname is not None:
self.put(u' "%s"' % node.cname)
if extras:
self.put(extras)
self.endline(':')
self.indent()
if not attributes:
self.putline('pass')
else:
for attribute in attributes:
self.visit(attribute)
self.dedent()
def visit_CStructOrUnionDefNode(self, node):
if node.typedef_flag:
decl = u'ctypedef '
else:
decl = u'cdef '
if node.visibility == 'public':
decl += u'public '
if node.packed:
decl += u'packed '
decl += node.kind
self.visit_container_node(node, decl, None, node.attributes)
def visit_CppClassNode(self, node):
extras = ""
if node.templates:
extras = u"[%s]" % ", ".join(node.templates)
if node.base_classes:
extras += "(%s)" % ", ".join(node.base_classes)
self.visit_container_node(node, u"cdef cppclass", extras, node.attributes)
def visit_CEnumDefNode(self, node):
self.visit_container_node(node, u"cdef enum", None, node.items)
def visit_CEnumDefItemNode(self, node):
self.startline(node.name)
if node.cname:
self.put(u' "%s"' % node.cname)
if node.value:
self.put(u" = ")
self.visit(node.value)
self.endline()
def visit_CClassDefNode(self, node):
assert not node.module_name
if node.decorators:
for decorator in node.decorators:
self.visit(decorator)
self.startline(u"cdef class ")
self.put(node.class_name)
if node.base_class_name:
self.put(u"(")
if node.base_class_module:
self.put(node.base_class_module)
self.put(u".")
self.put(node.base_class_name)
self.put(u")")
self.endline(u":")
self.indent()
self.visit(node.body)
self.dedent()
def visit_CTypeDefNode(self, node):
self.startline(u"ctypedef ")
self.visit(node.base_type)
self.put(u" ")
self.visit(node.declarator)
self.endline()
def visit_FuncDefNode(self, node):
self.startline(u"def %s(" % node.name)
self.comma_separated_list(node.args)
self.endline(u"):")
self.indent()
self.visit(node.body)
self.dedent()
def visit_CArgDeclNode(self, node):
if node.base_type.name is not None:
self.visit(node.base_type)
self.put(u" ")
self.visit(node.declarator)
if node.default is not None:
self.put(u" = ")
self.visit(node.default)
def visit_CImportStatNode(self, node):
self.startline(u"cimport ")
self.put(node.module_name)
if node.as_name:
self.put(u" as ")
self.put(node.as_name)
self.endline()
def visit_FromCImportStatNode(self, node):
self.startline(u"from ")
self.put(node.module_name)
self.put(u" cimport ")
first = True
for pos, name, as_name, kind in node.imported_names:
assert kind is None
if first:
first = False
else:
self.put(u", ")
self.put(name)
if as_name:
self.put(u" as ")
self.put(as_name)
self.endline()
def visit_NameNode(self, node):
self.put(node.name)
def visit_IntNode(self, node):
self.put(node.value)
def visit_NoneNode(self, node):
self.put(u"None")
def visit_NotNode(self, node):
self.put(u"(not ")
self.visit(node.operand)
self.put(u")")
def visit_DecoratorNode(self, node):
self.startline("@")
self.visit(node.decorator)
self.endline()
def visit_BinopNode(self, node):
self.visit(node.operand1)
self.put(u" %s " % node.operator)
self.visit(node.operand2)
def visit_AttributeNode(self, node):
self.visit(node.obj)
self.put(u".%s" % node.attribute)
def visit_BoolNode(self, node):
self.put(str(node.value))
# FIXME: represent string nodes correctly
def visit_StringNode(self, node):
value = node.value
if value.encoding is not None:
value = value.encode(value.encoding)
self.put(repr(value))
def visit_PassStatNode(self, node):
self.startline(u"pass")
self.endline()
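
# Illustrative sketch (hypothetical driver, no parse tree at hand): the
# indent()/startline()/endline() protocol can be exercised directly to see
# how numindents turns into leading whitespace in the output.
def _demo_declaration_writer():
    w = DeclarationWriter()
    w.putline(u'cdef extern from "math.h":')
    w.indent()
    w.putline(u"double sin(double x)")
    w.dedent()
    return u"\n".join(w.result.lines)
    # the second line comes out prefixed by one copy of indent_string
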
class CodeWriter(DeclarationWriter):
def visit_SingleAssignmentNode(self, node):
self.startline()
self.visit(node.lhs)
self.put(u" = ")
self.visit(node.rhs)
self.endline()
def visit_CascadedAssignmentNode(self, node):
self.startline()
for lhs in node.lhs_list:
self.visit(lhs)
self.put(u" = ")
self.visit(node.rhs)
self.endline()
def visit_PrintStatNode(self, node):
self.startline(u"print ")
self.comma_separated_list(node.arg_tuple.args)
if not node.append_newline:
self.put(u",")
self.endline()
def visit_ForInStatNode(self, node):
self.startline(u"for ")
self.visit(node.target)
self.put(u" in ")
self.visit(node.iterator.sequence)
self.endline(u":")
self.indent()
self.visit(node.body)
self.dedent()
if node.else_clause is not None:
self.line(u"else:")
self.indent()
self.visit(node.else_clause)
self.dedent()
def visit_IfStatNode(self, node):
        # The IfClauseNode is handled directly without a separate match
        # for clarity.
self.startline(u"if ")
self.visit(node.if_clauses[0].condition)
self.endline(":")
self.indent()
self.visit(node.if_clauses[0].body)
self.dedent()
for clause in node.if_clauses[1:]:
self.startline("elif ")
self.visit(clause.condition)
self.endline(":")
self.indent()
self.visit(clause.body)
self.dedent()
if node.else_clause is not None:
self.line("else:")
self.indent()
self.visit(node.else_clause)
self.dedent()
def visit_SequenceNode(self, node):
self.comma_separated_list(node.args) # Might need to discover whether we need () around tuples...hmm...
def visit_SimpleCallNode(self, node):
self.visit(node.function)
self.put(u"(")
self.comma_separated_list(node.args)
self.put(")")
def visit_GeneralCallNode(self, node):
self.visit(node.function)
self.put(u"(")
posarg = node.positional_args
if isinstance(posarg, AsTupleNode):
self.visit(posarg.arg)
else:
self.comma_separated_list(posarg.args) # TupleNode.args
if node.keyword_args:
if isinstance(node.keyword_args, DictNode):
for i, (name, value) in enumerate(node.keyword_args.key_value_pairs):
if i > 0:
self.put(', ')
self.visit(name)
self.put('=')
self.visit(value)
else:
raise Exception("Not implemented yet")
self.put(u")")
def visit_ExprStatNode(self, node):
self.startline()
self.visit(node.expr)
self.endline()
def visit_InPlaceAssignmentNode(self, node):
self.startline()
self.visit(node.lhs)
self.put(u" %s= " % node.operator)
self.visit(node.rhs)
self.endline()
def visit_WithStatNode(self, node):
self.startline()
self.put(u"with ")
self.visit(node.manager)
if node.target is not None:
self.put(u" as ")
self.visit(node.target)
self.endline(u":")
self.indent()
self.visit(node.body)
self.dedent()
def visit_TryFinallyStatNode(self, node):
self.line(u"try:")
self.indent()
self.visit(node.body)
self.dedent()
self.line(u"finally:")
self.indent()
self.visit(node.finally_clause)
self.dedent()
def visit_TryExceptStatNode(self, node):
self.line(u"try:")
self.indent()
self.visit(node.body)
self.dedent()
for x in node.except_clauses:
self.visit(x)
if node.else_clause is not None:
self.visit(node.else_clause)
def visit_ExceptClauseNode(self, node):
self.startline(u"except")
if node.pattern is not None:
self.put(u" ")
self.visit(node.pattern)
if node.target is not None:
self.put(u", ")
self.visit(node.target)
self.endline(":")
self.indent()
self.visit(node.body)
self.dedent()
def visit_ReturnStatNode(self, node):
self.startline("return ")
self.visit(node.value)
self.endline()
def visit_ReraiseStatNode(self, node):
self.line("raise")
def visit_ImportNode(self, node):
self.put(u"(import %s)" % node.module_name.value)
def visit_TempsBlockNode(self, node):
"""
        Temporaries are output like "$1_1", where the first number is
an index of the TempsBlockNode and the second number is an index
of the temporary which that block allocates.
"""
idx = 0
for handle in node.temps:
self.tempnames[handle] = "$%d_%d" % (self.tempblockindex, idx)
idx += 1
self.tempblockindex += 1
self.visit(node.body)
def visit_TempRefNode(self, node):
self.put(self.tempnames[node.handle])
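
# Worked example (hypothetical handles, mirroring visit_TempsBlockNode):
# temporaries are named "$<blockindex>_<tempindex>", so the first block's
# temps come out as "$0_0", "$0_1" and the next block starts at "$1_0".
def _demo_temp_names():
    w = CodeWriter()
    h1, h2 = object(), object()
    for idx, handle in enumerate((h1, h2)):
        w.tempnames[handle] = "$%d_%d" % (w.tempblockindex, idx)
    w.tempblockindex += 1
    return [w.tempnames[h1], w.tempnames[h2]]  # -> ["$0_0", "$0_1"]
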
class PxdWriter(DeclarationWriter):
def __call__(self, node):
print(u'\n'.join(self.write(node).lines))
return node
def visit_CFuncDefNode(self, node):
if 'inline' in node.modifiers:
return
if node.overridable:
self.startline(u'cpdef ')
else:
self.startline(u'cdef ')
if node.visibility != 'private':
self.put(node.visibility)
self.put(u' ')
if node.api:
self.put(u'api ')
self.visit(node.declarator)
def visit_StatNode(self, node):
pass
class ExpressionWriter(TreeVisitor):
def __init__(self, result=None):
super(ExpressionWriter, self).__init__()
if result is None:
result = u""
self.result = result
self.precedence = [0]
def write(self, tree):
self.visit(tree)
return self.result
def put(self, s):
self.result += s
def remove(self, s):
if self.result.endswith(s):
self.result = self.result[:-len(s)]
def comma_separated_list(self, items):
if len(items) > 0:
for item in items[:-1]:
self.visit(item)
self.put(u", ")
self.visit(items[-1])
def visit_Node(self, node):
raise AssertionError("Node not handled by serializer: %r" % node)
def visit_NameNode(self, node):
self.put(node.name)
def visit_NoneNode(self, node):
self.put(u"None")
def visit_EllipsisNode(self, node):
self.put(u"...")
def visit_BoolNode(self, node):
self.put(str(node.value))
def visit_ConstNode(self, node):
self.put(str(node.value))
def visit_ImagNode(self, node):
self.put(node.value)
self.put(u"j")
def emit_string(self, node, prefix=u""):
repr_val = repr(node.value)
if repr_val[0] in 'ub':
repr_val = repr_val[1:]
self.put(u"%s%s" % (prefix, repr_val))
def visit_BytesNode(self, node):
self.emit_string(node, u"b")
def visit_StringNode(self, node):
self.emit_string(node)
def visit_UnicodeNode(self, node):
self.emit_string(node, u"u")
def emit_sequence(self, node, parens=(u"", u"")):
open_paren, close_paren = parens
items = node.subexpr_nodes()
self.put(open_paren)
self.comma_separated_list(items)
self.put(close_paren)
def visit_ListNode(self, node):
self.emit_sequence(node, u"[]")
def visit_TupleNode(self, node):
self.emit_sequence(node, u"()")
def visit_SetNode(self, node):
if len(node.subexpr_nodes()) > 0:
self.emit_sequence(node, u"{}")
else:
self.put(u"set()")
def visit_DictNode(self, node):
self.emit_sequence(node, u"{}")
def visit_DictItemNode(self, node):
self.visit(node.key)
self.put(u": ")
self.visit(node.value)
unop_precedence = {
'not': 3, '!': 3,
'+': 11, '-': 11, '~': 11,
}
binop_precedence = {
'or': 1,
'and': 2,
# unary: 'not': 3, '!': 3,
'in': 4, 'not_in': 4, 'is': 4, 'is_not': 4, '<': 4, '<=': 4, '>': 4, '>=': 4, '!=': 4, '==': 4,
'|': 5,
'^': 6,
'&': 7,
'<<': 8, '>>': 8,
'+': 9, '-': 9,
'*': 10, '@': 10, '/': 10, '//': 10, '%': 10,
# unary: '+': 11, '-': 11, '~': 11
'**': 12,
}
def operator_enter(self, new_prec):
old_prec = self.precedence[-1]
if old_prec > new_prec:
self.put(u"(")
self.precedence.append(new_prec)
def operator_exit(self):
old_prec, new_prec = self.precedence[-2:]
if old_prec > new_prec:
self.put(u")")
self.precedence.pop()
def visit_NotNode(self, node):
op = 'not'
prec = self.unop_precedence[op]
self.operator_enter(prec)
self.put(u"not ")
self.visit(node.operand)
self.operator_exit()
def visit_UnopNode(self, node):
op = node.operator
prec = self.unop_precedence[op]
self.operator_enter(prec)
self.put(u"%s" % node.operator)
self.visit(node.operand)
self.operator_exit()
def visit_BinopNode(self, node):
op = node.operator
prec = self.binop_precedence.get(op, 0)
self.operator_enter(prec)
self.visit(node.operand1)
self.put(u" %s " % op.replace('_', ' '))
self.visit(node.operand2)
self.operator_exit()
def visit_BoolBinopNode(self, node):
self.visit_BinopNode(node)
def visit_PrimaryCmpNode(self, node):
self.visit_BinopNode(node)
def visit_IndexNode(self, node):
self.visit(node.base)
self.put(u"[")
if isinstance(node.index, TupleNode):
self.emit_sequence(node.index)
else:
self.visit(node.index)
self.put(u"]")
def visit_SliceIndexNode(self, node):
self.visit(node.base)
self.put(u"[")
if node.start:
self.visit(node.start)
self.put(u":")
if node.stop:
self.visit(node.stop)
if node.slice:
self.put(u":")
self.visit(node.slice)
self.put(u"]")
def visit_SliceNode(self, node):
if not node.start.is_none:
self.visit(node.start)
self.put(u":")
if not node.stop.is_none:
self.visit(node.stop)
if not node.step.is_none:
self.put(u":")
self.visit(node.step)
def visit_CondExprNode(self, node):
self.visit(node.true_val)
self.put(u" if ")
self.visit(node.test)
self.put(u" else ")
self.visit(node.false_val)
def visit_AttributeNode(self, node):
self.visit(node.obj)
self.put(u".%s" % node.attribute)
def visit_SimpleCallNode(self, node):
self.visit(node.function)
self.put(u"(")
self.comma_separated_list(node.args)
self.put(")")
def emit_pos_args(self, node):
if node is None:
return
if isinstance(node, AddNode):
self.emit_pos_args(node.operand1)
self.emit_pos_args(node.operand2)
elif isinstance(node, TupleNode):
for expr in node.subexpr_nodes():
self.visit(expr)
self.put(u", ")
elif isinstance(node, AsTupleNode):
self.put("*")
self.visit(node.arg)
self.put(u", ")
else:
self.visit(node)
self.put(u", ")
def emit_kwd_args(self, node):
if node is None:
return
if isinstance(node, MergedDictNode):
for expr in node.subexpr_nodes():
self.emit_kwd_args(expr)
elif isinstance(node, DictNode):
for expr in node.subexpr_nodes():
self.put(u"%s=" % expr.key.value)
self.visit(expr.value)
self.put(u", ")
else:
self.put(u"**")
self.visit(node)
self.put(u", ")
def visit_GeneralCallNode(self, node):
self.visit(node.function)
self.put(u"(")
self.emit_pos_args(node.positional_args)
self.emit_kwd_args(node.keyword_args)
self.remove(u", ")
self.put(")")
def emit_comprehension(self, body, target,
sequence, condition,
parens=(u"", u"")):
open_paren, close_paren = parens
self.put(open_paren)
self.visit(body)
self.put(u" for ")
self.visit(target)
self.put(u" in ")
self.visit(sequence)
if condition:
self.put(u" if ")
self.visit(condition)
self.put(close_paren)
def visit_ComprehensionAppendNode(self, node):
self.visit(node.expr)
def visit_DictComprehensionAppendNode(self, node):
self.visit(node.key_expr)
self.put(u": ")
self.visit(node.value_expr)
def visit_ComprehensionNode(self, node):
tpmap = {'list': u"[]", 'dict': u"{}", 'set': u"{}"}
parens = tpmap[node.type.py_type_name()]
body = node.loop.body
target = node.loop.target
sequence = node.loop.iterator.sequence
condition = None
if hasattr(body, 'if_clauses'):
# type(body) is Nodes.IfStatNode
condition = body.if_clauses[0].condition
body = body.if_clauses[0].body
self.emit_comprehension(body, target, sequence, condition, parens)
def visit_GeneratorExpressionNode(self, node):
body = node.loop.body
target = node.loop.target
sequence = node.loop.iterator.sequence
condition = None
if hasattr(body, 'if_clauses'):
# type(body) is Nodes.IfStatNode
condition = body.if_clauses[0].condition
body = body.if_clauses[0].body.expr.arg
elif hasattr(body, 'expr'):
# type(body) is Nodes.ExprStatNode
body = body.expr.arg
self.emit_comprehension(body, target, sequence, condition, u"()")
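
# Illustrative sketch (hypothetical, driving the precedence stack by hand):
# operator_enter() emits "(" only when the enclosing operator binds tighter
# than the nested one, which is how "a * (b + c)" keeps its parentheses
# while "a + b + c" stays flat.
def _demo_precedence():
    w = ExpressionWriter()
    w.operator_enter(w.binop_precedence['*'])  # enclosing "*" (prec 10)
    w.put(u"a * ")
    w.operator_enter(w.binop_precedence['+'])  # nested "+" (prec 9): emits "("
    w.put(u"b + c")
    w.operator_exit()                          # emits ")"
    w.operator_exit()
    return w.result                            # -> u"a * (b + c)"
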

# ---- Cython-0.29.28/Cython/Compiler/AnalysedTreeTransforms.py ----

from __future__ import absolute_import
from .Visitor import ScopeTrackingTransform
from .Nodes import StatListNode, SingleAssignmentNode, CFuncDefNode, DefNode
from .ExprNodes import DictNode, DictItemNode, NameNode, UnicodeNode
from .PyrexTypes import py_object_type
from .StringEncoding import EncodedString
from . import Symtab
class AutoTestDictTransform(ScopeTrackingTransform):
# Handles autotestdict directive
blacklist = ['__cinit__', '__dealloc__', '__richcmp__',
'__nonzero__', '__bool__',
'__len__', '__contains__']
def visit_ModuleNode(self, node):
if node.is_pxd:
return node
self.scope_type = 'module'
self.scope_node = node
if not self.current_directives['autotestdict']:
return node
self.all_docstrings = self.current_directives['autotestdict.all']
self.cdef_docstrings = self.all_docstrings or self.current_directives['autotestdict.cdef']
assert isinstance(node.body, StatListNode)
# First see if __test__ is already created
if u'__test__' in node.scope.entries:
# Do nothing
return node
pos = node.pos
self.tests = []
self.testspos = node.pos
test_dict_entry = node.scope.declare_var(EncodedString(u'__test__'),
py_object_type,
pos,
visibility='public')
create_test_dict_assignment = SingleAssignmentNode(pos,
lhs=NameNode(pos, name=EncodedString(u'__test__'),
entry=test_dict_entry),
rhs=DictNode(pos, key_value_pairs=self.tests))
self.visitchildren(node)
node.body.stats.append(create_test_dict_assignment)
return node
def add_test(self, testpos, path, doctest):
pos = self.testspos
keystr = u'%s (line %d)' % (path, testpos[1])
key = UnicodeNode(pos, value=EncodedString(keystr))
value = UnicodeNode(pos, value=doctest)
self.tests.append(DictItemNode(pos, key=key, value=value))
def visit_ExprNode(self, node):
# expressions cannot contain functions and lambda expressions
# do not have a docstring
return node
def visit_FuncDefNode(self, node):
if not node.doc or (isinstance(node, DefNode) and node.fused_py_func):
return node
if not self.cdef_docstrings:
if isinstance(node, CFuncDefNode) and not node.py_func:
return node
if not self.all_docstrings and '>>>' not in node.doc:
return node
pos = self.testspos
if self.scope_type == 'module':
path = node.entry.name
elif self.scope_type in ('pyclass', 'cclass'):
if isinstance(node, CFuncDefNode):
if node.py_func is not None:
name = node.py_func.name
else:
name = node.entry.name
else:
name = node.name
if self.scope_type == 'cclass' and name in self.blacklist:
return node
if self.scope_type == 'pyclass':
class_name = self.scope_node.name
else:
class_name = self.scope_node.class_name
if isinstance(node.entry.scope, Symtab.PropertyScope):
property_method_name = node.entry.scope.name
path = "%s.%s.%s" % (class_name, node.entry.scope.name,
node.entry.name)
else:
path = "%s.%s" % (class_name, node.entry.name)
else:
assert False
self.add_test(node.pos, path, node.doc)
return node
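
# Worked example (hypothetical values, mirroring add_test()): the __test__
# dict injected into the module maps "<path> (line <lineno>)" keys to the
# docstrings, which is exactly the layout doctest.testmod() picks up.
def _demo_test_dict_entry():
    path, lineno, doc = u"Point.distance", 42, u">>> Point().distance()\n0.0"
    return {u'%s (line %d)' % (path, lineno): doc}
    # -> {u"Point.distance (line 42)": u">>> Point().distance()\n0.0"}
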

# ---- Cython-0.29.28/Cython/Compiler/Annotate.py ----

# Note: Work in progress
from __future__ import absolute_import
import os
import os.path
import re
import codecs
import textwrap
from datetime import datetime
from functools import partial
from collections import defaultdict
from xml.sax.saxutils import escape as html_escape
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # does not support writing 'str' in Py2
from . import Version
from .Code import CCodeWriter
from .. import Utils
class AnnotationCCodeWriter(CCodeWriter):
def __init__(self, create_from=None, buffer=None, copy_formatting=True):
CCodeWriter.__init__(self, create_from, buffer, copy_formatting=copy_formatting)
if create_from is None:
self.annotation_buffer = StringIO()
self.last_annotated_pos = None
# annotations[filename][line] -> [(column, AnnotationItem)*]
self.annotations = defaultdict(partial(defaultdict, list))
# code[filename][line] -> str
self.code = defaultdict(partial(defaultdict, str))
# scopes[filename][line] -> set(scopes)
self.scopes = defaultdict(partial(defaultdict, set))
else:
# When creating an insertion point, keep references to the same database
self.annotation_buffer = create_from.annotation_buffer
self.annotations = create_from.annotations
self.code = create_from.code
self.scopes = create_from.scopes
self.last_annotated_pos = create_from.last_annotated_pos
def create_new(self, create_from, buffer, copy_formatting):
return AnnotationCCodeWriter(create_from, buffer, copy_formatting)
def write(self, s):
CCodeWriter.write(self, s)
self.annotation_buffer.write(s)
def mark_pos(self, pos, trace=True):
if pos is not None:
CCodeWriter.mark_pos(self, pos, trace)
if self.funcstate and self.funcstate.scope:
# lambdas and genexprs can result in multiple scopes per line => keep them in a set
self.scopes[pos[0].filename][pos[1]].add(self.funcstate.scope)
if self.last_annotated_pos:
source_desc, line, _ = self.last_annotated_pos
pos_code = self.code[source_desc.filename]
pos_code[line] += self.annotation_buffer.getvalue()
self.annotation_buffer = StringIO()
self.last_annotated_pos = pos
def annotate(self, pos, item):
self.annotations[pos[0].filename][pos[1]].append((pos[2], item))
def _css(self):
"""css template will later allow to choose a colormap"""
css = [self._css_template]
for i in range(255):
color = u"FFFF%02x" % int(255/(1+i/10.0))
css.append('.cython.score-%d {background-color: #%s;}' % (i, color))
try:
from pygments.formatters import HtmlFormatter
except ImportError:
pass
else:
css.append(HtmlFormatter().get_style_defs('.cython'))
return '\n'.join(css)
_css_template = textwrap.dedent("""
body.cython { font-family: courier; font-size: 12; }
.cython.tag { }
.cython.line { margin: 0em }
.cython.code { font-size: 9; color: #444444; display: none; margin: 0px 0px 0px 8px; border-left: 8px none; }
.cython.line .run { background-color: #B0FFB0; }
.cython.line .mis { background-color: #FFB0B0; }
.cython.code.run { border-left: 8px solid #B0FFB0; }
.cython.code.mis { border-left: 8px solid #FFB0B0; }
.cython.code .py_c_api { color: red; }
.cython.code .py_macro_api { color: #FF7000; }
.cython.code .pyx_c_api { color: #FF3000; }
.cython.code .pyx_macro_api { color: #FF7000; }
.cython.code .refnanny { color: #FFA000; }
.cython.code .trace { color: #FFA000; }
.cython.code .error_goto { color: #FFA000; }
.cython.code .coerce { color: #008000; border: 1px dotted #008000 }
.cython.code .py_attr { color: #FF0000; font-weight: bold; }
.cython.code .c_attr { color: #0000FF; }
.cython.code .py_call { color: #FF0000; font-weight: bold; }
.cython.code .c_call { color: #0000FF; }
""")
# on-click toggle function to show/hide C source code
_onclick_attr = ' onclick="{0}"'.format((
"(function(s){"
" s.display = s.display === 'block' ? 'none' : 'block'"
"})(this.nextElementSibling.style)"
).replace(' ', '') # poor dev's JS minification
)
def save_annotation(self, source_filename, target_filename, coverage_xml=None):
with Utils.open_source_file(source_filename) as f:
code = f.read()
generated_code = self.code.get(source_filename, {})
c_file = Utils.decode_filename(os.path.basename(target_filename))
html_filename = os.path.splitext(target_filename)[0] + ".html"
with codecs.open(html_filename, "w", encoding="UTF-8") as out_buffer:
out_buffer.write(self._save_annotation(code, generated_code, c_file, source_filename, coverage_xml))
def _save_annotation_header(self, c_file, source_filename, coverage_timestamp=None):
coverage_info = ''
if coverage_timestamp:
coverage_info = u' with coverage data from {timestamp}'.format(
timestamp=datetime.fromtimestamp(int(coverage_timestamp) // 1000))
        outlist = [
            textwrap.dedent(u'''\
            <!DOCTYPE html>
            <!-- Generated by Cython {watermark} -->
            <html>
            <head>
                <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
                <title>Cython: {filename}</title>
                <style type="text/css">
                {css}
                </style>
            </head>
            <body class="cython">
            <p><span style="border-bottom: solid 1px grey;">Generated by Cython {watermark}</span>{more_info}</p>
            <p>
                <span style="background-color: #FFFF00">Yellow lines</span> hint at Python interaction.<br />
                Click on a line that starts with a "<code>+</code>" to see the C code that Cython generated for it.
            </p>
            ''').format(css=self._css(), watermark=Version.watermark,
                        filename=os.path.basename(source_filename) if source_filename else '',
                        more_info=coverage_info)
        ]
        if c_file:
            outlist.append(u'<p>Raw output: <a href="%s">%s</a></p>\n' % (c_file, c_file))
        return outlist
def _save_annotation_footer(self):
return (u'\n',)
def _save_annotation(self, code, generated_code, c_file=None, source_filename=None, coverage_xml=None):
"""
lines : original cython source code split by lines
generated_code : generated c code keyed by line number in original file
target filename : name of the file in which to store the generated html
c_file : filename in which the c_code has been written
"""
if coverage_xml is not None and source_filename:
coverage_timestamp = coverage_xml.get('timestamp', '').strip()
covered_lines = self._get_line_coverage(coverage_xml, source_filename)
else:
coverage_timestamp = covered_lines = None
annotation_items = dict(self.annotations[source_filename])
scopes = dict(self.scopes[source_filename])
outlist = []
outlist.extend(self._save_annotation_header(c_file, source_filename, coverage_timestamp))
outlist.extend(self._save_annotation_body(code, generated_code, annotation_items, scopes, covered_lines))
outlist.extend(self._save_annotation_footer())
return ''.join(outlist)
def _get_line_coverage(self, coverage_xml, source_filename):
coverage_data = None
for entry in coverage_xml.iterfind('.//class'):
if not entry.get('filename'):
continue
if (entry.get('filename') == source_filename or
os.path.abspath(entry.get('filename')) == source_filename):
coverage_data = entry
break
elif source_filename.endswith(entry.get('filename')):
coverage_data = entry # but we might still find a better match...
if coverage_data is None:
return None
return dict(
(int(line.get('number')), int(line.get('hits')))
for line in coverage_data.iterfind('lines/line')
)
def _htmlify_code(self, code):
try:
from pygments import highlight
from pygments.lexers import CythonLexer
from pygments.formatters import HtmlFormatter
except ImportError:
# no Pygments, just escape the code
return html_escape(code)
html_code = highlight(
code, CythonLexer(stripnl=False, stripall=False),
HtmlFormatter(nowrap=True))
return html_code
def _save_annotation_body(self, cython_code, generated_code, annotation_items, scopes, covered_lines=None):
        outlist = [u'<div class="cython">']
pos_comment_marker = u'/* \N{HORIZONTAL ELLIPSIS} */\n'
new_calls_map = dict(
(name, 0) for name in
'refnanny trace py_macro_api py_c_api pyx_macro_api pyx_c_api error_goto'.split()
).copy
self.mark_pos(None)
        def annotate(match):
            group_name = match.lastgroup
            calls[group_name] += 1
            return u"<span class='%s'>%s</span>" % (
                group_name, match.group(group_name))
lines = self._htmlify_code(cython_code).splitlines()
lineno_width = len(str(len(lines)))
if not covered_lines:
covered_lines = None
for k, line in enumerate(lines, 1):
try:
c_code = generated_code[k]
except KeyError:
c_code = ''
else:
c_code = _replace_pos_comment(pos_comment_marker, c_code)
if c_code.startswith(pos_comment_marker):
c_code = c_code[len(pos_comment_marker):]
c_code = html_escape(c_code)
calls = new_calls_map()
c_code = _parse_code(annotate, c_code)
score = (5 * calls['py_c_api'] + 2 * calls['pyx_c_api'] +
calls['py_macro_api'] + calls['pyx_macro_api'])
if c_code:
onclick = self._onclick_attr
expandsymbol = '+'
else:
onclick = ''
expandsymbol = ' '
covered = ''
if covered_lines is not None and k in covered_lines:
hits = covered_lines[k]
if hits is not None:
covered = 'run' if hits else 'mis'
            outlist.append(
                u'<pre class="cython line score-{score}"{onclick}>'
                # generate line number with expand symbol in front,
                # and the right number of digits
                u'{expandsymbol}<span class="{covered}">{line:0{lineno_width}d}</span>: {code}</pre>\n'.format(
                    score=score,
                    expandsymbol=expandsymbol,
                    covered=covered,
                    lineno_width=lineno_width,
                    line=k,
                    code=line.rstrip(),
                    onclick=onclick,
                ))
            if c_code:
                outlist.append(u"<pre class='cython code score-{score} {covered}'>{code}</pre>".format(
                    score=score, covered=covered, code=c_code))
        outlist.append(u"</div>")
return outlist
_parse_code = re.compile((
    br'(?P<refnanny>__Pyx_X?(?:GOT|GIVE)REF|__Pyx_RefNanny[A-Za-z]+)|'
    br'(?P<trace>__Pyx_Trace[A-Za-z]+)|'
    br'(?:'
    br'(?P<pyx_macro_api>__Pyx_[A-Z][A-Z_]+)|'
    br'(?P<pyx_c_api>(?:__Pyx_[A-Z][a-z_][A-Za-z_]*)|__pyx_convert_[A-Za-z_]*)|'
    br'(?P<py_macro_api>Py[A-Z][a-z]+_[A-Z][A-Z_]+)|'
    br'(?P<py_c_api>Py[A-Z][a-z]+_[A-Z][a-z][A-Za-z_]*)'
    br')(?=\()|'  # look-ahead to exclude subsequent '(' from replacement
    br'(?P<error_goto>(?:(?<=;) *if [^;]* +)?__PYX_ERR\([^)]+\))'
).decode('ascii')).sub
_replace_pos_comment = re.compile(
# this matches what Cython generates as code line marker comment
br'^\s*/\*(?:(?:[^*]|\*[^/])*\n)+\s*\*/\s*\n'.decode('ascii'),
re.M
).sub
class AnnotationItem(object):
def __init__(self, style, text, tag="", size=0):
self.style = style
self.text = text
self.tag = tag
self.size = size
    def start(self):
        return u"<span class='cython tag %s' title='%s'>%s" % (self.style, self.text, self.tag)

    def end(self):
        return self.size, u"</span>"
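
# Worked example (hypothetical helper, same formula as _css()): line scores
# map to progressively darker yellows; score 0 is the lightest shade.
def _demo_score_color(score):
    return u"FFFF%02x" % int(255 / (1 + score / 10.0))
# _demo_score_color(0) -> u'FFFFff'; _demo_score_color(10) -> u'FFFF7f'
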

# ---- Cython-0.29.28/Cython/Compiler/AutoDocTransforms.py ----

from __future__ import absolute_import, print_function
from .Visitor import CythonTransform
from .StringEncoding import EncodedString
from . import Options
from . import PyrexTypes, ExprNodes
from ..CodeWriter import ExpressionWriter
class AnnotationWriter(ExpressionWriter):
def visit_Node(self, node):
self.put(u"??>")
def visit_LambdaNode(self, node):
# XXX Should we do better?
self.put("")
class EmbedSignature(CythonTransform):
def __init__(self, context):
super(EmbedSignature, self).__init__(context)
self.class_name = None
self.class_node = None
def _fmt_expr(self, node):
writer = AnnotationWriter()
result = writer.write(node)
# print(type(node).__name__, '-->', result)
return result
def _fmt_arg(self, arg):
if arg.type is PyrexTypes.py_object_type or arg.is_self_arg:
doc = arg.name
else:
doc = arg.type.declaration_code(arg.name, for_display=1)
if arg.annotation:
annotation = self._fmt_expr(arg.annotation)
doc = doc + (': %s' % annotation)
if arg.default:
default = self._fmt_expr(arg.default)
doc = doc + (' = %s' % default)
elif arg.default:
default = self._fmt_expr(arg.default)
doc = doc + ('=%s' % default)
return doc
def _fmt_star_arg(self, arg):
arg_doc = arg.name
if arg.annotation:
annotation = self._fmt_expr(arg.annotation)
arg_doc = arg_doc + (': %s' % annotation)
return arg_doc
def _fmt_arglist(self, args,
npargs=0, pargs=None,
nkargs=0, kargs=None,
hide_self=False):
arglist = []
for arg in args:
if not hide_self or not arg.entry.is_self_arg:
arg_doc = self._fmt_arg(arg)
arglist.append(arg_doc)
if pargs:
arg_doc = self._fmt_star_arg(pargs)
arglist.insert(npargs, '*%s' % arg_doc)
elif nkargs:
arglist.insert(npargs, '*')
if kargs:
arg_doc = self._fmt_star_arg(kargs)
arglist.append('**%s' % arg_doc)
return arglist
def _fmt_ret_type(self, ret):
if ret is PyrexTypes.py_object_type:
return None
else:
return ret.declaration_code("", for_display=1)
def _fmt_signature(self, cls_name, func_name, args,
npargs=0, pargs=None,
nkargs=0, kargs=None,
return_expr=None,
return_type=None, hide_self=False):
arglist = self._fmt_arglist(args,
npargs, pargs,
nkargs, kargs,
hide_self=hide_self)
arglist_doc = ', '.join(arglist)
func_doc = '%s(%s)' % (func_name, arglist_doc)
if cls_name:
func_doc = '%s.%s' % (cls_name, func_doc)
ret_doc = None
if return_expr:
ret_doc = self._fmt_expr(return_expr)
elif return_type:
ret_doc = self._fmt_ret_type(return_type)
if ret_doc:
func_doc = '%s -> %s' % (func_doc, ret_doc)
return func_doc
def _embed_signature(self, signature, node_doc):
if node_doc:
return "%s\n%s" % (signature, node_doc)
else:
return signature
def __call__(self, node):
if not Options.docstrings:
return node
else:
return super(EmbedSignature, self).__call__(node)
def visit_ClassDefNode(self, node):
oldname = self.class_name
oldclass = self.class_node
self.class_node = node
try:
# PyClassDefNode
self.class_name = node.name
except AttributeError:
# CClassDefNode
self.class_name = node.class_name
self.visitchildren(node)
self.class_name = oldname
self.class_node = oldclass
return node
def visit_LambdaNode(self, node):
        # lambda expressions do not have a signature or inner functions
return node
def visit_DefNode(self, node):
if not self.current_directives['embedsignature']:
return node
is_constructor = False
hide_self = False
if node.entry.is_special:
is_constructor = self.class_node and node.name == '__init__'
if not is_constructor:
return node
class_name, func_name = None, self.class_name
hide_self = True
else:
class_name, func_name = self.class_name, node.name
nkargs = getattr(node, 'num_kwonly_args', 0)
npargs = len(node.args) - nkargs
signature = self._fmt_signature(
class_name, func_name, node.args,
npargs, node.star_arg,
nkargs, node.starstar_arg,
return_expr=node.return_type_annotation,
return_type=None, hide_self=hide_self)
if signature:
if is_constructor:
doc_holder = self.class_node.entry.type.scope
else:
doc_holder = node.entry
if doc_holder.doc is not None:
old_doc = doc_holder.doc
elif not is_constructor and getattr(node, 'py_func', None) is not None:
old_doc = node.py_func.entry.doc
else:
old_doc = None
new_doc = self._embed_signature(signature, old_doc)
doc_holder.doc = EncodedString(new_doc)
if not is_constructor and getattr(node, 'py_func', None) is not None:
node.py_func.entry.doc = EncodedString(new_doc)
return node
def visit_CFuncDefNode(self, node):
if not self.current_directives['embedsignature']:
return node
if not node.overridable: # not cpdef FOO(...):
return node
signature = self._fmt_signature(
self.class_name, node.declarator.base.name,
node.declarator.args,
return_type=node.return_type)
if signature:
if node.entry.doc is not None:
old_doc = node.entry.doc
elif getattr(node, 'py_func', None) is not None:
old_doc = node.py_func.entry.doc
else:
old_doc = None
new_doc = self._embed_signature(signature, old_doc)
node.entry.doc = EncodedString(new_doc)
if hasattr(node, 'py_func') and node.py_func is not None:
node.py_func.entry.doc = EncodedString(new_doc)
return node
def visit_PropertyNode(self, node):
if not self.current_directives['embedsignature']:
return node
entry = node.entry
if entry.visibility == 'public':
# property synthesised from a cdef public attribute
type_name = entry.type.declaration_code("", for_display=1)
if not entry.type.is_pyobject:
type_name = "'%s'" % type_name
elif entry.type.is_extension_type:
type_name = entry.type.module_name + '.' + type_name
signature = '%s: %s' % (entry.name, type_name)
new_doc = self._embed_signature(signature, entry.doc)
entry.doc = EncodedString(new_doc)
return node
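
# Illustrative sketch (hypothetical values): _embed_signature() prepends the
# formatted signature to whatever docstring already exists, producing e.g.
#     distance(self, Point other) -> double
#     Return the Euclidean distance.
def _demo_embedded_doc():
    signature = "distance(self, Point other) -> double"
    old_doc = "Return the Euclidean distance."
    return "%s\n%s" % (signature, old_doc)
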

# ---- Cython-0.29.28/Cython/Compiler/Buffer.py ----

from __future__ import absolute_import
from .Visitor import CythonTransform
from .ModuleNode import ModuleNode
from .Errors import CompileError
from .UtilityCode import CythonUtilityCode
from .Code import UtilityCode, TempitaUtilityCode
from . import Options
from . import Interpreter
from . import PyrexTypes
from . import Naming
from . import Symtab
def dedent(text, reindent=0):
from textwrap import dedent
text = dedent(text)
if reindent > 0:
indent = " " * reindent
text = '\n'.join([indent + x for x in text.split('\n')])
return text
class IntroduceBufferAuxiliaryVars(CythonTransform):
#
# Entry point
#
buffers_exists = False
using_memoryview = False
def __call__(self, node):
assert isinstance(node, ModuleNode)
self.max_ndim = 0
result = super(IntroduceBufferAuxiliaryVars, self).__call__(node)
if self.buffers_exists:
use_bufstruct_declare_code(node.scope)
use_py2_buffer_functions(node.scope)
return result
#
# Basic operations for transforms
#
def handle_scope(self, node, scope):
# For all buffers, insert extra variables in the scope.
# The variables are also accessible from the buffer_info
# on the buffer entry
scope_items = scope.entries.items()
bufvars = [entry for name, entry in scope_items if entry.type.is_buffer]
if len(bufvars) > 0:
bufvars.sort(key=lambda entry: entry.name)
self.buffers_exists = True
memviewslicevars = [entry for name, entry in scope_items if entry.type.is_memoryviewslice]
if len(memviewslicevars) > 0:
self.buffers_exists = True
for (name, entry) in scope_items:
if name == 'memoryview' and isinstance(entry.utility_code_definition, CythonUtilityCode):
self.using_memoryview = True
break
del scope_items
if isinstance(node, ModuleNode) and len(bufvars) > 0:
# for now...note that pos is wrong
raise CompileError(node.pos, "Buffer vars not allowed in module scope")
for entry in bufvars:
if entry.type.dtype.is_ptr:
raise CompileError(node.pos, "Buffers with pointer types not yet supported.")
name = entry.name
buftype = entry.type
if buftype.ndim > Options.buffer_max_dims:
raise CompileError(node.pos,
"Buffer ndims exceeds Options.buffer_max_dims = %d" % Options.buffer_max_dims)
if buftype.ndim > self.max_ndim:
self.max_ndim = buftype.ndim
# Declare auxiliary vars
def decvar(type, prefix):
cname = scope.mangle(prefix, name)
aux_var = scope.declare_var(name=None, cname=cname,
type=type, pos=node.pos)
if entry.is_arg:
aux_var.used = True # otherwise, NameNode will mark whether it is used
return aux_var
auxvars = ((PyrexTypes.c_pyx_buffer_nd_type, Naming.pybuffernd_prefix),
(PyrexTypes.c_pyx_buffer_type, Naming.pybufferstruct_prefix))
pybuffernd, rcbuffer = [decvar(type, prefix) for (type, prefix) in auxvars]
entry.buffer_aux = Symtab.BufferAux(pybuffernd, rcbuffer)
scope.buffer_entries = bufvars
self.scope = scope
def visit_ModuleNode(self, node):
self.handle_scope(node, node.scope)
self.visitchildren(node)
return node
def visit_FuncDefNode(self, node):
self.handle_scope(node, node.local_scope)
self.visitchildren(node)
return node
#
# Analysis
#
buffer_options = ("dtype", "ndim", "mode", "negative_indices", "cast") # ordered!
buffer_defaults = {"ndim": 1, "mode": "full", "negative_indices": True, "cast": False}
buffer_positional_options_count = 1 # anything beyond this needs keyword argument
ERR_BUF_OPTION_UNKNOWN = '"%s" is not a buffer option'
ERR_BUF_TOO_MANY = 'Too many buffer options'
ERR_BUF_DUP = '"%s" buffer option already supplied'
ERR_BUF_MISSING = '"%s" missing'
ERR_BUF_MODE = 'Only allowed buffer modes are: "c", "fortran", "full", "strided" (as a compile-time string)'
ERR_BUF_NDIM = 'ndim must be a non-negative integer'
ERR_BUF_DTYPE = 'dtype must be "object", numeric type or a struct'
ERR_BUF_BOOL = '"%s" must be a boolean'
def analyse_buffer_options(globalpos, env, posargs, dictargs, defaults=None, need_complete=True):
"""
Must be called during type analysis, as analyse is called
on the dtype argument.
posargs and dictargs should consist of a list and a dict
of tuples (value, pos). Defaults should be a dict of values.
Returns a dict containing all the options a buffer can have and
its value (with the positions stripped).
"""
if defaults is None:
defaults = buffer_defaults
posargs, dictargs = Interpreter.interpret_compiletime_options(
posargs, dictargs, type_env=env, type_args=(0, 'dtype'))
if len(posargs) > buffer_positional_options_count:
raise CompileError(posargs[-1][1], ERR_BUF_TOO_MANY)
options = {}
for name, (value, pos) in dictargs.items():
if not name in buffer_options:
raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
options[name] = value
for name, (value, pos) in zip(buffer_options, posargs):
if not name in buffer_options:
raise CompileError(pos, ERR_BUF_OPTION_UNKNOWN % name)
if name in options:
raise CompileError(pos, ERR_BUF_DUP % name)
options[name] = value
# Check that they are all there and copy defaults
for name in buffer_options:
if not name in options:
try:
options[name] = defaults[name]
except KeyError:
if need_complete:
raise CompileError(globalpos, ERR_BUF_MISSING % name)
dtype = options.get("dtype")
if dtype and dtype.is_extension_type:
raise CompileError(globalpos, ERR_BUF_DTYPE)
ndim = options.get("ndim")
if ndim and (not isinstance(ndim, int) or ndim < 0):
raise CompileError(globalpos, ERR_BUF_NDIM)
mode = options.get("mode")
if mode and not (mode in ('full', 'strided', 'c', 'fortran')):
raise CompileError(globalpos, ERR_BUF_MODE)
def assert_bool(name):
x = options.get(name)
if not isinstance(x, bool):
raise CompileError(globalpos, ERR_BUF_BOOL % name)
assert_bool('negative_indices')
assert_bool('cast')
return options
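
# Worked example (hypothetical stand-in, bypassing the compile-time
# Interpreter): for a declaration like "object[float, ndim=2]" the merge
# order is positionals, then keywords, then defaults for anything missing.
def _demo_buffer_option_merge():
    options = {'ndim': 2}                 # keyword options
    options.setdefault('dtype', 'float')  # positional slot 0 is 'dtype'
    for name in buffer_options:
        if name in buffer_defaults:
            options.setdefault(name, buffer_defaults[name])
    return options
    # -> {'ndim': 2, 'dtype': 'float', 'mode': 'full',
    #     'negative_indices': True, 'cast': False}
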
#
# Code generation
#
class BufferEntry(object):
def __init__(self, entry):
self.entry = entry
self.type = entry.type
self.cname = entry.buffer_aux.buflocal_nd_var.cname
self.buf_ptr = "%s.rcbuffer->pybuffer.buf" % self.cname
self.buf_ptr_type = entry.type.buffer_ptr_type
self.init_attributes()
def init_attributes(self):
self.shape = self.get_buf_shapevars()
self.strides = self.get_buf_stridevars()
self.suboffsets = self.get_buf_suboffsetvars()
def get_buf_suboffsetvars(self):
return self._for_all_ndim("%s.diminfo[%d].suboffsets")
def get_buf_stridevars(self):
return self._for_all_ndim("%s.diminfo[%d].strides")
def get_buf_shapevars(self):
return self._for_all_ndim("%s.diminfo[%d].shape")
def _for_all_ndim(self, s):
return [s % (self.cname, i) for i in range(self.type.ndim)]
def generate_buffer_lookup_code(self, code, index_cnames):
# Create buffer lookup and return it
# This is done via utility macros/inline functions, which vary
# according to the access mode used.
params = []
nd = self.type.ndim
mode = self.type.mode
if mode == 'full':
for i, s, o in zip(index_cnames,
self.get_buf_stridevars(),
self.get_buf_suboffsetvars()):
params.append(i)
params.append(s)
params.append(o)
funcname = "__Pyx_BufPtrFull%dd" % nd
funcgen = buf_lookup_full_code
else:
if mode == 'strided':
funcname = "__Pyx_BufPtrStrided%dd" % nd
funcgen = buf_lookup_strided_code
elif mode == 'c':
funcname = "__Pyx_BufPtrCContig%dd" % nd
funcgen = buf_lookup_c_code
elif mode == 'fortran':
funcname = "__Pyx_BufPtrFortranContig%dd" % nd
funcgen = buf_lookup_fortran_code
else:
assert False
for i, s in zip(index_cnames, self.get_buf_stridevars()):
params.append(i)
params.append(s)
# Make sure the utility code is available
if funcname not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(funcname)
protocode = code.globalstate['utility_code_proto']
defcode = code.globalstate['utility_code_def']
funcgen(protocode, defcode, name=funcname, nd=nd)
buf_ptr_type_code = self.buf_ptr_type.empty_declaration_code()
ptrcode = "%s(%s, %s, %s)" % (funcname, buf_ptr_type_code, self.buf_ptr,
", ".join(params))
return ptrcode
def get_flags(buffer_aux, buffer_type):
flags = 'PyBUF_FORMAT'
mode = buffer_type.mode
if mode == 'full':
flags += '| PyBUF_INDIRECT'
elif mode == 'strided':
flags += '| PyBUF_STRIDES'
elif mode == 'c':
flags += '| PyBUF_C_CONTIGUOUS'
elif mode == 'fortran':
flags += '| PyBUF_F_CONTIGUOUS'
else:
assert False
if buffer_aux.writable_needed: flags += "| PyBUF_WRITABLE"
return flags
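
# Worked example (hypothetical stand-ins for the aux/type objects): a
# writable, C-contiguous buffer requests format, contiguity and writability.
def _demo_get_flags():
    class _Aux(object):
        writable_needed = True
    class _Type(object):
        mode = 'c'
    return get_flags(_Aux(), _Type())
    # -> 'PyBUF_FORMAT| PyBUF_C_CONTIGUOUS| PyBUF_WRITABLE'
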
def used_buffer_aux_vars(entry):
buffer_aux = entry.buffer_aux
buffer_aux.buflocal_nd_var.used = True
buffer_aux.rcbuf_var.used = True
def put_unpack_buffer_aux_into_scope(buf_entry, code):
# Generate code to copy the needed struct info into local
# variables.
buffer_aux, mode = buf_entry.buffer_aux, buf_entry.type.mode
pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
fldnames = ['strides', 'shape']
if mode == 'full':
fldnames.append('suboffsets')
ln = []
for i in range(buf_entry.type.ndim):
for fldname in fldnames:
ln.append("%s.diminfo[%d].%s = %s.rcbuffer->pybuffer.%s[%d];" % \
(pybuffernd_struct, i, fldname,
pybuffernd_struct, fldname, i))
code.putln(' '.join(ln))
def put_init_vars(entry, code):
bufaux = entry.buffer_aux
pybuffernd_struct = bufaux.buflocal_nd_var.cname
pybuffer_struct = bufaux.rcbuf_var.cname
# init pybuffer_struct
code.putln("%s.pybuffer.buf = NULL;" % pybuffer_struct)
code.putln("%s.refcount = 0;" % pybuffer_struct)
# init the buffer object
# code.put_init_var_to_py_none(entry)
# init the pybuffernd_struct
code.putln("%s.data = NULL;" % pybuffernd_struct)
code.putln("%s.rcbuffer = &%s;" % (pybuffernd_struct, pybuffer_struct))
def put_acquire_arg_buffer(entry, code, pos):
buffer_aux = entry.buffer_aux
getbuffer = get_getbuffer_call(code, entry.cname, buffer_aux, entry.type)
# Acquire any new buffer
code.putln("{")
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % entry.type.dtype.struct_nesting_depth())
code.putln(code.error_goto_if("%s == -1" % getbuffer, pos))
code.putln("}")
# An exception raised in arg parsing cannot be caught, so no
# need to care about the buffer then.
put_unpack_buffer_aux_into_scope(entry, code)
def put_release_buffer_code(code, entry):
code.globalstate.use_utility_code(acquire_utility_code)
code.putln("__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);" % entry.buffer_aux.buflocal_nd_var.cname)
def get_getbuffer_call(code, obj_cname, buffer_aux, buffer_type):
ndim = buffer_type.ndim
cast = int(buffer_type.cast)
flags = get_flags(buffer_aux, buffer_type)
pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
dtype_typeinfo = get_type_information_cname(code, buffer_type.dtype)
code.globalstate.use_utility_code(acquire_utility_code)
return ("__Pyx_GetBufferAndValidate(&%(pybuffernd_struct)s.rcbuffer->pybuffer, "
"(PyObject*)%(obj_cname)s, &%(dtype_typeinfo)s, %(flags)s, %(ndim)d, "
"%(cast)d, __pyx_stack)" % locals())
def put_assign_to_buffer(lhs_cname, rhs_cname, buf_entry,
is_initialized, pos, code):
"""
    Generate code for reassigning a buffer variable. This only deals with getting
    the buffer auxiliary structure and variables set up correctly; the assignment
    itself and refcounting is the responsibility of the caller.
    However, the assignment operation may throw an exception so that the reassignment
    never happens.
    Depending on the circumstances there are two possible outcomes:
    - Old buffer released, new acquired, rhs assigned to lhs
    - Old buffer released, new acquired which fails, reacquire old lhs buffer
(which may or may not succeed).
"""
buffer_aux, buffer_type = buf_entry.buffer_aux, buf_entry.type
pybuffernd_struct = buffer_aux.buflocal_nd_var.cname
flags = get_flags(buffer_aux, buffer_type)
code.putln("{") # Set up necessary stack for getbuffer
code.putln("__Pyx_BufFmt_StackElem __pyx_stack[%d];" % buffer_type.dtype.struct_nesting_depth())
getbuffer = get_getbuffer_call(code, "%s", buffer_aux, buffer_type) # fill in object below
if is_initialized:
# Release any existing buffer
code.putln('__Pyx_SafeReleaseBuffer(&%s.rcbuffer->pybuffer);' % pybuffernd_struct)
# Acquire
retcode_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = %s;" % (retcode_cname, getbuffer % rhs_cname))
code.putln('if (%s) {' % (code.unlikely("%s < 0" % retcode_cname)))
# If acquisition failed, attempt to reacquire the old buffer
# before raising the exception. A failure of reacquisition
# will cause the reacquisition exception to be reported, one
# can consider working around this later.
exc_temps = tuple(code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=False)
for _ in range(3))
code.putln('PyErr_Fetch(&%s, &%s, &%s);' % exc_temps)
code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % lhs_cname)))
code.putln('Py_XDECREF(%s); Py_XDECREF(%s); Py_XDECREF(%s);' % exc_temps) # Do not refnanny these!
code.globalstate.use_utility_code(raise_buffer_fallback_code)
code.putln('__Pyx_RaiseBufferFallbackError();')
code.putln('} else {')
code.putln('PyErr_Restore(%s, %s, %s);' % exc_temps)
code.putln('}')
code.putln('%s = %s = %s = 0;' % exc_temps)
for t in exc_temps:
code.funcstate.release_temp(t)
code.putln('}')
# Unpack indices
put_unpack_buffer_aux_into_scope(buf_entry, code)
code.putln(code.error_goto_if_neg(retcode_cname, pos))
code.funcstate.release_temp(retcode_cname)
else:
# Our entry had no previous value, so set to None when acquisition fails.
# In this case, auxiliary vars should be set up right in initialization to a zero-buffer,
# so it suffices to set the buf field to NULL.
code.putln('if (%s) {' % code.unlikely("%s == -1" % (getbuffer % rhs_cname)))
code.putln('%s = %s; __Pyx_INCREF(Py_None); %s.rcbuffer->pybuffer.buf = NULL;' %
(lhs_cname,
PyrexTypes.typecast(buffer_type, PyrexTypes.py_object_type, "Py_None"),
pybuffernd_struct))
code.putln(code.error_goto(pos))
code.put('} else {')
# Unpack indices
put_unpack_buffer_aux_into_scope(buf_entry, code)
code.putln('}')
code.putln("}") # Release stack
def put_buffer_lookup_code(entry, index_signeds, index_cnames, directives,
pos, code, negative_indices, in_nogil_context):
"""
Generates code to process indices and calculate an offset into
a buffer. Returns a C string which gives a pointer which can be
read from or written to at will (it is an expression so caller should
store it in a temporary if it is used more than once).
As the bounds checking can have any number of combinations of unsigned
arguments, smart optimizations etc. we insert it directly in the function
    body. The lookup however is delegated to an inline function that is instantiated
once per ndim (lookup with suboffsets tend to get quite complicated).
entry is a BufferEntry
"""
negative_indices = directives['wraparound'] and negative_indices
if directives['boundscheck']:
# Check bounds and fix negative indices.
# We allocate a temporary which is initialized to -1, meaning OK (!).
# If an error occurs, the temp is set to the index dimension the
# error is occurring at.
failed_dim_temp = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = -1;" % failed_dim_temp)
for dim, (signed, cname, shape) in enumerate(zip(index_signeds, index_cnames, entry.get_buf_shapevars())):
if signed != 0:
# not unsigned, deal with negative index
code.putln("if (%s < 0) {" % cname)
if negative_indices:
code.putln("%s += %s;" % (cname, shape))
code.putln("if (%s) %s = %d;" % (
code.unlikely("%s < 0" % cname),
failed_dim_temp, dim))
else:
code.putln("%s = %d;" % (failed_dim_temp, dim))
code.put("} else ")
# check bounds in positive direction
if signed != 0:
cast = ""
else:
cast = "(size_t)"
code.putln("if (%s) %s = %d;" % (
code.unlikely("%s >= %s%s" % (cname, cast, shape)),
failed_dim_temp, dim))
if in_nogil_context:
code.globalstate.use_utility_code(raise_indexerror_nogil)
func = '__Pyx_RaiseBufferIndexErrorNogil'
else:
code.globalstate.use_utility_code(raise_indexerror_code)
func = '__Pyx_RaiseBufferIndexError'
code.putln("if (%s) {" % code.unlikely("%s != -1" % failed_dim_temp))
code.putln('%s(%s);' % (func, failed_dim_temp))
code.putln(code.error_goto(pos))
code.putln('}')
code.funcstate.release_temp(failed_dim_temp)
elif negative_indices:
# Only fix negative indices.
for signed, cname, shape in zip(index_signeds, index_cnames, entry.get_buf_shapevars()):
if signed != 0:
code.putln("if (%s < 0) %s += %s;" % (cname, cname, shape))
return entry.generate_buffer_lookup_code(code, index_cnames)
def use_bufstruct_declare_code(env):
env.use_utility_code(buffer_struct_declare_code)
def buf_lookup_full_code(proto, defin, name, nd):
"""
Generates a buffer lookup function for the right number
of dimensions. The function gives back a void* at the right location.
"""
# _i_ndex, _s_tride, sub_o_ffset
macroargs = ", ".join(["i%d, s%d, o%d" % (i, i, i) for i in range(nd)])
proto.putln("#define %s(type, buf, %s) (type)(%s_imp(buf, %s))" % (name, macroargs, name, macroargs))
funcargs = ", ".join(["Py_ssize_t i%d, Py_ssize_t s%d, Py_ssize_t o%d" % (i, i, i) for i in range(nd)])
proto.putln("static CYTHON_INLINE void* %s_imp(void* buf, %s);" % (name, funcargs))
defin.putln(dedent("""
static CYTHON_INLINE void* %s_imp(void* buf, %s) {
char* ptr = (char*)buf;
""") % (name, funcargs) + "".join([dedent("""\
ptr += s%d * i%d;
if (o%d >= 0) ptr = *((char**)ptr) + o%d;
""") % (i, i, i, i) for i in range(nd)]
) + "\nreturn ptr;\n}")
def buf_lookup_strided_code(proto, defin, name, nd):
"""
Generates a buffer lookup function for the right number
of dimensions. The function gives back a void* at the right location.
"""
# _i_ndex, _s_tride
args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd)])
proto.putln("#define %s(type, buf, %s) (type)((char*)buf + %s)" % (name, args, offset))
def buf_lookup_c_code(proto, defin, name, nd):
"""
    Similar to strided lookup, but can assume that the last dimension
    doesn't need a stride multiplication, since for a C contiguous buffer
    the last stride equals the item size. Still we keep the same signature
    for now.
"""
if nd == 1:
proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
else:
args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(nd - 1)])
proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, nd - 1))
def buf_lookup_fortran_code(proto, defin, name, nd):
"""
Like C lookup, but the first index is optimized instead.
"""
if nd == 1:
proto.putln("#define %s(type, buf, i0, s0) ((type)buf + i0)" % name)
else:
args = ", ".join(["i%d, s%d" % (i, i) for i in range(nd)])
offset = " + ".join(["i%d * s%d" % (i, i) for i in range(1, nd)])
proto.putln("#define %s(type, buf, %s) ((type)((char*)buf + %s) + i%d)" % (name, args, offset, 0))
def use_py2_buffer_functions(env):
env.use_utility_code(GetAndReleaseBufferUtilityCode())
class GetAndReleaseBufferUtilityCode(object):
# Emulation of PyObject_GetBuffer and PyBuffer_Release for Python 2.
# For >= 2.6 we do double mode -- use the new buffer interface on objects
# which has the right tp_flags set, but emulation otherwise.
requires = None
is_cython_utility = False
def __init__(self):
pass
def __eq__(self, other):
return isinstance(other, GetAndReleaseBufferUtilityCode)
def __hash__(self):
return 24342342
def get_tree(self, **kwargs): pass
def put_code(self, output):
code = output['utility_code_def']
proto_code = output['utility_code_proto']
env = output.module_node.scope
cython_scope = env.context.cython_scope
# Search all types for __getbuffer__ overloads
types = []
visited_scopes = set()
def find_buffer_types(scope):
if scope in visited_scopes:
return
visited_scopes.add(scope)
for m in scope.cimported_modules:
find_buffer_types(m)
for e in scope.type_entries:
if isinstance(e.utility_code_definition, CythonUtilityCode):
continue
t = e.type
if t.is_extension_type:
if scope is cython_scope and not e.used:
continue
release = get = None
for x in t.scope.pyfunc_entries:
if x.name == u"__getbuffer__": get = x.func_cname
elif x.name == u"__releasebuffer__": release = x.func_cname
if get:
types.append((t.typeptr_cname, get, release))
find_buffer_types(env)
util_code = TempitaUtilityCode.load(
"GetAndReleaseBuffer", from_file="Buffer.c",
context=dict(types=types))
proto = util_code.format_code(util_code.proto)
impl = util_code.format_code(
util_code.inject_string_constants(util_code.impl, output)[1])
proto_code.putln(proto)
code.putln(impl)
def mangle_dtype_name(dtype):
# Use prefixes to separate user defined types from builtins
# (consider "typedef float unsigned_int")
if dtype.is_pyobject:
return "object"
elif dtype.is_ptr:
return "ptr"
else:
if dtype.is_typedef or dtype.is_struct_or_union:
prefix = "nn_"
else:
prefix = ""
return prefix + dtype.specialization_name()
def get_type_information_cname(code, dtype, maxdepth=None):
"""
Output the run-time type information (__Pyx_TypeInfo) for given dtype,
and return the name of the type info struct.
Structs with two floats of the same size are encoded as complex numbers.
    One can distinguish between complex numbers declared as a struct and those
    with native encoding by inspecting whether the "fields" field of the type
    is filled in.
"""
namesuffix = mangle_dtype_name(dtype)
name = "__Pyx_TypeInfo_%s" % namesuffix
structinfo_name = "__Pyx_StructFields_%s" % namesuffix
if dtype.is_error: return ""
# It's critical that walking the type info doesn't use more stack
# depth than dtype.struct_nesting_depth() returns, so use an assertion for this
if maxdepth is None: maxdepth = dtype.struct_nesting_depth()
if maxdepth <= 0:
assert False
if name not in code.globalstate.utility_codes:
code.globalstate.utility_codes.add(name)
typecode = code.globalstate['typeinfo']
arraysizes = []
if dtype.is_array:
while dtype.is_array:
arraysizes.append(dtype.size)
dtype = dtype.base_type
complex_possible = dtype.is_struct_or_union and dtype.can_be_complex()
declcode = dtype.empty_declaration_code()
if dtype.is_simple_buffer_dtype():
structinfo_name = "NULL"
elif dtype.is_struct:
struct_scope = dtype.scope
if dtype.is_const:
struct_scope = struct_scope.const_base_type_scope
# Must pre-call all used types in order not to recurse during utility code writing.
fields = struct_scope.var_entries
assert len(fields) > 0
types = [get_type_information_cname(code, f.type, maxdepth - 1)
for f in fields]
typecode.putln("static __Pyx_StructField %s[] = {" % structinfo_name, safe=True)
for f, typeinfo in zip(fields, types):
typecode.putln(' {&%s, "%s", offsetof(%s, %s)},' %
(typeinfo, f.name, dtype.empty_declaration_code(), f.cname), safe=True)
typecode.putln(' {NULL, NULL, 0}', safe=True)
typecode.putln("};", safe=True)
else:
assert False
rep = str(dtype)
flags = "0"
is_unsigned = "0"
if dtype is PyrexTypes.c_char_type:
is_unsigned = "IS_UNSIGNED(%s)" % declcode
typegroup = "'H'"
elif dtype.is_int:
is_unsigned = "IS_UNSIGNED(%s)" % declcode
typegroup = "%s ? 'U' : 'I'" % is_unsigned
elif complex_possible or dtype.is_complex:
typegroup = "'C'"
elif dtype.is_float:
typegroup = "'R'"
elif dtype.is_struct:
typegroup = "'S'"
if dtype.packed:
flags = "__PYX_BUF_FLAGS_PACKED_STRUCT"
elif dtype.is_pyobject:
typegroup = "'O'"
else:
assert False, dtype
typeinfo = ('static __Pyx_TypeInfo %s = '
'{ "%s", %s, sizeof(%s), { %s }, %s, %s, %s, %s };')
tup = (name, rep, structinfo_name, declcode,
', '.join([str(x) for x in arraysizes]) or '0', len(arraysizes),
typegroup, is_unsigned, flags)
typecode.putln(typeinfo % tup, safe=True)
return name
def load_buffer_utility(util_code_name, context=None, **kwargs):
if context is None:
return UtilityCode.load(util_code_name, "Buffer.c", **kwargs)
else:
return TempitaUtilityCode.load(util_code_name, "Buffer.c", context=context, **kwargs)
context = dict(max_dims=Options.buffer_max_dims)
buffer_struct_declare_code = load_buffer_utility("BufferStructDeclare", context=context)
buffer_formats_declare_code = load_buffer_utility("BufferFormatStructs")
# Utility function to set the right exception
# The caller should immediately goto_error
raise_indexerror_code = load_buffer_utility("BufferIndexError")
raise_indexerror_nogil = load_buffer_utility("BufferIndexErrorNogil")
raise_buffer_fallback_code = load_buffer_utility("BufferFallbackError")
acquire_utility_code = load_buffer_utility("BufferGetAndValidate", context=context)
buffer_format_check_code = load_buffer_utility("BufferFormatCheck", context=context)
# See utility code BufferFormatFromTypeInfo
_typeinfo_to_format_code = load_buffer_utility("TypeInfoToFormat")
# ---- Cython-0.29.28/Cython/Compiler/Builtin.py ----
#
# Builtin Definitions
#
from __future__ import absolute_import
from .Symtab import BuiltinScope, StructOrUnionScope
from .Code import UtilityCode
from .TypeSlots import Signature
from . import PyrexTypes
from . import Options
# C-level implementations of builtin types, functions and methods
iter_next_utility_code = UtilityCode.load("IterNext", "ObjectHandling.c")
getattr_utility_code = UtilityCode.load("GetAttr", "ObjectHandling.c")
getattr3_utility_code = UtilityCode.load("GetAttr3", "Builtins.c")
pyexec_utility_code = UtilityCode.load("PyExec", "Builtins.c")
pyexec_globals_utility_code = UtilityCode.load("PyExecGlobals", "Builtins.c")
globals_utility_code = UtilityCode.load("Globals", "Builtins.c")
builtin_utility_code = {
'StopAsyncIteration': UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"),
}
# mapping from builtins to their C-level equivalents
class _BuiltinOverride(object):
def __init__(self, py_name, args, ret_type, cname, py_equiv="*",
utility_code=None, sig=None, func_type=None,
is_strict_signature=False, builtin_return_type=None):
self.py_name, self.cname, self.py_equiv = py_name, cname, py_equiv
self.args, self.ret_type = args, ret_type
self.func_type, self.sig = func_type, sig
self.builtin_return_type = builtin_return_type
self.is_strict_signature = is_strict_signature
self.utility_code = utility_code
def build_func_type(self, sig=None, self_arg=None):
if sig is None:
sig = Signature(self.args, self.ret_type)
sig.exception_check = False # not needed for the current builtins
func_type = sig.function_type(self_arg)
if self.is_strict_signature:
func_type.is_strict_signature = True
if self.builtin_return_type:
func_type.return_type = builtin_types[self.builtin_return_type]
return func_type
class BuiltinAttribute(object):
def __init__(self, py_name, cname=None, field_type=None, field_type_name=None):
self.py_name = py_name
self.cname = cname or py_name
self.field_type_name = field_type_name # can't do the lookup before the type is declared!
self.field_type = field_type
def declare_in_type(self, self_type):
if self.field_type_name is not None:
# lazy type lookup
field_type = builtin_scope.lookup(self.field_type_name).type
else:
field_type = self.field_type or PyrexTypes.py_object_type
entry = self_type.scope.declare(self.py_name, self.cname, field_type, None, 'private')
entry.is_variable = True
class BuiltinFunction(_BuiltinOverride):
def declare_in_scope(self, scope):
func_type, sig = self.func_type, self.sig
if func_type is None:
func_type = self.build_func_type(sig)
scope.declare_builtin_cfunction(self.py_name, func_type, self.cname,
self.py_equiv, self.utility_code)
class BuiltinMethod(_BuiltinOverride):
def declare_in_type(self, self_type):
method_type, sig = self.func_type, self.sig
if method_type is None:
# override 'self' type (first argument)
self_arg = PyrexTypes.CFuncTypeArg("", self_type, None)
self_arg.not_none = True
self_arg.accept_builtin_subtypes = True
method_type = self.build_func_type(sig, self_arg)
self_type.scope.declare_builtin_cfunction(
self.py_name, method_type, self.cname, utility_code=self.utility_code)
builtin_function_table = [
# name, args, return, C API func, py equiv = "*"
BuiltinFunction('abs', "d", "d", "fabs",
is_strict_signature = True),
BuiltinFunction('abs', "f", "f", "fabsf",
is_strict_signature = True),
BuiltinFunction('abs', "i", "i", "abs",
is_strict_signature = True),
BuiltinFunction('abs', "l", "l", "labs",
is_strict_signature = True),
BuiltinFunction('abs', None, None, "__Pyx_abs_longlong",
utility_code = UtilityCode.load("abs_longlong", "Builtins.c"),
func_type = PyrexTypes.CFuncType(
PyrexTypes.c_longlong_type, [
PyrexTypes.CFuncTypeArg("arg", PyrexTypes.c_longlong_type, None)
],
is_strict_signature = True, nogil=True)),
] + list(
BuiltinFunction('abs', None, None, "/*abs_{0}*/".format(t.specialization_name()),
func_type = PyrexTypes.CFuncType(
t,
[PyrexTypes.CFuncTypeArg("arg", t, None)],
is_strict_signature = True, nogil=True))
for t in (PyrexTypes.c_uint_type, PyrexTypes.c_ulong_type, PyrexTypes.c_ulonglong_type)
) + list(
BuiltinFunction('abs', None, None, "__Pyx_c_abs{0}".format(t.funcsuffix),
func_type = PyrexTypes.CFuncType(
t.real_type, [
PyrexTypes.CFuncTypeArg("arg", t, None)
],
is_strict_signature = True, nogil=True))
for t in (PyrexTypes.c_float_complex_type,
PyrexTypes.c_double_complex_type,
PyrexTypes.c_longdouble_complex_type)
) + [
BuiltinFunction('abs', "O", "O", "__Pyx_PyNumber_Absolute",
utility_code=UtilityCode.load("py_abs", "Builtins.c")),
#('all', "", "", ""),
#('any', "", "", ""),
#('ascii', "", "", ""),
#('bin', "", "", ""),
BuiltinFunction('callable', "O", "b", "__Pyx_PyCallable_Check",
utility_code = UtilityCode.load("CallableCheck", "ObjectHandling.c")),
#('chr', "", "", ""),
#('cmp', "", "", "", ""), # int PyObject_Cmp(PyObject *o1, PyObject *o2, int *result)
#('compile', "", "", ""), # PyObject* Py_CompileString( char *str, char *filename, int start)
BuiltinFunction('delattr', "OO", "r", "PyObject_DelAttr"),
BuiltinFunction('dir', "O", "O", "PyObject_Dir"),
BuiltinFunction('divmod', "OO", "O", "PyNumber_Divmod"),
BuiltinFunction('exec', "O", "O", "__Pyx_PyExecGlobals",
utility_code = pyexec_globals_utility_code),
BuiltinFunction('exec', "OO", "O", "__Pyx_PyExec2",
utility_code = pyexec_utility_code),
BuiltinFunction('exec', "OOO", "O", "__Pyx_PyExec3",
utility_code = pyexec_utility_code),
#('eval', "", "", ""),
#('execfile', "", "", ""),
#('filter', "", "", ""),
BuiltinFunction('getattr3', "OOO", "O", "__Pyx_GetAttr3", "getattr",
utility_code=getattr3_utility_code), # Pyrex legacy
BuiltinFunction('getattr', "OOO", "O", "__Pyx_GetAttr3",
utility_code=getattr3_utility_code),
BuiltinFunction('getattr', "OO", "O", "__Pyx_GetAttr",
utility_code=getattr_utility_code),
BuiltinFunction('hasattr', "OO", "b", "__Pyx_HasAttr",
utility_code = UtilityCode.load("HasAttr", "Builtins.c")),
BuiltinFunction('hash', "O", "h", "PyObject_Hash"),
#('hex', "", "", ""),
#('id', "", "", ""),
#('input', "", "", ""),
BuiltinFunction('intern', "O", "O", "__Pyx_Intern",
utility_code = UtilityCode.load("Intern", "Builtins.c")),
BuiltinFunction('isinstance', "OO", "b", "PyObject_IsInstance"),
BuiltinFunction('issubclass', "OO", "b", "PyObject_IsSubclass"),
BuiltinFunction('iter', "OO", "O", "PyCallIter_New"),
BuiltinFunction('iter', "O", "O", "PyObject_GetIter"),
BuiltinFunction('len', "O", "z", "PyObject_Length"),
BuiltinFunction('locals', "", "O", "__pyx_locals"),
#('map', "", "", ""),
#('max', "", "", ""),
#('min', "", "", ""),
BuiltinFunction('next', "O", "O", "__Pyx_PyIter_Next",
utility_code = iter_next_utility_code), # not available in Py2 => implemented here
BuiltinFunction('next', "OO", "O", "__Pyx_PyIter_Next2",
utility_code = iter_next_utility_code), # not available in Py2 => implemented here
#('oct', "", "", ""),
#('open', "ss", "O", "PyFile_FromString"), # not in Py3
] + [
BuiltinFunction('ord', None, None, "__Pyx_long_cast",
func_type=PyrexTypes.CFuncType(
PyrexTypes.c_long_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
is_strict_signature=True))
for c_type in [PyrexTypes.c_py_ucs4_type, PyrexTypes.c_py_unicode_type]
] + [
BuiltinFunction('ord', None, None, "__Pyx_uchar_cast",
func_type=PyrexTypes.CFuncType(
PyrexTypes.c_uchar_type, [PyrexTypes.CFuncTypeArg("c", c_type, None)],
is_strict_signature=True))
for c_type in [PyrexTypes.c_char_type, PyrexTypes.c_schar_type, PyrexTypes.c_uchar_type]
] + [
BuiltinFunction('ord', None, None, "__Pyx_PyObject_Ord",
utility_code=UtilityCode.load_cached("object_ord", "Builtins.c"),
func_type=PyrexTypes.CFuncType(
PyrexTypes.c_long_type, [
PyrexTypes.CFuncTypeArg("c", PyrexTypes.py_object_type, None)
],
exception_value="(long)(Py_UCS4)-1")),
BuiltinFunction('pow', "OOO", "O", "PyNumber_Power"),
BuiltinFunction('pow', "OO", "O", "__Pyx_PyNumber_Power2",
utility_code = UtilityCode.load("pow2", "Builtins.c")),
#('range', "", "", ""),
#('raw_input', "", "", ""),
#('reduce', "", "", ""),
BuiltinFunction('reload', "O", "O", "PyImport_ReloadModule"),
BuiltinFunction('repr', "O", "O", "PyObject_Repr"), # , builtin_return_type='str'), # add in Cython 3.1
#('round', "", "", ""),
BuiltinFunction('setattr', "OOO", "r", "PyObject_SetAttr"),
#('sum', "", "", ""),
#('sorted', "", "", ""),
#('type', "O", "O", "PyObject_Type"),
#('unichr', "", "", ""),
#('unicode', "", "", ""),
#('vars', "", "", ""),
#('zip', "", "", ""),
# Can't do these easily until we have builtin type entries.
#('typecheck', "OO", "i", "PyObject_TypeCheck", False),
#('issubtype', "OO", "i", "PyType_IsSubtype", False),
# Put in namespace append optimization.
BuiltinFunction('__Pyx_PyObject_Append', "OO", "O", "__Pyx_PyObject_Append"),
# This is conditionally looked up based on a compiler directive.
BuiltinFunction('__Pyx_Globals', "", "O", "__Pyx_Globals",
utility_code=globals_utility_code),
]
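# How a table entry above takes effect (illustrative): with the entry
# BuiltinFunction('len', "O", "z", "PyObject_Length"), a call len(x) on an
# object compiles to the C call PyObject_Length(x), whose 'z' signature code
# gives a Py_ssize_t return value instead of a boxed Python int.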
# Builtin types
# bool
# buffer
# classmethod
# dict
# enumerate
# file
# float
# int
# list
# long
# object
# property
# slice
# staticmethod
# super
# str
# tuple
# type
# xrange
builtin_types_table = [
("type", "PyType_Type", []),
# This conflicts with the C++ bool type, and unfortunately
# C++ is too liberal about PyObject* <-> bool conversions,
# resulting in unintuitive runtime behavior and segfaults.
# ("bool", "PyBool_Type", []),
("int", "PyInt_Type", []),
("long", "PyLong_Type", []),
("float", "PyFloat_Type", []),
("complex", "PyComplex_Type", [BuiltinAttribute('cval', field_type_name = 'Py_complex'),
BuiltinAttribute('real', 'cval.real', field_type = PyrexTypes.c_double_type),
BuiltinAttribute('imag', 'cval.imag', field_type = PyrexTypes.c_double_type),
]),
("basestring", "PyBaseString_Type", [
BuiltinMethod("join", "TO", "T", "__Pyx_PyBaseString_Join",
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
]),
("bytearray", "PyByteArray_Type", [
]),
("bytes", "PyBytes_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("join", "TO", "O", "__Pyx_PyBytes_Join",
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
]),
("str", "PyString_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("join", "TO", "O", "__Pyx_PyString_Join",
builtin_return_type='basestring',
utility_code=UtilityCode.load("StringJoin", "StringTools.c")),
]),
("unicode", "PyUnicode_Type", [BuiltinMethod("__contains__", "TO", "b", "PyUnicode_Contains"),
BuiltinMethod("join", "TO", "T", "PyUnicode_Join"),
]),
("tuple", "PyTuple_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
]),
("list", "PyList_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("insert", "TzO", "r", "PyList_Insert"),
BuiltinMethod("reverse", "T", "r", "PyList_Reverse"),
BuiltinMethod("append", "TO", "r", "__Pyx_PyList_Append",
utility_code=UtilityCode.load("ListAppend", "Optimize.c")),
BuiltinMethod("extend", "TO", "r", "__Pyx_PyList_Extend",
utility_code=UtilityCode.load("ListExtend", "Optimize.c")),
]),
("dict", "PyDict_Type", [BuiltinMethod("__contains__", "TO", "b", "PyDict_Contains"),
BuiltinMethod("has_key", "TO", "b", "PyDict_Contains"),
BuiltinMethod("items", "T", "O", "__Pyx_PyDict_Items",
utility_code=UtilityCode.load("py_dict_items", "Builtins.c")),
BuiltinMethod("keys", "T", "O", "__Pyx_PyDict_Keys",
utility_code=UtilityCode.load("py_dict_keys", "Builtins.c")),
BuiltinMethod("values", "T", "O", "__Pyx_PyDict_Values",
utility_code=UtilityCode.load("py_dict_values", "Builtins.c")),
BuiltinMethod("iteritems", "T", "O", "__Pyx_PyDict_IterItems",
utility_code=UtilityCode.load("py_dict_iteritems", "Builtins.c")),
BuiltinMethod("iterkeys", "T", "O", "__Pyx_PyDict_IterKeys",
utility_code=UtilityCode.load("py_dict_iterkeys", "Builtins.c")),
BuiltinMethod("itervalues", "T", "O", "__Pyx_PyDict_IterValues",
utility_code=UtilityCode.load("py_dict_itervalues", "Builtins.c")),
BuiltinMethod("viewitems", "T", "O", "__Pyx_PyDict_ViewItems",
utility_code=UtilityCode.load("py_dict_viewitems", "Builtins.c")),
BuiltinMethod("viewkeys", "T", "O", "__Pyx_PyDict_ViewKeys",
utility_code=UtilityCode.load("py_dict_viewkeys", "Builtins.c")),
BuiltinMethod("viewvalues", "T", "O", "__Pyx_PyDict_ViewValues",
utility_code=UtilityCode.load("py_dict_viewvalues", "Builtins.c")),
BuiltinMethod("clear", "T", "r", "__Pyx_PyDict_Clear",
utility_code=UtilityCode.load("py_dict_clear", "Optimize.c")),
BuiltinMethod("copy", "T", "T", "PyDict_Copy")]),
("slice", "PySlice_Type", [BuiltinAttribute('start'),
BuiltinAttribute('stop'),
BuiltinAttribute('step'),
]),
# ("file", "PyFile_Type", []), # not in Py3
("set", "PySet_Type", [BuiltinMethod("__contains__", "TO", "b", "PySequence_Contains"),
BuiltinMethod("clear", "T", "r", "PySet_Clear"),
# discard() and remove() get special treatment for unhashable values
BuiltinMethod("discard", "TO", "r", "__Pyx_PySet_Discard",
utility_code=UtilityCode.load("py_set_discard", "Optimize.c")),
BuiltinMethod("remove", "TO", "r", "__Pyx_PySet_Remove",
utility_code=UtilityCode.load("py_set_remove", "Optimize.c")),
# update is actually variadic (see Github issue #1645)
# BuiltinMethod("update", "TO", "r", "__Pyx_PySet_Update",
# utility_code=UtilityCode.load_cached("PySet_Update", "Builtins.c")),
BuiltinMethod("add", "TO", "r", "PySet_Add"),
BuiltinMethod("pop", "T", "O", "PySet_Pop")]),
("frozenset", "PyFrozenSet_Type", []),
("Exception", "((PyTypeObject*)PyExc_Exception)[0]", []),
("StopAsyncIteration", "((PyTypeObject*)__Pyx_PyExc_StopAsyncIteration)[0]", []),
]
types_that_construct_their_instance = set([
# some builtin types do not always return an instance of
# themselves - these do:
'type', 'bool', 'long', 'float', 'complex',
'bytes', 'unicode', 'bytearray',
'tuple', 'list', 'dict', 'set', 'frozenset'
# 'str', # only in Py3.x
# 'file', # only in Py2.x
])
builtin_structs_table = [
('Py_buffer', 'Py_buffer',
[("buf", PyrexTypes.c_void_ptr_type),
("obj", PyrexTypes.py_object_type),
("len", PyrexTypes.c_py_ssize_t_type),
("itemsize", PyrexTypes.c_py_ssize_t_type),
("readonly", PyrexTypes.c_bint_type),
("ndim", PyrexTypes.c_int_type),
("format", PyrexTypes.c_char_ptr_type),
("shape", PyrexTypes.c_py_ssize_t_ptr_type),
("strides", PyrexTypes.c_py_ssize_t_ptr_type),
("suboffsets", PyrexTypes.c_py_ssize_t_ptr_type),
("smalltable", PyrexTypes.CArrayType(PyrexTypes.c_py_ssize_t_type, 2)),
("internal", PyrexTypes.c_void_ptr_type),
]),
('Py_complex', 'Py_complex',
[('real', PyrexTypes.c_double_type),
('imag', PyrexTypes.c_double_type),
])
]
# set up builtin scope
builtin_scope = BuiltinScope()
def init_builtin_funcs():
for bf in builtin_function_table:
bf.declare_in_scope(builtin_scope)
builtin_types = {}
def init_builtin_types():
global builtin_types
for name, cname, methods in builtin_types_table:
utility = builtin_utility_code.get(name)
if name == 'frozenset':
objstruct_cname = 'PySetObject'
elif name == 'bytearray':
objstruct_cname = 'PyByteArrayObject'
elif name == 'bool':
objstruct_cname = None
elif name == 'Exception':
objstruct_cname = "PyBaseExceptionObject"
elif name == 'StopAsyncIteration':
objstruct_cname = "PyBaseExceptionObject"
else:
objstruct_cname = 'Py%sObject' % name.capitalize()
the_type = builtin_scope.declare_builtin_type(name, cname, utility, objstruct_cname)
builtin_types[name] = the_type
for method in methods:
method.declare_in_type(the_type)
def init_builtin_structs():
for name, cname, attribute_types in builtin_structs_table:
scope = StructOrUnionScope(name)
for attribute_name, attribute_type in attribute_types:
scope.declare_var(attribute_name, attribute_type, None,
attribute_name, allow_pyobject=True)
builtin_scope.declare_struct_or_union(
name, "struct", scope, 1, None, cname = cname)
def init_builtins():
init_builtin_structs()
init_builtin_types()
init_builtin_funcs()
builtin_scope.declare_var(
'__debug__', PyrexTypes.c_const_type(PyrexTypes.c_bint_type),
pos=None, cname='(!Py_OptimizeFlag)', is_cdef=True)
global list_type, tuple_type, dict_type, set_type, frozenset_type
global bytes_type, str_type, unicode_type, basestring_type, slice_type
global float_type, bool_type, type_type, complex_type, bytearray_type
type_type = builtin_scope.lookup('type').type
list_type = builtin_scope.lookup('list').type
tuple_type = builtin_scope.lookup('tuple').type
dict_type = builtin_scope.lookup('dict').type
set_type = builtin_scope.lookup('set').type
frozenset_type = builtin_scope.lookup('frozenset').type
slice_type = builtin_scope.lookup('slice').type
bytes_type = builtin_scope.lookup('bytes').type
str_type = builtin_scope.lookup('str').type
unicode_type = builtin_scope.lookup('unicode').type
basestring_type = builtin_scope.lookup('basestring').type
bytearray_type = builtin_scope.lookup('bytearray').type
float_type = builtin_scope.lookup('float').type
bool_type = builtin_scope.lookup('bool').type
complex_type = builtin_scope.lookup('complex').type
init_builtins()
# ---- Cython-0.29.28/Cython/Compiler/CmdLine.py ----
#
# Cython - Command Line Parsing
#
from __future__ import absolute_import
import os
import sys
from . import Options
usage = """\
Cython (http://cython.org) is a compiler for code written in the
Cython language. Cython is based on Pyrex by Greg Ewing.
Usage: cython [options] sourcefile.{pyx,py} ...
Options:
-V, --version Display version number of cython compiler
-l, --create-listing Write error messages to a listing file
-I, --include-dir <directory> Search for include files in named directory
(multiple include directories are allowed).
-o, --output-file <filename> Specify name of generated C file
-t, --timestamps Only compile newer source files
-f, --force Compile all source files (overrides implied -t)
-v, --verbose Be verbose, print file names on multiple compilation
-p, --embed-positions If specified, the positions in Cython files of each
function definition are embedded in its docstring.
--cleanup <level> Release interned objects on python exit, for memory debugging.
Level indicates aggressiveness, default 0 releases nothing.
-w, --working <directory> Sets the working directory for Cython (the directory modules
are searched from)
--gdb Output debug information for cygdb
--gdb-outdir <directory> Specify gdb debug information output directory. Implies --gdb.
-D, --no-docstrings Strip docstrings from the compiled module.
-a, --annotate Produce a colorized HTML version of the source.
--annotate-coverage <cov.xml> Annotate and include coverage information from cov.xml.
--line-directives Produce #line directives pointing to the .pyx source
--cplus Output a C++ rather than C file.
--embed[=<method_name>] Generate a main() function that embeds the Python interpreter.
-2 Compile based on Python-2 syntax and code semantics.
-3 Compile based on Python-3 syntax and code semantics.
--3str Compile based on Python-3 syntax and code semantics without
assuming unicode by default for string literals under Python 2.
--lenient Change some compile time errors to runtime errors to
improve Python compatibility
--capi-reexport-cincludes Add cincluded headers to any auto-generated header files.
--fast-fail Abort the compilation on the first error
--warning-errors, -Werror Make all warnings into errors
--warning-extra, -Wextra Enable extra warnings
-X, --directive <name>=<value>[,<name=value,...] Overrides a compiler directive
"""
def parse_command_line(args):
# ... (option parsing elided) ...
if options.output_file and len(sources) > 1:
sys.stderr.write(
"cython: Only one source file allowed when using -o\n")
sys.exit(1)
if len(sources) == 0 and not options.show_version:
bad_usage()
if Options.embed and len(sources) > 1:
sys.stderr.write(
"cython: Only one source file allowed when using -embed\n")
sys.exit(1)
return options, sources
# ---- Cython-0.29.28/Cython/Compiler/Code.pxd ----
from __future__ import absolute_import
cimport cython
from ..StringIOTree cimport StringIOTree
cdef class UtilityCodeBase(object):
cpdef format_code(self, code_string, replace_empty_lines=*)
cdef class UtilityCode(UtilityCodeBase):
cdef public object name
cdef public object proto
cdef public object impl
cdef public object init
cdef public object cleanup
cdef public object proto_block
cdef public object requires
cdef public dict _cache
cdef public list specialize_list
cdef public object file
cpdef none_or_sub(self, s, context)
cdef class FunctionState:
cdef public set names_taken
cdef public object owner
cdef public object scope
cdef public object error_label
cdef public size_t label_counter
cdef public set labels_used
cdef public object return_label
cdef public object continue_label
cdef public object break_label
cdef public list yield_labels
cdef public object return_from_error_cleanup_label # not used in __init__ ?
cdef public object exc_vars
cdef public object current_except
cdef public bint in_try_finally
cdef public bint can_trace
cdef public bint gil_owned
cdef public list temps_allocated
cdef public dict temps_free
cdef public dict temps_used_type
cdef public set zombie_temps
cdef public size_t temp_counter
cdef public list collect_temps_stack
cdef public object closure_temps
cdef public bint should_declare_error_indicator
cdef public bint uses_error_indicator
@cython.locals(n=size_t)
cpdef new_label(self, name=*)
cpdef tuple get_loop_labels(self)
cpdef set_loop_labels(self, labels)
cpdef tuple get_all_labels(self)
cpdef set_all_labels(self, labels)
cpdef start_collecting_temps(self)
cpdef stop_collecting_temps(self)
cpdef list temps_in_use(self)
cdef class IntConst:
cdef public object cname
cdef public object value
cdef public bint is_long
cdef class PyObjectConst:
cdef public object cname
cdef public object type
cdef class StringConst:
cdef public object cname
cdef public object text
cdef public object escaped_value
cdef public dict py_strings
cdef public list py_versions
@cython.locals(intern=bint, is_str=bint, is_unicode=bint)
cpdef get_py_string_const(self, encoding, identifier=*, is_str=*, py3str_cstring=*)
## cdef class PyStringConst:
## cdef public object cname
## cdef public object encoding
## cdef public bint is_str
## cdef public bint is_unicode
## cdef public bint intern
#class GlobalState(object):
#def funccontext_property(name):
cdef class CCodeWriter(object):
cdef readonly StringIOTree buffer
cdef readonly list pyclass_stack
cdef readonly object globalstate
cdef readonly object funcstate
cdef object code_config
cdef object last_pos
cdef object last_marked_pos
cdef Py_ssize_t level
cdef public Py_ssize_t call_level # debug-only, see Nodes.py
cdef bint bol
cpdef write(self, s)
cpdef put(self, code)
cpdef put_safe(self, code)
cpdef putln(self, code=*, bint safe=*)
@cython.final
cdef increase_indent(self)
@cython.final
cdef decrease_indent(self)
cdef class PyrexCodeWriter:
cdef public object f
cdef public Py_ssize_t level
# ---- Cython-0.29.28/Cython/Compiler/Code.py ----
# cython: language_level = 2
# cython: auto_pickle=False
#
# Code output module
#
from __future__ import absolute_import
import cython
cython.declare(os=object, re=object, operator=object, textwrap=object,
Template=object, Naming=object, Options=object, StringEncoding=object,
Utils=object, SourceDescriptor=object, StringIOTree=object,
DebugFlags=object, basestring=object, defaultdict=object,
closing=object, partial=object)
import os
import re
import shutil
import sys
import operator
import textwrap
from string import Template
from functools import partial
from contextlib import closing
from collections import defaultdict
try:
import hashlib
except ImportError:
import md5 as hashlib
from . import Naming
from . import Options
from . import DebugFlags
from . import StringEncoding
from . import Version
from .. import Utils
from .Scanning import SourceDescriptor
from ..StringIOTree import StringIOTree
try:
from __builtin__ import basestring
except ImportError:
from builtins import str as basestring
KEYWORDS_MUST_BE_BYTES = sys.version_info < (2, 7)
non_portable_builtins_map = {
# builtins that have different names in different Python versions
'bytes' : ('PY_MAJOR_VERSION < 3', 'str'),
'unicode' : ('PY_MAJOR_VERSION >= 3', 'str'),
'basestring' : ('PY_MAJOR_VERSION >= 3', 'str'),
'xrange' : ('PY_MAJOR_VERSION >= 3', 'range'),
'raw_input' : ('PY_MAJOR_VERSION >= 3', 'input'),
}
ctypedef_builtins_map = {
# types of builtins in "ctypedef class" statements which we don't
# import either because the names conflict with C types or because
# the type simply is not exposed.
'py_int' : '&PyInt_Type',
'py_long' : '&PyLong_Type',
'py_float' : '&PyFloat_Type',
'wrapper_descriptor' : '&PyWrapperDescr_Type',
}
basicsize_builtins_map = {
# builtins whose type has a different tp_basicsize than sizeof(...)
'PyTypeObject': 'PyHeapTypeObject',
}
uncachable_builtins = [
# Global/builtin names that cannot be cached because they may or may not
# be available at import time, for various reasons:
## - Py3.7+
'breakpoint', # might deserve an implementation in Cython
## - Py3.4+
'__loader__',
'__spec__',
## - Py3+
'BlockingIOError',
'BrokenPipeError',
'ChildProcessError',
'ConnectionAbortedError',
'ConnectionError',
'ConnectionRefusedError',
'ConnectionResetError',
'FileExistsError',
'FileNotFoundError',
'InterruptedError',
'IsADirectoryError',
'ModuleNotFoundError',
'NotADirectoryError',
'PermissionError',
'ProcessLookupError',
'RecursionError',
'ResourceWarning',
#'StopAsyncIteration', # backported
'TimeoutError',
'__build_class__',
'ascii', # might deserve an implementation in Cython
#'exec', # implemented in Cython
## - Py2.7+
'memoryview',
## - platform specific
'WindowsError',
## - others
'_', # e.g. used by gettext
]
special_py_methods = set([
'__cinit__', '__dealloc__', '__richcmp__', '__next__',
'__await__', '__aiter__', '__anext__',
'__getreadbuffer__', '__getwritebuffer__', '__getsegcount__',
'__getcharbuffer__', '__getbuffer__', '__releasebuffer__'
])
modifier_output_mapper = {
'inline': 'CYTHON_INLINE'
}.get
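# Illustrative semantics of the bound dict .get above:
#   modifier_output_mapper('inline') -> 'CYTHON_INLINE'
#   modifier_output_mapper('anything else') -> None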
class IncludeCode(object):
"""
An include file and/or verbatim C code to be included in the
generated sources.
"""
# attributes:
#
# pieces {order: unicode}: pieces of C code to be generated.
# For the included file, the key "order" is zero.
# For verbatim include code, the "order" is the "order"
# attribute of the original IncludeCode where this piece
# of C code was first added. This is needed to prevent
# duplication if the same include code is found through
# multiple cimports.
# location int: where to put this include in the C sources, one
# of the constants INITIAL, EARLY, LATE
# order int: sorting order (automatically set by increasing counter)
# Constants for location. If the same include occurs with different
# locations, the earliest one takes precedence.
INITIAL = 0
EARLY = 1
LATE = 2
counter = 1 # Counter for "order"
def __init__(self, include=None, verbatim=None, late=True, initial=False):
self.order = self.counter
type(self).counter += 1
self.pieces = {}
if include:
if include[0] == '<' and include[-1] == '>':
self.pieces[0] = u'#include {0}'.format(include)
late = False # system include is never late
else:
self.pieces[0] = u'#include "{0}"'.format(include)
if verbatim:
self.pieces[self.order] = verbatim
if initial:
self.location = self.INITIAL
elif late:
self.location = self.LATE
else:
self.location = self.EARLY
def dict_update(self, d, key):
"""
Insert `self` in dict `d` with key `key`. If that key already
exists, update the attributes of the existing value with `self`.
"""
if key in d:
other = d[key]
other.location = min(self.location, other.location)
other.pieces.update(self.pieces)
else:
d[key] = self
def sortkey(self):
return self.order
def mainpiece(self):
"""
Return the main piece of C code, corresponding to the include
file. If there was no include file, return None.
"""
return self.pieces.get(0)
def write(self, code):
# Write values of self.pieces dict, sorted by the keys
for k in sorted(self.pieces):
code.putln(self.pieces[k])
def get_utility_dir():
# make this a function and not a global variable:
# http://trac.cython.org/cython_trac/ticket/475
Cython_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
return os.path.join(Cython_dir, "Utility")
class UtilityCodeBase(object):
"""
Support for loading utility code from a file.
Code sections in the file can be specified as follows:
##### MyUtility.proto #####
[proto declarations]
##### MyUtility.init #####
[code run at module initialization]
##### MyUtility #####
#@requires: MyOtherUtility
#@substitute: naming
[definitions]
for prototypes and implementation respectively. For non-Python or
-Cython files, forward slashes (the C comment character) should be used
instead. 5 to 30 comment characters may be used on either side.
If the @cname decorator is not used and this is a CythonUtilityCode,
one should pass in the 'name' keyword argument to be used for name
mangling of such entries.
"""
is_cython_utility = False
_utility_cache = {}
@classmethod
def _add_utility(cls, utility, type, lines, begin_lineno, tags=None):
if utility is None:
return
code = '\n'.join(lines)
if tags and 'substitute' in tags and tags['substitute'] == set(['naming']):
del tags['substitute']
try:
code = Template(code).substitute(vars(Naming))
except (KeyError, ValueError) as e:
raise RuntimeError("Error parsing templated utility code of type '%s' at line %d: %s" % (
type, begin_lineno, e))
# remember correct line numbers at least until after templating
code = '\n' * begin_lineno + code
if type == 'proto':
utility[0] = code
elif type == 'impl':
utility[1] = code
else:
all_tags = utility[2]
if KEYWORDS_MUST_BE_BYTES:
type = type.encode('ASCII')
all_tags[type] = code
if tags:
all_tags = utility[2]
for name, values in tags.items():
if KEYWORDS_MUST_BE_BYTES:
name = name.encode('ASCII')
all_tags.setdefault(name, set()).update(values)
@classmethod
def load_utilities_from_file(cls, path):
utilities = cls._utility_cache.get(path)
if utilities:
return utilities
filename = os.path.join(get_utility_dir(), path)
_, ext = os.path.splitext(path)
if ext in ('.pyx', '.py', '.pxd', '.pxi'):
comment = '#'
strip_comments = partial(re.compile(r'^\s*#(?!\s*cython\s*:).*').sub, '')
rstrip = StringEncoding._unicode.rstrip
else:
comment = '/'
strip_comments = partial(re.compile(r'^\s*//.*|/\*[^*]*\*/').sub, '')
rstrip = partial(re.compile(r'\s+(\\?)$').sub, r'\1')
match_special = re.compile(
(r'^%(C)s{5,30}\s*(?P<name>(?:\w|\.)+)\s*%(C)s{5,30}|'
r'^%(C)s+@(?P<tag>\w+)\s*:\s*(?P<value>(?:\w|[.:])+)') %
{'C': comment}).match
match_type = re.compile(r'(.+)[.](proto(?:[.]\S+)?|impl|init|cleanup)$').match
with closing(Utils.open_source_file(filename, encoding='UTF-8')) as f:
all_lines = f.readlines()
utilities = defaultdict(lambda: [None, None, {}])
lines = []
tags = defaultdict(set)
utility = type = None
begin_lineno = 0
for lineno, line in enumerate(all_lines):
m = match_special(line)
if m:
if m.group('name'):
cls._add_utility(utility, type, lines, begin_lineno, tags)
begin_lineno = lineno + 1
del lines[:]
tags.clear()
name = m.group('name')
mtype = match_type(name)
if mtype:
name, type = mtype.groups()
else:
type = 'impl'
utility = utilities[name]
else:
tags[m.group('tag')].add(m.group('value'))
lines.append('') # keep line number correct
else:
lines.append(rstrip(strip_comments(line)))
if utility is None:
raise ValueError("Empty utility code file")
# Don't forget to add the last utility code
cls._add_utility(utility, type, lines, begin_lineno, tags)
utilities = dict(utilities) # un-defaultdict-ify
cls._utility_cache[path] = utilities
return utilities
@classmethod
def load(cls, util_code_name, from_file=None, **kwargs):
"""
Load utility code from a file specified by from_file (relative to
Cython/Utility) and name util_code_name. If from_file is not given,
load it from the file util_code_name.*. There should be only one
file matched by this pattern.
"""
if '::' in util_code_name:
from_file, util_code_name = util_code_name.rsplit('::', 1)
if not from_file:
utility_dir = get_utility_dir()
prefix = util_code_name + '.'
try:
listing = os.listdir(utility_dir)
except OSError:
# XXX the code below assumes a 'zipimport.zipimporter' instance
# XXX should be easy to generalize, but too lazy right now to write it
import zipfile
global __loader__
loader = __loader__
archive = loader.archive
with closing(zipfile.ZipFile(archive)) as fileobj:
listing = [os.path.basename(name)
for name in fileobj.namelist()
if os.path.join(archive, name).startswith(utility_dir)]
files = [filename for filename in listing
if filename.startswith(prefix)]
if not files:
raise ValueError("No match found for utility code " + util_code_name)
if len(files) > 1:
raise ValueError("More than one filename match found for utility code " + util_code_name)
from_file = files[0]
utilities = cls.load_utilities_from_file(from_file)
proto, impl, tags = utilities[util_code_name]
if tags:
orig_kwargs = kwargs.copy()
for name, values in tags.items():
if name in kwargs:
continue
# only pass lists when we have to: most arguments expect one value or None
if name == 'requires':
if orig_kwargs:
values = [cls.load(dep, from_file, **orig_kwargs)
for dep in sorted(values)]
else:
# dependencies are rarely unique, so use load_cached() when we can
values = [cls.load_cached(dep, from_file)
for dep in sorted(values)]
elif not values:
values = None
elif len(values) == 1:
values = list(values)[0]
kwargs[name] = values
if proto is not None:
kwargs['proto'] = proto
if impl is not None:
kwargs['impl'] = impl
if 'name' not in kwargs:
kwargs['name'] = util_code_name
if 'file' not in kwargs and from_file:
kwargs['file'] = from_file
return cls(**kwargs)
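# Illustrative usage (utility names here are hypothetical); both calls load
# the same section, the second via the "file::name" shorthand parsed above:
#
#   code = UtilityCode.load("MyHelper", "Example.c")
#   code = UtilityCode.load("Example.c::MyHelper")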
@classmethod
def load_cached(cls, utility_code_name, from_file=None, __cache={}):
"""
Calls .load(), but using a per-type cache based on utility name and file name.
"""
key = (cls, from_file, utility_code_name)
try:
return __cache[key]
except KeyError:
pass
code = __cache[key] = cls.load(utility_code_name, from_file)
return code
@classmethod
def load_as_string(cls, util_code_name, from_file=None, **kwargs):
"""
Load a utility code as a string. Returns (proto, implementation)
"""
util = cls.load(util_code_name, from_file, **kwargs)
proto, impl = util.proto, util.impl
return util.format_code(proto), util.format_code(impl)
def format_code(self, code_string, replace_empty_lines=re.compile(r'\n\n+').sub):
"""
Format a code section for output.
"""
if code_string:
code_string = replace_empty_lines('\n', code_string.strip()) + '\n\n'
return code_string
def __str__(self):
return "<%s(%s)>" % (type(self).__name__, self.name)
def get_tree(self, **kwargs):
pass
def __deepcopy__(self, memodict=None):
# No need to deep-copy utility code since it's essentially immutable.
return self
class UtilityCode(UtilityCodeBase):
"""
Stores utility code to add during code generation.
See GlobalState.put_utility_code.
hashes/equals by instance
proto C prototypes
impl implementation code
init code to call on module initialization
requires utility code dependencies
proto_block the place in the resulting file where the prototype should
end up
name name of the utility code (or None)
file filename of the utility code file this utility was loaded
from (or None)
"""
def __init__(self, proto=None, impl=None, init=None, cleanup=None, requires=None,
proto_block='utility_code_proto', name=None, file=None):
# proto_block: Which code block to dump prototype in. See GlobalState.
self.proto = proto
self.impl = impl
self.init = init
self.cleanup = cleanup
self.requires = requires
self._cache = {}
self.specialize_list = []
self.proto_block = proto_block
self.name = name
self.file = file
def __hash__(self):
return hash((self.proto, self.impl))
def __eq__(self, other):
if self is other:
return True
self_type, other_type = type(self), type(other)
if self_type is not other_type and not (isinstance(other, self_type) or isinstance(self, other_type)):
return False
self_proto = getattr(self, 'proto', None)
other_proto = getattr(other, 'proto', None)
return (self_proto, self.impl) == (other_proto, other.impl)
def none_or_sub(self, s, context):
"""
Format a string in this utility code with context. If None, do nothing.
"""
if s is None:
return None
return s % context
def specialize(self, pyrex_type=None, **data):
# Dicts aren't hashable...
name = self.name
if pyrex_type is not None:
data['type'] = pyrex_type.empty_declaration_code()
data['type_name'] = pyrex_type.specialization_name()
name = "%s[%s]" % (name, data['type_name'])
key = tuple(sorted(data.items()))
try:
return self._cache[key]
except KeyError:
if self.requires is None:
requires = None
else:
requires = [r.specialize(data) for r in self.requires]
s = self._cache[key] = UtilityCode(
self.none_or_sub(self.proto, data),
self.none_or_sub(self.impl, data),
self.none_or_sub(self.init, data),
self.none_or_sub(self.cleanup, data),
requires,
self.proto_block,
name,
)
self.specialize_list.append(s)
return s
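# Sketch of a specialize() call, assuming a utility whose code contains
# '%(type)s' placeholders:
#
#   specialized = util.specialize(PyrexTypes.c_int_type)
#
# substitutes type='int' and type_name='int' through none_or_sub() and caches
# the result by the sorted data items, so an identical later call returns the
# same UtilityCode instance.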
def inject_string_constants(self, impl, output):
"""Replace 'PYIDENT("xyz")' by a constant Python identifier cname.
"""
if 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl:
return False, impl
replacements = {}
def externalise(matchobj):
key = matchobj.groups()
try:
cname = replacements[key]
except KeyError:
str_type, name = key
cname = replacements[key] = output.get_py_string_const(
StringEncoding.EncodedString(name), identifier=str_type == 'IDENT').cname
return cname
impl = re.sub(r'PY(IDENT|UNICODE)\("([^"]+)"\)', externalise, impl)
assert 'PYIDENT(' not in impl and 'PYUNICODE(' not in impl
return True, impl
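# Before/after sketch of the substitution above: an impl line such as
#   __Pyx_PyObject_GetAttrStr(o, PYIDENT("items"))
# comes back with PYIDENT("items") replaced by the cname of the interned
# identifier constant, e.g. something like __pyx_n_s_items (the exact cname
# is handed out by output.get_py_string_const()).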
def inject_unbound_methods(self, impl, output):
"""Replace 'UNBOUND_METHOD(type, "name")' by a constant Python identifier cname.
"""
if 'CALL_UNBOUND_METHOD(' not in impl:
return False, impl
def externalise(matchobj):
type_cname, method_name, obj_cname, args = matchobj.groups()
args = [arg.strip() for arg in args[1:].split(',')] if args else []
assert len(args) < 3, "CALL_UNBOUND_METHOD() does not support %d call arguments" % len(args)
return output.cached_unbound_method_call_code(obj_cname, type_cname, method_name, args)
impl = re.sub(
r'CALL_UNBOUND_METHOD\('
r'([a-zA-Z_]+),' # type cname
r'\s*"([^"]+)",' # method name
r'\s*([^),]+)' # object cname
r'((?:,\s*[^),]+)*)' # args*
r'\)', externalise, impl)
assert 'CALL_UNBOUND_METHOD(' not in impl
return True, impl
def wrap_c_strings(self, impl):
"""Replace CSTRING('''xyz''') by a C compatible string
"""
if 'CSTRING(' not in impl:
return impl
def split_string(matchobj):
content = matchobj.group(1).replace('"', '\042')
return ''.join(
'"%s\\n"\n' % line if not line.endswith('\\') or line.endswith('\\\\') else '"%s"\n' % line[:-1]
for line in content.splitlines())
impl = re.sub(r'CSTRING\(\s*"""([^"]*(?:"[^"]+)*)"""\s*\)', split_string, impl)
assert 'CSTRING(' not in impl
return impl
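# Sketch: CSTRING("""two
# lines""") is rewritten by split_string() above into adjacent C literals,
# one per source line:
#   "two\n"
#   "lines\n"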
def put_code(self, output):
if self.requires:
for dependency in self.requires:
output.use_utility_code(dependency)
if self.proto:
writer = output[self.proto_block]
writer.putln("/* %s.proto */" % self.name)
writer.put_or_include(
self.format_code(self.proto), '%s_proto' % self.name)
if self.impl:
impl = self.format_code(self.wrap_c_strings(self.impl))
is_specialised1, impl = self.inject_string_constants(impl, output)
is_specialised2, impl = self.inject_unbound_methods(impl, output)
writer = output['utility_code_def']
writer.putln("/* %s */" % self.name)
if not (is_specialised1 or is_specialised2):
# no module specific adaptations => can be reused
writer.put_or_include(impl, '%s_impl' % self.name)
else:
writer.put(impl)
if self.init:
writer = output['init_globals']
writer.putln("/* %s.init */" % self.name)
if isinstance(self.init, basestring):
writer.put(self.format_code(self.init))
else:
self.init(writer, output.module_pos)
writer.putln(writer.error_goto_if_PyErr(output.module_pos))
writer.putln()
if self.cleanup and Options.generate_cleanup_code:
writer = output['cleanup_globals']
writer.putln("/* %s.cleanup */" % self.name)
if isinstance(self.cleanup, basestring):
writer.put_or_include(
self.format_code(self.cleanup),
'%s_cleanup' % self.name)
else:
self.cleanup(writer, output.module_pos)
def sub_tempita(s, context, file=None, name=None):
"Run tempita on string s with given context."
if not s:
return None
if file:
context['__name'] = "%s:%s" % (file, name)
elif name:
context['__name'] = name
from ..Tempita import sub
return sub(s, **context)
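# Tempita uses {{...}} placeholders, so, as a sketch:
#   sub_tempita("for (i=0; i<{{max_dims}}; i++)", {'max_dims': 8})
# returns "for (i=0; i<8; i++)"; context keys not referenced by the
# template are simply ignored.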
class TempitaUtilityCode(UtilityCode):
def __init__(self, name=None, proto=None, impl=None, init=None, file=None, context=None, **kwargs):
if context is None:
context = {}
proto = sub_tempita(proto, context, file, name)
impl = sub_tempita(impl, context, file, name)
init = sub_tempita(init, context, file, name)
super(TempitaUtilityCode, self).__init__(
proto, impl, init=init, name=name, file=file, **kwargs)
@classmethod
def load_cached(cls, utility_code_name, from_file=None, context=None, __cache={}):
context_key = tuple(sorted(context.items())) if context else None
assert hash(context_key) is not None # raise TypeError if not hashable
key = (cls, from_file, utility_code_name, context_key)
try:
return __cache[key]
except KeyError:
pass
code = __cache[key] = cls.load(utility_code_name, from_file, context=context)
return code
def none_or_sub(self, s, context):
"""
Format a string in this utility code with context. If None, do nothing.
"""
if s is None:
return None
return sub_tempita(s, context, self.file, self.name)
class LazyUtilityCode(UtilityCodeBase):
"""
Utility code that calls a callback with the root code writer when
available. Useful when you only have 'env' but not 'code'.
"""
__name__ = '<lazy>'
requires = None
def __init__(self, callback):
self.callback = callback
def put_code(self, globalstate):
utility = self.callback(globalstate.rootwriter)
globalstate.use_utility_code(utility)
class FunctionState(object):
# return_label string function return point label
# error_label string error catch point label
# continue_label string loop continue point label
# break_label string loop break point label
# return_from_error_cleanup_label string
# label_counter integer counter for naming labels
# in_try_finally boolean inside try of try...finally
# exc_vars (string * 3) exception variables for reraise, or None
# can_trace boolean line tracing is supported in the current context
# scope Scope the scope object of the current function
# Not used for now, perhaps later
def __init__(self, owner, names_taken=set(), scope=None):
self.names_taken = names_taken
self.owner = owner
self.scope = scope
self.error_label = None
self.label_counter = 0
self.labels_used = set()
self.return_label = self.new_label()
self.new_error_label()
self.continue_label = None
self.break_label = None
self.yield_labels = []
self.in_try_finally = 0
self.exc_vars = None
self.current_except = None
self.can_trace = False
self.gil_owned = True
self.temps_allocated = [] # of (name, type, manage_ref, static)
self.temps_free = {} # (type, manage_ref) -> list of free vars with same type/managed status
self.temps_used_type = {} # name -> (type, manage_ref)
self.zombie_temps = set() # temps that must not be reused after release
self.temp_counter = 0
self.closure_temps = None
# This is used to collect temporaries, useful to find out which temps
# need to be privatized in parallel sections
self.collect_temps_stack = []
# This is used for the error indicator, which needs to be local to the
# function. It used to be global, which relied on the GIL being held.
# However, exceptions may need to be propagated through 'nogil'
# sections, where a global indicator would introduce a race condition.
self.should_declare_error_indicator = False
self.uses_error_indicator = False
# safety checks
def validate_exit(self):
# validate that all allocated temps have been freed
if self.temps_allocated:
leftovers = self.temps_in_use()
if leftovers:
msg = "TEMPGUARD: Temps left over at end of '%s': %s" % (self.scope.name, ', '.join([
'%s [%s]' % (name, ctype)
for name, ctype, is_pytemp in sorted(leftovers)]),
)
#print(msg)
raise RuntimeError(msg)
# labels
def new_label(self, name=None):
n = self.label_counter
self.label_counter = n + 1
label = "%s%d" % (Naming.label_prefix, n)
if name is not None:
label += '_' + name
return label
def new_yield_label(self, expr_type='yield'):
label = self.new_label('resume_from_%s' % expr_type)
num_and_label = (len(self.yield_labels) + 1, label)
self.yield_labels.append(num_and_label)
return num_and_label
def new_error_label(self):
old_err_lbl = self.error_label
self.error_label = self.new_label('error')
return old_err_lbl
def get_loop_labels(self):
return (
self.continue_label,
self.break_label)
def set_loop_labels(self, labels):
(self.continue_label,
self.break_label) = labels
def new_loop_labels(self):
old_labels = self.get_loop_labels()
self.set_loop_labels(
(self.new_label("continue"),
self.new_label("break")))
return old_labels
def get_all_labels(self):
return (
self.continue_label,
self.break_label,
self.return_label,
self.error_label)
def set_all_labels(self, labels):
(self.continue_label,
self.break_label,
self.return_label,
self.error_label) = labels
def all_new_labels(self):
old_labels = self.get_all_labels()
new_labels = []
for old_label, name in zip(old_labels, ['continue', 'break', 'return', 'error']):
if old_label:
new_labels.append(self.new_label(name))
else:
new_labels.append(old_label)
self.set_all_labels(new_labels)
return old_labels
def use_label(self, lbl):
self.labels_used.add(lbl)
def label_used(self, lbl):
return lbl in self.labels_used
# temp handling
def allocate_temp(self, type, manage_ref, static=False, reusable=True):
"""
Allocates a temporary (which may create a new one or get a previously
allocated and released one of the same type). Type is simply registered
and handed back, but will usually be a PyrexType.
If type.is_pyobject, manage_ref comes into play. If manage_ref is set to
True, the temp will be decref-ed on return statements and in exception
handling clauses. Otherwise the caller has to deal with any reference
counting of the variable.
If not type.is_pyobject, then manage_ref will be ignored, but it
still has to be passed. It is recommended to pass False by convention
if it is known that type will never be a Python object.
static=True marks the temporary declaration with "static".
This is only used when allocating backing store for module-level
C array literals.
If reusable=False, the temp will not be reused after release.
A C string referring to the variable is returned.
"""
if type.is_const and not type.is_reference:
type = type.const_base_type
elif type.is_reference and not type.is_fake_reference:
type = type.ref_base_type
elif type.is_cfunction:
from . import PyrexTypes
type = PyrexTypes.c_ptr_type(type) # A function itself isn't an l-value
if not type.is_pyobject and not type.is_memoryviewslice:
# Make manage_ref canonical, so that manage_ref will always mean
# a decref is needed.
manage_ref = False
freelist = self.temps_free.get((type, manage_ref))
if reusable and freelist is not None and freelist[0]:
result = freelist[0].pop()
freelist[1].remove(result)
else:
while True:
self.temp_counter += 1
result = "%s%d" % (Naming.codewriter_temp_prefix, self.temp_counter)
if result not in self.names_taken: break
self.temps_allocated.append((result, type, manage_ref, static))
if not reusable:
self.zombie_temps.add(result)
self.temps_used_type[result] = (type, manage_ref)
if DebugFlags.debug_temp_code_comments:
self.owner.putln("/* %s allocated (%s)%s */" % (result, type, "" if reusable else " - zombie"))
if self.collect_temps_stack:
self.collect_temps_stack[-1].add((result, type))
return result
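# Typical usage pattern (a sketch, not a specific call site):
#
#   temp = code.funcstate.allocate_temp(PyrexTypes.py_object_type, manage_ref=True)
#   ... emit C code that assigns to and reads the cname in 'temp' ...
#   code.funcstate.release_temp(temp)
#
# Each allocate_temp() must be paired with release_temp(); validate_exit()
# above raises a "TEMPGUARD" RuntimeError for temps still in use at the end.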
def release_temp(self, name):
"""
Releases a temporary so that it can be reused by other code needing
a temp of the same type.
"""
type, manage_ref = self.temps_used_type[name]
freelist = self.temps_free.get((type, manage_ref))
if freelist is None:
freelist = ([], set()) # keep order in list and make lookups in set fast
self.temps_free[(type, manage_ref)] = freelist
if name in freelist[1]:
raise RuntimeError("Temp %s freed twice!" % name)
if name not in self.zombie_temps:
freelist[0].append(name)
freelist[1].add(name)
if DebugFlags.debug_temp_code_comments:
self.owner.putln("/* %s released %s*/" % (
name, " - zombie" if name in self.zombie_temps else ""))
def temps_in_use(self):
"""Return a list of (cname,type,manage_ref) tuples of temp names and their type
that are currently in use.
"""
used = []
for name, type, manage_ref, static in self.temps_allocated:
freelist = self.temps_free.get((type, manage_ref))
if freelist is None or name not in freelist[1]:
used.append((name, type, manage_ref and type.is_pyobject))
return used
def temps_holding_reference(self):
"""Return a list of (cname,type) tuples of temp names and their type
that are currently in use. This includes only temps of a
Python object type which owns its reference.
"""
return [(name, type)
for name, type, manage_ref in self.temps_in_use()
if manage_ref and type.is_pyobject]
def all_managed_temps(self):
"""Return a list of (cname, type) tuples of refcount-managed Python objects.
"""
return [(cname, type)
for cname, type, manage_ref, static in self.temps_allocated
if manage_ref]
def all_free_managed_temps(self):
"""Return a list of (cname, type) tuples of refcount-managed Python
objects that are not currently in use. This is used by
try-except and try-finally blocks to clean up temps in the
error case.
"""
return sorted([ # Enforce deterministic order.
(cname, type)
for (type, manage_ref), freelist in self.temps_free.items() if manage_ref
for cname in freelist[0]
])
def start_collecting_temps(self):
"""
Useful to find out which temps were used in a code block
"""
self.collect_temps_stack.append(set())
def stop_collecting_temps(self):
return self.collect_temps_stack.pop()
def init_closure_temps(self, scope):
self.closure_temps = ClosureTempAllocator(scope)
class NumConst(object):
"""Global info about a Python number constant held by GlobalState.
cname string
value string
py_type string int, long, float
value_code string evaluation code if different from value
"""
def __init__(self, cname, value, py_type, value_code=None):
self.cname = cname
self.value = value
self.py_type = py_type
self.value_code = value_code or value
class PyObjectConst(object):
"""Global info about a generic constant held by GlobalState.
"""
# cname string
# type PyrexType
def __init__(self, cname, type):
self.cname = cname
self.type = type
cython.declare(possible_unicode_identifier=object, possible_bytes_identifier=object,
replace_identifier=object, find_alphanums=object)
possible_unicode_identifier = re.compile(br"(?![0-9])\w+$".decode('ascii'), re.U).match
possible_bytes_identifier = re.compile(r"(?![0-9])\w+$".encode('ASCII')).match
replace_identifier = re.compile(r'[^a-zA-Z0-9_]+').sub
find_alphanums = re.compile('([a-zA-Z0-9]+)').findall
class StringConst(object):
"""Global info about a C string constant held by GlobalState.
"""
# cname string
# text EncodedString or BytesLiteral
# py_strings {(identifier, encoding) : PyStringConst}
def __init__(self, cname, text, byte_string):
self.cname = cname
self.text = text
self.escaped_value = StringEncoding.escape_byte_string(byte_string)
self.py_strings = None
self.py_versions = []
def add_py_version(self, version):
if not version:
self.py_versions = [2, 3]
elif version not in self.py_versions:
self.py_versions.append(version)
def get_py_string_const(self, encoding, identifier=None,
is_str=False, py3str_cstring=None):
py_strings = self.py_strings
text = self.text
is_str = bool(identifier or is_str)
is_unicode = encoding is None and not is_str
if encoding is None:
# unicode string
encoding_key = None
else:
# bytes or str
encoding = encoding.lower()
if encoding in ('utf8', 'utf-8', 'ascii', 'usascii', 'us-ascii'):
encoding = None
encoding_key = None
else:
encoding_key = ''.join(find_alphanums(encoding))
key = (is_str, is_unicode, encoding_key, py3str_cstring)
if py_strings is not None:
try:
return py_strings[key]
except KeyError:
pass
else:
self.py_strings = {}
if identifier:
intern = True
elif identifier is None:
if isinstance(text, bytes):
intern = bool(possible_bytes_identifier(text))
else:
intern = bool(possible_unicode_identifier(text))
else:
intern = False
if intern:
prefix = Naming.interned_prefixes['str']
else:
prefix = Naming.py_const_prefix
if encoding_key:
encoding_prefix = '_%s' % encoding_key
else:
encoding_prefix = ''
pystring_cname = "%s%s%s_%s" % (
prefix,
(is_str and 's') or (is_unicode and 'u') or 'b',
encoding_prefix,
self.cname[len(Naming.const_prefix):])
py_string = PyStringConst(
pystring_cname, encoding, is_unicode, is_str, py3str_cstring, intern)
self.py_strings[key] = py_string
return py_string
class PyStringConst(object):
"""Global info about a Python string constant held by GlobalState.
"""
# cname string
# py3str_cstring string
# encoding string
# intern boolean
# is_unicode boolean
# is_str boolean
def __init__(self, cname, encoding, is_unicode, is_str=False,
py3str_cstring=None, intern=False):
self.cname = cname
self.py3str_cstring = py3str_cstring
self.encoding = encoding
self.is_str = is_str
self.is_unicode = is_unicode
self.intern = intern
def __lt__(self, other):
return self.cname < other.cname
class GlobalState(object):
# filename_table {string : int} for finding filename table indexes
# filename_list [string] filenames in filename table order
# input_file_contents dict contents (=list of lines) of any file that was used as input
# to create this output C code. This is
# used to annotate the comments.
#
# utility_codes set IDs of used utility code (to avoid reinsertion)
#
# declared_cnames {string:Entry} used in a transition phase to merge pxd-declared
# constants etc. into the pyx-declared ones (i.e.,
# check if constants are already added).
# In time, hopefully the literals etc. will be
# supplied directly instead.
#
# const_cnames_used dict global counter for unique constant identifiers
#
# parts {string:CCodeWriter}
# interned_strings
# consts
# interned_nums
# directives set Temporary variable used to track
# the current set of directives in the code generation
# process.
directives = {}
code_layout = [
'h_code',
'filename_table',
'utility_code_proto_before_types',
'numeric_typedefs', # Let these detailed individual parts stay!,
'complex_type_declarations', # as the proper solution is to make a full DAG...
'type_declarations', # More coarse-grained blocks would simply hide
'utility_code_proto', # the ugliness, not fix it
'module_declarations',
'typeinfo',
'before_global_var',
'global_var',
'string_decls',
'decls',
'late_includes',
'all_the_rest',
'pystring_table',
'cached_builtins',
'cached_constants',
'init_globals',
'init_module',
'cleanup_globals',
'cleanup_module',
'main_method',
'utility_code_def',
'end'
]
def __init__(self, writer, module_node, code_config, common_utility_include_dir=None):
self.filename_table = {}
self.filename_list = []
self.input_file_contents = {}
self.utility_codes = set()
self.declared_cnames = {}
self.in_utility_code_generation = False
self.code_config = code_config
self.common_utility_include_dir = common_utility_include_dir
self.parts = {}
self.module_node = module_node # because some utility code generation needs it
# (generating backwards-compatible Get/ReleaseBuffer)
self.const_cnames_used = {}
self.string_const_index = {}
self.dedup_const_index = {}
self.pyunicode_ptr_const_index = {}
self.num_const_index = {}
self.py_constants = []
self.cached_cmethods = {}
self.initialised_constants = set()
writer.set_global_state(self)
self.rootwriter = writer
def initialize_main_c_code(self):
rootwriter = self.rootwriter
for part in self.code_layout:
self.parts[part] = rootwriter.insertion_point()
if not Options.cache_builtins:
del self.parts['cached_builtins']
else:
w = self.parts['cached_builtins']
w.enter_cfunc_scope()
w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {")
w = self.parts['cached_constants']
w.enter_cfunc_scope()
w.putln("")
w.putln("static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {")
w.put_declare_refcount_context()
w.put_setup_refcount_context("__Pyx_InitCachedConstants")
w = self.parts['init_globals']
w.enter_cfunc_scope()
w.putln("")
w.putln("static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {")
if not Options.generate_cleanup_code:
del self.parts['cleanup_globals']
else:
w = self.parts['cleanup_globals']
w.enter_cfunc_scope()
w.putln("")
w.putln("static CYTHON_SMALL_CODE void __Pyx_CleanupGlobals(void) {")
code = self.parts['utility_code_proto']
code.putln("")
code.putln("/* --- Runtime support code (head) --- */")
code = self.parts['utility_code_def']
if self.code_config.emit_linenums:
code.write('\n#line 1 "cython_utility"\n')
code.putln("")
code.putln("/* --- Runtime support code --- */")
def finalize_main_c_code(self):
self.close_global_decls()
#
# utility_code_def
#
code = self.parts['utility_code_def']
util = TempitaUtilityCode.load_cached("TypeConversions", "TypeConversion.c")
code.put(util.format_code(util.impl))
code.putln("")
def __getitem__(self, key):
return self.parts[key]
#
# Global constants, interned objects, etc.
#
def close_global_decls(self):
# This is called when it is known that no more global declarations will
# be declared.
self.generate_const_declarations()
if Options.cache_builtins:
w = self.parts['cached_builtins']
w.putln("return 0;")
if w.label_used(w.error_label):
w.put_label(w.error_label)
w.putln("return -1;")
w.putln("}")
w.exit_cfunc_scope()
w = self.parts['cached_constants']
w.put_finish_refcount_context()
w.putln("return 0;")
if w.label_used(w.error_label):
w.put_label(w.error_label)
w.put_finish_refcount_context()
w.putln("return -1;")
w.putln("}")
w.exit_cfunc_scope()
w = self.parts['init_globals']
w.putln("return 0;")
if w.label_used(w.error_label):
w.put_label(w.error_label)
w.putln("return -1;")
w.putln("}")
w.exit_cfunc_scope()
if Options.generate_cleanup_code:
w = self.parts['cleanup_globals']
w.putln("}")
w.exit_cfunc_scope()
if Options.generate_cleanup_code:
w = self.parts['cleanup_module']
w.putln("}")
w.exit_cfunc_scope()
def put_pyobject_decl(self, entry):
self['global_var'].putln("static PyObject *%s;" % entry.cname)
# constant handling at code generation time
def get_cached_constants_writer(self, target=None):
if target is not None:
if target in self.initialised_constants:
# Return None on second/later calls to prevent duplicate creation code.
return None
self.initialised_constants.add(target)
return self.parts['cached_constants']
def get_int_const(self, str_value, longness=False):
py_type = longness and 'long' or 'int'
try:
c = self.num_const_index[(str_value, py_type)]
except KeyError:
c = self.new_num_const(str_value, py_type)
return c
def get_float_const(self, str_value, value_code):
try:
c = self.num_const_index[(str_value, 'float')]
except KeyError:
c = self.new_num_const(str_value, 'float', value_code)
return c
def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
if dedup_key is not None:
const = self.dedup_const_index.get(dedup_key)
if const is not None:
return const
# create a new Python object constant
const = self.new_py_const(type, prefix)
if cleanup_level is not None \
and cleanup_level <= Options.generate_cleanup_code:
cleanup_writer = self.parts['cleanup_globals']
cleanup_writer.putln('Py_CLEAR(%s);' % const.cname)
if dedup_key is not None:
self.dedup_const_index[dedup_key] = const
return const
def get_string_const(self, text, py_version=None):
# return a C string constant, creating a new one if necessary
if text.is_unicode:
byte_string = text.utf8encode()
else:
byte_string = text.byteencode()
try:
c = self.string_const_index[byte_string]
except KeyError:
c = self.new_string_const(text, byte_string)
c.add_py_version(py_version)
return c
def get_pyunicode_ptr_const(self, text):
# return a Py_UNICODE[] constant, creating a new one if necessary
assert text.is_unicode
try:
c = self.pyunicode_ptr_const_index[text]
except KeyError:
c = self.pyunicode_ptr_const_index[text] = self.new_const_cname()
return c
def get_py_string_const(self, text, identifier=None,
is_str=False, unicode_value=None):
# return a Python string constant, creating a new one if necessary
py3str_cstring = None
if is_str and unicode_value is not None \
and unicode_value.utf8encode() != text.byteencode():
py3str_cstring = self.get_string_const(unicode_value, py_version=3)
c_string = self.get_string_const(text, py_version=2)
else:
c_string = self.get_string_const(text)
py_string = c_string.get_py_string_const(
text.encoding, identifier, is_str, py3str_cstring)
return py_string
def get_interned_identifier(self, text):
return self.get_py_string_const(text, identifier=True)
def new_string_const(self, text, byte_string):
cname = self.new_string_const_cname(byte_string)
c = StringConst(cname, text, byte_string)
self.string_const_index[byte_string] = c
return c
def new_num_const(self, value, py_type, value_code=None):
cname = self.new_num_const_cname(value, py_type)
c = NumConst(cname, value, py_type, value_code)
self.num_const_index[(value, py_type)] = c
return c
def new_py_const(self, type, prefix=''):
cname = self.new_const_cname(prefix)
c = PyObjectConst(cname, type)
self.py_constants.append(c)
return c
def new_string_const_cname(self, bytes_value):
# Create a new globally-unique nice name for a C string constant.
value = bytes_value.decode('ASCII', 'ignore')
return self.new_const_cname(value=value)
def new_num_const_cname(self, value, py_type):
if py_type == 'long':
value += 'L'
py_type = 'int'
prefix = Naming.interned_prefixes[py_type]
cname = "%s%s" % (prefix, value)
cname = cname.replace('+', '_').replace('-', 'neg_').replace('.', '_')
return cname
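# Mangling example: for value '-1.5' and py_type 'float', assuming
# Naming.interned_prefixes['float'] == '__pyx_float_', the cname becomes
#
#     '__pyx_float_-1.5'  ->  '__pyx_float_neg_1_5'
#
# after the '+'/'-'/'.' replacements; 'long' values first get an 'L' suffix
# and are then filed under the 'int' prefix.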
def new_const_cname(self, prefix='', value=''):
value = replace_identifier('_', value)[:32].strip('_')
used = self.const_cnames_used
name_suffix = value
while name_suffix in used:
counter = used[value] = used[value] + 1
name_suffix = '%s_%d' % (value, counter)
used[name_suffix] = 1
if prefix:
prefix = Naming.interned_prefixes[prefix]
else:
prefix = Naming.const_prefix
return "%s%s" % (prefix, name_suffix)
def get_cached_unbound_method(self, type_cname, method_name):
key = (type_cname, method_name)
try:
cname = self.cached_cmethods[key]
except KeyError:
cname = self.cached_cmethods[key] = self.new_const_cname(
'umethod', '%s_%s' % (type_cname, method_name))
return cname
def cached_unbound_method_call_code(self, obj_cname, type_cname, method_name, arg_cnames):
# admittedly, not the best place to put this method, but it is reused by UtilityCode and ExprNodes ...
utility_code_name = "CallUnboundCMethod%d" % len(arg_cnames)
self.use_utility_code(UtilityCode.load_cached(utility_code_name, "ObjectHandling.c"))
cache_cname = self.get_cached_unbound_method(type_cname, method_name)
args = [obj_cname] + arg_cnames
return "__Pyx_%s(&%s, %s)" % (
utility_code_name,
cache_cname,
', '.join(args),
)
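# Emission sketch (cnames hypothetical, 'umethod' prefix assumed): for obj
# 'o', type '__pyx_type_X', method 'append' and one argument 'a', this
# returns roughly
#
#     __Pyx_CallUnboundCMethod1(&__pyx_umethod___pyx_type_X_append, o, a)
#
# with the cache struct cname coming from get_cached_unbound_method() above
# (note that new_const_cname() truncates long value parts to 32 characters).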
def add_cached_builtin_decl(self, entry):
if entry.is_builtin and entry.is_const:
if self.should_declare(entry.cname, entry):
self.put_pyobject_decl(entry)
w = self.parts['cached_builtins']
condition = None
if entry.name in non_portable_builtins_map:
condition, replacement = non_portable_builtins_map[entry.name]
w.putln('#if %s' % condition)
self.put_cached_builtin_init(
entry.pos, StringEncoding.EncodedString(replacement),
entry.cname)
w.putln('#else')
self.put_cached_builtin_init(
entry.pos, StringEncoding.EncodedString(entry.name),
entry.cname)
if condition:
w.putln('#endif')
def put_cached_builtin_init(self, pos, name, cname):
w = self.parts['cached_builtins']
interned_cname = self.get_interned_identifier(name).cname
self.use_utility_code(
UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
w.putln('%s = __Pyx_GetBuiltinName(%s); if (!%s) %s' % (
cname,
interned_cname,
cname,
w.error_goto(pos)))
def generate_const_declarations(self):
self.generate_cached_methods_decls()
self.generate_string_constants()
self.generate_num_constants()
self.generate_object_constant_decls()
def generate_object_constant_decls(self):
consts = [(len(c.cname), c.cname, c)
for c in self.py_constants]
consts.sort()
decls_writer = self.parts['decls']
for _, cname, c in consts:
decls_writer.putln(
"static %s;" % c.type.declaration_code(cname))
def generate_cached_methods_decls(self):
if not self.cached_cmethods:
return
decl = self.parts['decls']
init = self.parts['init_globals']
cnames = []
for (type_cname, method_name), cname in sorted(self.cached_cmethods.items()):
cnames.append(cname)
method_name_cname = self.get_interned_identifier(StringEncoding.EncodedString(method_name)).cname
decl.putln('static __Pyx_CachedCFunction %s = {0, &%s, 0, 0, 0};' % (
cname, method_name_cname))
# split type reference storage as it might not be static
init.putln('%s.type = (PyObject*)&%s;' % (
cname, type_cname))
if Options.generate_cleanup_code:
cleanup = self.parts['cleanup_globals']
for cname in cnames:
cleanup.putln("Py_CLEAR(%s.method);" % cname)
def generate_string_constants(self):
c_consts = [(len(c.cname), c.cname, c) for c in self.string_const_index.values()]
c_consts.sort()
py_strings = []
decls_writer = self.parts['string_decls']
for _, cname, c in c_consts:
conditional = False
if c.py_versions and (2 not in c.py_versions or 3 not in c.py_versions):
conditional = True
decls_writer.putln("#if PY_MAJOR_VERSION %s 3" % (
(2 in c.py_versions) and '<' or '>='))
decls_writer.putln('static const char %s[] = "%s";' % (
cname, StringEncoding.split_string_literal(c.escaped_value)))
if conditional:
decls_writer.putln("#endif")
if c.py_strings is not None:
for py_string in c.py_strings.values():
py_strings.append((c.cname, len(py_string.cname), py_string))
for c, cname in sorted(self.pyunicode_ptr_const_index.items()):
utf16_array, utf32_array = StringEncoding.encode_pyunicode_string(c)
if utf16_array:
# Narrow and wide representations differ
decls_writer.putln("#ifdef Py_UNICODE_WIDE")
decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf32_array))
if utf16_array:
decls_writer.putln("#else")
decls_writer.putln("static Py_UNICODE %s[] = { %s };" % (cname, utf16_array))
decls_writer.putln("#endif")
if py_strings:
self.use_utility_code(UtilityCode.load_cached("InitStrings", "StringTools.c"))
py_strings.sort()
w = self.parts['pystring_table']
w.putln("")
w.putln("static __Pyx_StringTabEntry %s[] = {" % Naming.stringtab_cname)
for c_cname, _, py_string in py_strings:
if not py_string.is_str or not py_string.encoding or \
py_string.encoding in ('ASCII', 'USASCII', 'US-ASCII',
'UTF8', 'UTF-8'):
encoding = '0'
else:
encoding = '"%s"' % py_string.encoding.lower()
decls_writer.putln(
"static PyObject *%s;" % py_string.cname)
if py_string.py3str_cstring:
w.putln("#if PY_MAJOR_VERSION >= 3")
w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
py_string.cname,
py_string.py3str_cstring.cname,
py_string.py3str_cstring.cname,
'0', 1, 0,
py_string.intern
))
w.putln("#else")
w.putln("{&%s, %s, sizeof(%s), %s, %d, %d, %d}," % (
py_string.cname,
c_cname,
c_cname,
encoding,
py_string.is_unicode,
py_string.is_str,
py_string.intern
))
if py_string.py3str_cstring:
w.putln("#endif")
w.putln("{0, 0, 0, 0, 0, 0, 0}")
w.putln("};")
init_globals = self.parts['init_globals']
init_globals.putln(
"if (__Pyx_InitStrings(%s) < 0) %s;" % (
Naming.stringtab_cname,
init_globals.error_goto(self.module_pos)))
def generate_num_constants(self):
consts = [(c.py_type, c.value[0] == '-', len(c.value), c.value, c.value_code, c)
for c in self.num_const_index.values()]
consts.sort()
decls_writer = self.parts['decls']
init_globals = self.parts['init_globals']
for py_type, _, _, value, value_code, c in consts:
cname = c.cname
decls_writer.putln("static PyObject *%s;" % cname)
if py_type == 'float':
function = 'PyFloat_FromDouble(%s)'
elif py_type == 'long':
function = 'PyLong_FromString((char *)"%s", 0, 0)'
elif Utils.long_literal(value):
function = 'PyInt_FromString((char *)"%s", 0, 0)'
elif len(value.lstrip('-')) > 4:
function = "PyInt_FromLong(%sL)"
else:
function = "PyInt_FromLong(%s)"
init_globals.putln('%s = %s; %s' % (
cname, function % value_code,
init_globals.error_goto_if_null(cname, self.module_pos)))
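# Constructor selection sketch for the branches above (values illustrative):
#
#     py_type 'float'               ->  PyFloat_FromDouble(...)
#     py_type 'long'                ->  PyLong_FromString((char *)"...", 0, 0)
#     huge int (Utils.long_literal) ->  PyInt_FromString((char *)"...", 0, 0)
#     more than 4 digits            ->  PyInt_FromLong(...L)
#     small int, e.g. '42'          ->  PyInt_FromLong(42)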
# The functions below are there in a transition phase only
# and will be deprecated. They are called from Nodes.BlockNode.
# The copy&paste duplication is intentional in order to be able
# to see quickly how BlockNode worked, until this is replaced.
def should_declare(self, cname, entry):
if cname in self.declared_cnames:
other = self.declared_cnames[cname]
assert str(entry.type) == str(other.type)
assert entry.init == other.init
return False
else:
self.declared_cnames[cname] = entry
return True
#
# File name state
#
def lookup_filename(self, source_desc):
entry = source_desc.get_filenametable_entry()
try:
index = self.filename_table[entry]
except KeyError:
index = len(self.filename_list)
self.filename_list.append(source_desc)
self.filename_table[entry] = index
return index
def commented_file_contents(self, source_desc):
try:
return self.input_file_contents[source_desc]
except KeyError:
pass
source_file = source_desc.get_lines(encoding='ASCII',
error_handling='ignore')
try:
F = [u' * ' + line.rstrip().replace(
u'*/', u'*[inserted by cython to avoid comment closer]/'
).replace(
u'/*', u'/[inserted by cython to avoid comment start]*'
)
for line in source_file]
finally:
if hasattr(source_file, 'close'):
source_file.close()
if not F: F.append(u'')
self.input_file_contents[source_desc] = F
return F
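# Escaping sketch: C comment markers in a source line are neutralised before
# the line is embedded in a /* ... */ block. For the (hypothetical) line
#
#     x = y */ z /* w
#
# the stored form is the single string
#
#     ' * x = y *[inserted by cython to avoid comment closer]/ z '
#     '/[inserted by cython to avoid comment start]* w'
#
# (shown wrapped here; the ' * ' prefix makes it a comment-body line).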
#
# Utility code state
#
def use_utility_code(self, utility_code):
"""
Adds code to the C file. utility_code should
a) implement __eq__/__hash__ for the purpose of knowing whether the same
code has already been included
b) implement put_code, which takes a globalstate instance
See UtilityCode.
"""
if utility_code and utility_code not in self.utility_codes:
self.utility_codes.add(utility_code)
utility_code.put_code(self)
def use_entry_utility_code(self, entry):
if entry is None:
return
if entry.utility_code:
self.use_utility_code(entry.utility_code)
if entry.utility_code_definition:
self.use_utility_code(entry.utility_code_definition)
def funccontext_property(func):
name = func.__name__
attribute_of = operator.attrgetter(name)
def get(self):
return attribute_of(self.funcstate)
def set(self, value):
setattr(self.funcstate, name, value)
return property(get, set)
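# Delegation sketch: each decorated name below becomes a property that reads
# and writes the attribute of the same name on self.funcstate. E.g., for a
# hypothetical writer `w` inside a cfunc scope:
#
#     w.error_label            # same as w.funcstate.error_label
#     w.error_label = label    # same as setattr(w.funcstate, 'error_label', label)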
class CCodeConfig(object):
# emit_linenums boolean write #line pragmas?
# emit_code_comments boolean copy the original code into C comments?
# c_line_in_traceback boolean append the c file and line number to the traceback for exceptions?
def __init__(self, emit_linenums=True, emit_code_comments=True, c_line_in_traceback=True):
self.emit_code_comments = emit_code_comments
self.emit_linenums = emit_linenums
self.c_line_in_traceback = c_line_in_traceback
class CCodeWriter(object):
"""
Utility class to output C code.
When creating an insertion point one must be aware of the state that is
kept:
- formatting state (level, bol) is cloned and used in insertion points
as well
- labels, temps, exc_vars: one must construct a scope in which these can
exist by calling enter_cfunc_scope/exit_cfunc_scope (these are for
sanity checking and forward compatibility). Created insertion points
lose this scope and cannot access it.
- marker: Not copied to insertion point
- filename_table, filename_list, input_file_contents: All codewriters
coming from the same root share the same instances simultaneously.
"""
# f file output file
# buffer StringIOTree
# level int indentation level
# bol bool beginning of line?
# marker string comment to emit before next line
# funcstate FunctionState contains state local to a C function used for code
# generation (labels and temps state etc.)
# globalstate GlobalState contains state global for a C file (input file info,
# utility code, declared constants etc.)
# pyclass_stack list used during recursive code generation to pass information
# about the current class one is in
# code_config CCodeConfig configuration options for the C code writer
@cython.locals(create_from='CCodeWriter')
def __init__(self, create_from=None, buffer=None, copy_formatting=False):
if buffer is None: buffer = StringIOTree()
self.buffer = buffer
self.last_pos = None
self.last_marked_pos = None
self.pyclass_stack = []
self.funcstate = None
self.globalstate = None
self.code_config = None
self.level = 0
self.call_level = 0
self.bol = 1
if create_from is not None:
# Use same global state
self.set_global_state(create_from.globalstate)
self.funcstate = create_from.funcstate
# Clone formatting state
if copy_formatting:
self.level = create_from.level
self.bol = create_from.bol
self.call_level = create_from.call_level
self.last_pos = create_from.last_pos
self.last_marked_pos = create_from.last_marked_pos
def create_new(self, create_from, buffer, copy_formatting):
# polymorphic constructor -- very slightly more versatile
# than using __class__
result = CCodeWriter(create_from, buffer, copy_formatting)
return result
def set_global_state(self, global_state):
assert self.globalstate is None # prevent overwriting once it's set
self.globalstate = global_state
self.code_config = global_state.code_config
def copyto(self, f):
self.buffer.copyto(f)
def getvalue(self):
return self.buffer.getvalue()
def write(self, s):
# also put invalid markers (lineno 0), to indicate that those lines
# have no Cython source code correspondence
cython_lineno = self.last_marked_pos[1] if self.last_marked_pos else 0
self.buffer.markers.extend([cython_lineno] * s.count('\n'))
self.buffer.write(s)
def insertion_point(self):
other = self.create_new(create_from=self, buffer=self.buffer.insertion_point(), copy_formatting=True)
return other
def new_writer(self):
"""
Creates a new CCodeWriter connected to the same global state, which
can later be inserted using insert.
"""
return CCodeWriter(create_from=self)
def insert(self, writer):
"""
Inserts the contents of another code writer (created with
the same global state) in the current location.
It is ok to keep writing to the inserted writer after insertion.
"""
assert writer.globalstate is self.globalstate
self.buffer.insert(writer.buffer)
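# Usage sketch for insertion points (hypothetical writers; a bare CCodeWriter
# additionally needs its global state/code config set up before putln works):
#
#     point = w.insertion_point()
#     w.putln("second")
#     point.putln("first")
#     # w.getvalue() now contains "first" before "second"
#
# because the insertion point keeps a handle into the underlying StringIOTree
# at the position where it was created.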
# Properties delegated to function scope
@funccontext_property
def label_counter(self): pass
@funccontext_property
def return_label(self): pass
@funccontext_property
def error_label(self): pass
@funccontext_property
def labels_used(self): pass
@funccontext_property
def continue_label(self): pass
@funccontext_property
def break_label(self): pass
@funccontext_property
def return_from_error_cleanup_label(self): pass
@funccontext_property
def yield_labels(self): pass
# Functions delegated to function scope
def new_label(self, name=None): return self.funcstate.new_label(name)
def new_error_label(self): return self.funcstate.new_error_label()
def new_yield_label(self, *args): return self.funcstate.new_yield_label(*args)
def get_loop_labels(self): return self.funcstate.get_loop_labels()
def set_loop_labels(self, labels): return self.funcstate.set_loop_labels(labels)
def new_loop_labels(self): return self.funcstate.new_loop_labels()
def get_all_labels(self): return self.funcstate.get_all_labels()
def set_all_labels(self, labels): return self.funcstate.set_all_labels(labels)
def all_new_labels(self): return self.funcstate.all_new_labels()
def use_label(self, lbl): return self.funcstate.use_label(lbl)
def label_used(self, lbl): return self.funcstate.label_used(lbl)
def enter_cfunc_scope(self, scope=None):
self.funcstate = FunctionState(self, scope=scope)
def exit_cfunc_scope(self):
self.funcstate = None
# constant handling
def get_py_int(self, str_value, longness):
return self.globalstate.get_int_const(str_value, longness).cname
def get_py_float(self, str_value, value_code):
return self.globalstate.get_float_const(str_value, value_code).cname
def get_py_const(self, type, prefix='', cleanup_level=None, dedup_key=None):
return self.globalstate.get_py_const(type, prefix, cleanup_level, dedup_key).cname
def get_string_const(self, text):
return self.globalstate.get_string_const(text).cname
def get_pyunicode_ptr_const(self, text):
return self.globalstate.get_pyunicode_ptr_const(text)
def get_py_string_const(self, text, identifier=None,
is_str=False, unicode_value=None):
return self.globalstate.get_py_string_const(
text, identifier, is_str, unicode_value).cname
def get_argument_default_const(self, type):
return self.globalstate.get_py_const(type).cname
def intern(self, text):
return self.get_py_string_const(text)
def intern_identifier(self, text):
return self.get_py_string_const(text, identifier=True)
def get_cached_constants_writer(self, target=None):
return self.globalstate.get_cached_constants_writer(target)
# code generation
def putln(self, code="", safe=False):
if self.last_pos and self.bol:
self.emit_marker()
if self.code_config.emit_linenums and self.last_marked_pos:
source_desc, line, _ = self.last_marked_pos
self.write('\n#line %s "%s"\n' % (line, source_desc.get_escaped_description()))
if code:
if safe:
self.put_safe(code)
else:
self.put(code)
self.write("\n")
self.bol = 1
def mark_pos(self, pos, trace=True):
if pos is None:
return
if self.last_marked_pos and self.last_marked_pos[:2] == pos[:2]:
return
self.last_pos = (pos, trace)
def emit_marker(self):
pos, trace = self.last_pos
self.last_marked_pos = pos
self.last_pos = None
self.write("\n")
if self.code_config.emit_code_comments:
self.indent()
self.write("/* %s */\n" % self._build_marker(pos))
if trace and self.funcstate and self.funcstate.can_trace and self.globalstate.directives['linetrace']:
self.indent()
self.write('__Pyx_TraceLine(%d,%d,%s)\n' % (
pos[1], not self.funcstate.gil_owned, self.error_goto(pos)))
def _build_marker(self, pos):
source_desc, line, col = pos
assert isinstance(source_desc, SourceDescriptor)
contents = self.globalstate.commented_file_contents(source_desc)
lines = contents[max(0, line-3):line] # line numbers start at 1
lines[-1] += u' # <<<<<<<<<<<<<<'
lines += contents[line:line+2]
return u'"%s":%d\n%s\n' % (source_desc.get_escaped_description(), line, u'\n'.join(lines))
def put_safe(self, code):
# put code, but ignore {}
self.write(code)
self.bol = 0
def put_or_include(self, code, name):
include_dir = self.globalstate.common_utility_include_dir
if include_dir and len(code) > 1024:
include_file = "%s_%s.h" % (
name, hashlib.md5(code.encode('utf8')).hexdigest())
path = os.path.join(include_dir, include_file)
if not os.path.exists(path):
tmp_path = '%s.tmp%s' % (path, os.getpid())
with closing(Utils.open_new_file(tmp_path)) as f:
f.write(code)
shutil.move(tmp_path, path)
code = '#include "%s"\n' % path
self.put(code)
def put(self, code):
fix_indent = False
if "{" in code:
dl = code.count("{")
else:
dl = 0
if "}" in code:
dl -= code.count("}")
if dl < 0:
self.level += dl
elif dl == 0 and code[0] == "}":
# special cases like "} else {" need a temporary dedent
fix_indent = True
self.level -= 1
if self.bol:
self.indent()
self.write(code)
self.bol = 0
if dl > 0:
self.level += dl
elif fix_indent:
self.level += 1
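# Indentation tracking sketch: put() adjusts self.level from the brace
# balance of each chunk, with a temporary dedent for lines like "} else {"
# (hypothetical writer `w`):
#
#     w.putln("if (x) {")    # level 0 -> 1 after the line
#     w.putln("y = 1;")      # written at level 1
#     w.putln("} else {")    # balanced braces, printed one level out
#     w.putln("z = 2;")      # back at level 1
#     w.putln("}")           # level 1 -> 0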
def putln_tempita(self, code, **context):
from ..Tempita import sub
self.putln(sub(code, **context))
def put_tempita(self, code, **context):
from ..Tempita import sub
self.put(sub(code, **context))
def increase_indent(self):
self.level += 1
def decrease_indent(self):
self.level -= 1
def begin_block(self):
self.putln("{")
self.increase_indent()
def end_block(self):
self.decrease_indent()
self.putln("}")
def indent(self):
self.write(" " * self.level)
def get_py_version_hex(self, pyversion):
return "0x%02X%02X%02X%02X" % (tuple(pyversion) + (0,0,0,0))[:4]
def put_label(self, lbl):
if lbl in self.funcstate.labels_used:
self.putln("%s:;" % lbl)
def put_goto(self, lbl):
self.funcstate.use_label(lbl)
self.putln("goto %s;" % lbl)
def put_var_declaration(self, entry, storage_class="",
dll_linkage=None, definition=True):
#print "Code.put_var_declaration:", entry.name, "definition =", definition ###
if entry.visibility == 'private' and not (definition or entry.defined_in_pxd):
#print "...private and not definition, skipping", entry.cname ###
return
if entry.visibility == "private" and not entry.used:
#print "...private and not used, skipping", entry.cname ###
return
if storage_class:
self.put("%s " % storage_class)
if not entry.cf_used:
self.put('CYTHON_UNUSED ')
self.put(entry.type.declaration_code(
entry.cname, dll_linkage=dll_linkage))
if entry.init is not None:
self.put_safe(" = %s" % entry.type.literal_code(entry.init))
elif entry.type.is_pyobject:
self.put(" = NULL")
self.putln(";")
def put_temp_declarations(self, func_context):
for name, type, manage_ref, static in func_context.temps_allocated:
decl = type.declaration_code(name)
if type.is_pyobject:
self.putln("%s = NULL;" % decl)
elif type.is_memoryviewslice:
from . import MemoryView
self.putln("%s = %s;" % (decl, MemoryView.memslice_entry_init))
else:
self.putln("%s%s;" % (static and "static " or "", decl))
if func_context.should_declare_error_indicator:
if self.funcstate.uses_error_indicator:
unused = ''
else:
unused = 'CYTHON_UNUSED '
# Initialize these variables to silence compiler warnings
self.putln("%sint %s = 0;" % (unused, Naming.lineno_cname))
self.putln("%sconst char *%s = NULL;" % (unused, Naming.filename_cname))
self.putln("%sint %s = 0;" % (unused, Naming.clineno_cname))
def put_generated_by(self):
self.putln("/* Generated by Cython %s */" % Version.watermark)
self.putln("")
def put_h_guard(self, guard):
self.putln("#ifndef %s" % guard)
self.putln("#define %s" % guard)
def unlikely(self, cond):
if Options.gcc_branch_hints:
return 'unlikely(%s)' % cond
else:
return cond
def build_function_modifiers(self, modifiers, mapper=modifier_output_mapper):
if not modifiers:
return ''
return '%s ' % ' '.join([mapper(m,m) for m in modifiers])
# Python objects and reference counting
def entry_as_pyobject(self, entry):
type = entry.type
if (not entry.is_self_arg and not entry.type.is_complete()
or entry.type.is_extension_type):
return "(PyObject *)" + entry.cname
else:
return entry.cname
def as_pyobject(self, cname, type):
from .PyrexTypes import py_object_type, typecast
return typecast(py_object_type, type, cname)
def put_gotref(self, cname):
self.putln("__Pyx_GOTREF(%s);" % cname)
def put_giveref(self, cname):
self.putln("__Pyx_GIVEREF(%s);" % cname)
def put_xgiveref(self, cname):
self.putln("__Pyx_XGIVEREF(%s);" % cname)
def put_xgotref(self, cname):
self.putln("__Pyx_XGOTREF(%s);" % cname)
def put_incref(self, cname, type, nanny=True):
if nanny:
self.putln("__Pyx_INCREF(%s);" % self.as_pyobject(cname, type))
else:
self.putln("Py_INCREF(%s);" % self.as_pyobject(cname, type))
def put_decref(self, cname, type, nanny=True):
self._put_decref(cname, type, nanny, null_check=False, clear=False)
def put_var_gotref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_GOTREF(%s);" % self.entry_as_pyobject(entry))
def put_var_giveref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_GIVEREF(%s);" % self.entry_as_pyobject(entry))
def put_var_xgotref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_XGOTREF(%s);" % self.entry_as_pyobject(entry))
def put_var_xgiveref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_XGIVEREF(%s);" % self.entry_as_pyobject(entry))
def put_var_incref(self, entry, nanny=True):
if entry.type.is_pyobject:
if nanny:
self.putln("__Pyx_INCREF(%s);" % self.entry_as_pyobject(entry))
else:
self.putln("Py_INCREF(%s);" % self.entry_as_pyobject(entry))
def put_var_xincref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_XINCREF(%s);" % self.entry_as_pyobject(entry))
def put_decref_clear(self, cname, type, nanny=True, clear_before_decref=False):
self._put_decref(cname, type, nanny, null_check=False,
clear=True, clear_before_decref=clear_before_decref)
def put_xdecref(self, cname, type, nanny=True, have_gil=True):
self._put_decref(cname, type, nanny, null_check=True,
have_gil=have_gil, clear=False)
def put_xdecref_clear(self, cname, type, nanny=True, clear_before_decref=False):
self._put_decref(cname, type, nanny, null_check=True,
clear=True, clear_before_decref=clear_before_decref)
def _put_decref(self, cname, type, nanny=True, null_check=False,
have_gil=True, clear=False, clear_before_decref=False):
if type.is_memoryviewslice:
self.put_xdecref_memoryviewslice(cname, have_gil=have_gil)
return
prefix = '__Pyx' if nanny else 'Py'
X = 'X' if null_check else ''
if clear:
if clear_before_decref:
if not nanny:
X = '' # CPython doesn't have a Py_XCLEAR()
self.putln("%s_%sCLEAR(%s);" % (prefix, X, cname))
else:
self.putln("%s_%sDECREF(%s); %s = 0;" % (
prefix, X, self.as_pyobject(cname, type), cname))
else:
self.putln("%s_%sDECREF(%s);" % (
prefix, X, self.as_pyobject(cname, type)))
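# Emission sketch: representative calls and the C they produce (cname 'v'
# hypothetical; object casts from as_pyobject() omitted for brevity):
#
#     put_decref('v', t)                                   ->  __Pyx_DECREF(v);
#     put_xdecref('v', t, nanny=False)                     ->  Py_XDECREF(v);
#     put_decref_clear('v', t)                             ->  __Pyx_DECREF(v); v = 0;
#     put_xdecref_clear('v', t, clear_before_decref=True)  ->  __Pyx_XCLEAR(v);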
def put_decref_set(self, cname, rhs_cname):
self.putln("__Pyx_DECREF_SET(%s, %s);" % (cname, rhs_cname))
def put_xdecref_set(self, cname, rhs_cname):
self.putln("__Pyx_XDECREF_SET(%s, %s);" % (cname, rhs_cname))
def put_var_decref(self, entry):
if entry.type.is_pyobject:
self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
def put_var_xdecref(self, entry, nanny=True):
if entry.type.is_pyobject:
if nanny:
self.putln("__Pyx_XDECREF(%s);" % self.entry_as_pyobject(entry))
else:
self.putln("Py_XDECREF(%s);" % self.entry_as_pyobject(entry))
def put_var_decref_clear(self, entry):
self._put_var_decref_clear(entry, null_check=False)
def put_var_xdecref_clear(self, entry):
self._put_var_decref_clear(entry, null_check=True)
def _put_var_decref_clear(self, entry, null_check):
if entry.type.is_pyobject:
if entry.in_closure:
# reset before DECREF to make sure closure state is
# consistent during call to DECREF()
self.putln("__Pyx_%sCLEAR(%s);" % (
null_check and 'X' or '',
entry.cname))
else:
self.putln("__Pyx_%sDECREF(%s); %s = 0;" % (
null_check and 'X' or '',
self.entry_as_pyobject(entry),
entry.cname))
def put_var_decrefs(self, entries, used_only = 0):
for entry in entries:
if not used_only or entry.used:
if entry.xdecref_cleanup:
self.put_var_xdecref(entry)
else:
self.put_var_decref(entry)
def put_var_xdecrefs(self, entries):
for entry in entries:
self.put_var_xdecref(entry)
def put_var_xdecrefs_clear(self, entries):
for entry in entries:
self.put_var_xdecref_clear(entry)
def put_incref_memoryviewslice(self, slice_cname, have_gil=False):
from . import MemoryView
self.globalstate.use_utility_code(MemoryView.memviewslice_init_code)
self.putln("__PYX_INC_MEMVIEW(&%s, %d);" % (slice_cname, int(have_gil)))
def put_xdecref_memoryviewslice(self, slice_cname, have_gil=False):
from . import MemoryView
self.globalstate.use_utility_code(MemoryView.memviewslice_init_code)
self.putln("__PYX_XDEC_MEMVIEW(&%s, %d);" % (slice_cname, int(have_gil)))
def put_xgiveref_memoryviewslice(self, slice_cname):
self.put_xgiveref("%s.memview" % slice_cname)
def put_init_to_py_none(self, cname, type, nanny=True):
from .PyrexTypes import py_object_type, typecast
py_none = typecast(type, py_object_type, "Py_None")
if nanny:
self.putln("%s = %s; __Pyx_INCREF(Py_None);" % (cname, py_none))
else:
self.putln("%s = %s; Py_INCREF(Py_None);" % (cname, py_none))
def put_init_var_to_py_none(self, entry, template = "%s", nanny=True):
code = template % entry.cname
#if entry.type.is_extension_type:
# code = "((PyObject*)%s)" % code
self.put_init_to_py_none(code, entry.type, nanny)
if entry.in_closure:
self.put_giveref('Py_None')
def put_pymethoddef(self, entry, term, allow_skip=True, wrapper_code_writer=None):
if entry.is_special or entry.name == '__getattribute__':
if entry.name not in special_py_methods:
if entry.name == '__getattr__' and not self.globalstate.directives['fast_getattr']:
pass
# Python's typeobject.c will automatically fill in our slot
# in add_operators() (called by PyType_Ready) with a value
# that's better than ours.
elif allow_skip:
return
method_flags = entry.signature.method_flags()
if not method_flags:
return
if entry.is_special:
from . import TypeSlots
method_flags += [TypeSlots.method_coexist]
func_ptr = wrapper_code_writer.put_pymethoddef_wrapper(entry) if wrapper_code_writer else entry.func_cname
# Add required casts, but try not to shadow real warnings.
cast = '__Pyx_PyCFunctionFast' if 'METH_FASTCALL' in method_flags else 'PyCFunction'
if 'METH_KEYWORDS' in method_flags:
cast += 'WithKeywords'
if cast != 'PyCFunction':
func_ptr = '(void*)(%s)%s' % (cast, func_ptr)
self.putln(
'{"%s", (PyCFunction)%s, %s, %s}%s' % (
entry.name,
func_ptr,
"|".join(method_flags),
entry.doc_cname if entry.doc else '0',
term))
def put_pymethoddef_wrapper(self, entry):
func_cname = entry.func_cname
if entry.is_special:
method_flags = entry.signature.method_flags()
if method_flags and 'METH_NOARGS' in method_flags:
# Special NOARGS methods really take no arguments besides 'self', but PyCFunction expects one.
func_cname = Naming.method_wrapper_prefix + func_cname
self.putln("static PyObject *%s(PyObject *self, CYTHON_UNUSED PyObject *arg) {return %s(self);}" % (
func_cname, entry.func_cname))
return func_cname
# GIL methods
def put_ensure_gil(self, declare_gilstate=True, variable=None):
"""
Acquire the GIL. The generated code is safe even when no PyThreadState
has been allocated for this thread (for threads not initialized by
using the Python API). Additionally, the code generated by this method
may be called recursively.
"""
self.globalstate.use_utility_code(
UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
if self.globalstate.directives['fast_gil']:
self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
else:
self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
self.putln("#ifdef WITH_THREAD")
if not variable:
variable = '__pyx_gilstate_save'
if declare_gilstate:
self.put("PyGILState_STATE ")
self.putln("%s = __Pyx_PyGILState_Ensure();" % variable)
self.putln("#endif")
def put_release_ensured_gil(self, variable=None):
"""
Releases the GIL, corresponds to `put_ensure_gil`.
"""
if self.globalstate.directives['fast_gil']:
self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
else:
self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
if not variable:
variable = '__pyx_gilstate_save'
self.putln("#ifdef WITH_THREAD")
self.putln("__Pyx_PyGILState_Release(%s);" % variable)
self.putln("#endif")
def put_acquire_gil(self, variable=None):
"""
Acquire the GIL. The thread's thread state must have been initialized
by a previous `put_release_gil`.
"""
if self.globalstate.directives['fast_gil']:
self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
else:
self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
self.putln("#ifdef WITH_THREAD")
self.putln("__Pyx_FastGIL_Forget();")
if variable:
self.putln('_save = %s;' % variable)
self.putln("Py_BLOCK_THREADS")
self.putln("#endif")
def put_release_gil(self, variable=None):
"Release the GIL, corresponds to `put_acquire_gil`."
if self.globalstate.directives['fast_gil']:
self.globalstate.use_utility_code(UtilityCode.load_cached("FastGil", "ModuleSetupCode.c"))
else:
self.globalstate.use_utility_code(UtilityCode.load_cached("NoFastGil", "ModuleSetupCode.c"))
self.putln("#ifdef WITH_THREAD")
self.putln("PyThreadState *_save;")
self.putln("Py_UNBLOCK_THREADS")
if variable:
self.putln('%s = _save;' % variable)
self.putln("__Pyx_FastGIL_Remember();")
self.putln("#endif")
def declare_gilstate(self):
self.putln("#ifdef WITH_THREAD")
self.putln("PyGILState_STATE __pyx_gilstate_save;")
self.putln("#endif")
# error handling
def put_error_if_neg(self, pos, value):
# TODO: this path is almost _never_ taken, yet this macro makes it slower!
# return self.putln("if (unlikely(%s < 0)) %s" % (value, self.error_goto(pos)))
return self.putln("if (%s < 0) %s" % (value, self.error_goto(pos)))
def put_error_if_unbound(self, pos, entry, in_nogil_context=False):
from . import ExprNodes
if entry.from_closure:
func = '__Pyx_RaiseClosureNameError'
self.globalstate.use_utility_code(
ExprNodes.raise_closure_name_error_utility_code)
elif entry.type.is_memoryviewslice and in_nogil_context:
func = '__Pyx_RaiseUnboundMemoryviewSliceNogil'
self.globalstate.use_utility_code(
ExprNodes.raise_unbound_memoryview_utility_code_nogil)
else:
func = '__Pyx_RaiseUnboundLocalError'
self.globalstate.use_utility_code(
ExprNodes.raise_unbound_local_error_utility_code)
self.putln('if (unlikely(!%s)) { %s("%s"); %s }' % (
entry.type.check_for_null_code(entry.cname),
func,
entry.name,
self.error_goto(pos)))
def set_error_info(self, pos, used=False):
self.funcstate.should_declare_error_indicator = True
if used:
self.funcstate.uses_error_indicator = True
return "__PYX_MARK_ERR_POS(%s, %s)" % (
self.lookup_filename(pos[0]),
pos[1])
def error_goto(self, pos, used=True):
lbl = self.funcstate.error_label
self.funcstate.use_label(lbl)
if pos is None:
return 'goto %s;' % lbl
self.funcstate.should_declare_error_indicator = True
if used:
self.funcstate.uses_error_indicator = True
return "__PYX_ERR(%s, %s, %s)" % (
self.lookup_filename(pos[0]),
pos[1],
lbl)
def error_goto_if(self, cond, pos):
return "if (%s) %s" % (self.unlikely(cond), self.error_goto(pos))
def error_goto_if_null(self, cname, pos):
return self.error_goto_if("!%s" % cname, pos)
def error_goto_if_neg(self, cname, pos):
return self.error_goto_if("%s < 0" % cname, pos)
def error_goto_if_PyErr(self, pos):
return self.error_goto_if("PyErr_Occurred()", pos)
def lookup_filename(self, filename):
return self.globalstate.lookup_filename(filename)
def put_declare_refcount_context(self):
self.putln('__Pyx_RefNannyDeclarations')
def put_setup_refcount_context(self, name, acquire_gil=False):
if acquire_gil:
self.globalstate.use_utility_code(
UtilityCode.load_cached("ForceInitThreads", "ModuleSetupCode.c"))
self.putln('__Pyx_RefNannySetupContext("%s", %d);' % (name, acquire_gil and 1 or 0))
def put_finish_refcount_context(self):
self.putln("__Pyx_RefNannyFinishContext();")
def put_add_traceback(self, qualified_name, include_cline=True):
"""
Build a Python traceback for propagating exceptions.
qualified_name should be the qualified name of the function.
"""
format_tuple = (
qualified_name,
Naming.clineno_cname if include_cline else 0,
Naming.lineno_cname,
Naming.filename_cname,
)
self.funcstate.uses_error_indicator = True
self.putln('__Pyx_AddTraceback("%s", %s, %s, %s);' % format_tuple)
def put_unraisable(self, qualified_name, nogil=False):
"""
Generate code to print a Python warning for an unraisable exception.
qualified_name should be the qualified name of the function.
"""
format_tuple = (
qualified_name,
Naming.clineno_cname,
Naming.lineno_cname,
Naming.filename_cname,
self.globalstate.directives['unraisable_tracebacks'],
nogil,
)
self.funcstate.uses_error_indicator = True
self.putln('__Pyx_WriteUnraisable("%s", %s, %s, %s, %d, %d);' % format_tuple)
self.globalstate.use_utility_code(
UtilityCode.load_cached("WriteUnraisableException", "Exceptions.c"))
def put_trace_declarations(self):
self.putln('__Pyx_TraceDeclarations')
def put_trace_frame_init(self, codeobj=None):
if codeobj:
self.putln('__Pyx_TraceFrameInit(%s)' % codeobj)
def put_trace_call(self, name, pos, nogil=False):
self.putln('__Pyx_TraceCall("%s", %s[%s], %s, %d, %s);' % (
name, Naming.filetable_cname, self.lookup_filename(pos[0]), pos[1], nogil, self.error_goto(pos)))
def put_trace_exception(self):
self.putln("__Pyx_TraceException();")
def put_trace_return(self, retvalue_cname, nogil=False):
self.putln("__Pyx_TraceReturn(%s, %d);" % (retvalue_cname, nogil))
def putln_openmp(self, string):
self.putln("#ifdef _OPENMP")
self.putln(string)
self.putln("#endif /* _OPENMP */")
def undef_builtin_expect(self, cond):
"""
Redefine the macros likely() and unlikely() to no-ops, depending on
the condition 'cond'.
"""
self.putln("#if %s" % cond)
self.putln(" #undef likely")
self.putln(" #undef unlikely")
self.putln(" #define likely(x) (x)")
self.putln(" #define unlikely(x) (x)")
self.putln("#endif")
def redef_builtin_expect(self, cond):
self.putln("#if %s" % cond)
self.putln(" #undef likely")
self.putln(" #undef unlikely")
self.putln(" #define likely(x) __builtin_expect(!!(x), 1)")
self.putln(" #define unlikely(x) __builtin_expect(!!(x), 0)")
self.putln("#endif")
class PyrexCodeWriter(object):
# f file output file
# level int indentation level
def __init__(self, outfile_name):
self.f = Utils.open_new_file(outfile_name)
self.level = 0
def putln(self, code):
self.f.write("%s%s\n" % (" " * self.level, code))
def indent(self):
self.level += 1
def dedent(self):
self.level -= 1
class PyxCodeWriter(object):
"""
Can be used for writing out some Cython code. To use the indenter
functionality, the Cython.Compiler.Importer module has to be used to load
the code, in order to support Python 2.4.
"""
def __init__(self, buffer=None, indent_level=0, context=None, encoding='ascii'):
self.buffer = buffer or StringIOTree()
self.level = indent_level
self.context = context
self.encoding = encoding
def indent(self, levels=1):
self.level += levels
return True
def dedent(self, levels=1):
self.level -= levels
def indenter(self, line):
"""
Instead of
with pyx_code.indenter("for i in range(10):"):
pyx_code.putln("print i")
write
if pyx_code.indenter("for i in range(10):"):
pyx_code.putln("print i")
pyx_code.dedent()
"""
self.putln(line)
self.indent()
return True
def getvalue(self):
result = self.buffer.getvalue()
if isinstance(result, bytes):
result = result.decode(self.encoding)
return result
def putln(self, line, context=None):
context = context or self.context
if context:
line = sub_tempita(line, context)
self._putln(line)
def _putln(self, line):
self.buffer.write("%s%s\n" % (self.level * "    ", line))
def put_chunk(self, chunk, context=None):
context = context or self.context
if context:
chunk = sub_tempita(chunk, context)
chunk = textwrap.dedent(chunk)
for line in chunk.splitlines():
self._putln(line)
def insertion_point(self):
return PyxCodeWriter(self.buffer.insertion_point(), self.level,
self.context)
def named_insertion_point(self, name):
setattr(self, name, self.insertion_point())
class ClosureTempAllocator(object):
def __init__(self, klass):
self.klass = klass
self.temps_allocated = {}
self.temps_free = {}
self.temps_count = 0
def reset(self):
for type, cnames in self.temps_allocated.items():
self.temps_free[type] = list(cnames)
def allocate_temp(self, type):
if type not in self.temps_allocated:
self.temps_allocated[type] = []
self.temps_free[type] = []
elif self.temps_free[type]:
return self.temps_free[type].pop(0)
cname = '%s%d' % (Naming.codewriter_temp_prefix, self.temps_count)
self.klass.declare_var(pos=None, name=cname, cname=cname, type=type, is_cdef=True)
self.temps_allocated[type].append(cname)
self.temps_count += 1
return cname
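# Reuse sketch (hypothetical scope `klass` and type `t`): temps are recycled
# across reset() calls instead of growing without bound:
#
#     alloc = ClosureTempAllocator(klass)
#     a = alloc.allocate_temp(t)   # declares a fresh temp in the closure class
#     b = alloc.allocate_temp(t)   # declares a second one
#     alloc.reset()                # marks both as free again
#     c = alloc.allocate_temp(t)   # c == a, no new declaration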
# Cython/Compiler/CodeGeneration.py
from __future__ import absolute_import
from .Visitor import VisitorTransform
from .Nodes import StatListNode
class ExtractPxdCode(VisitorTransform):
"""
Finds nodes in a pxd file that should generate code, and
returns them in a StatListNode.
The result is a tuple (StatListNode, ModuleScope), i.e.
everything that is needed from the pxd after it is processed.
A purer approach would be to separately compile the pxd code,
but the result would have to be slightly more sophisticated
than pure strings (functions + wanted interned strings +
wanted utility code + wanted cached objects) so for now this
approach is taken.
"""
def __call__(self, root):
self.funcs = []
self.visitchildren(root)
return (StatListNode(root.pos, stats=self.funcs), root.scope)
def visit_FuncDefNode(self, node):
self.funcs.append(node)
# Do not visit children, nested funcdefnodes will
# also be moved by this action...
return node
def visit_Node(self, node):
self.visitchildren(node)
return node
# Cython/Compiler/CythonScope.py
from __future__ import absolute_import
from .Symtab import ModuleScope
from .PyrexTypes import *
from .UtilityCode import CythonUtilityCode
from .Errors import error
from .Scanning import StringSourceDescriptor
from . import MemoryView
class CythonScope(ModuleScope):
is_cython_builtin = 1
_cythonscope_initialized = False
def __init__(self, context):
ModuleScope.__init__(self, u'cython', None, None)
self.pxd_file_loaded = True
self.populate_cython_scope()
# The Main.Context object
self.context = context
for fused_type in (cy_integral_type, cy_floating_type, cy_numeric_type):
entry = self.declare_typedef(fused_type.name,
fused_type,
None,
cname='')
entry.in_cinclude = True
def is_cpp(self):
# Allow C++ utility code in C++ contexts.
return self.context.cpp
def lookup_type(self, name):
# This function should go away when types are all first-level objects.
type = parse_basic_type(name)
if type:
return type
return super(CythonScope, self).lookup_type(name)
def lookup(self, name):
entry = super(CythonScope, self).lookup(name)
if entry is None and not self._cythonscope_initialized:
self.load_cythonscope()
entry = super(CythonScope, self).lookup(name)
return entry
def find_module(self, module_name, pos):
error("cython.%s is not available" % module_name, pos)
def find_submodule(self, module_name):
entry = self.entries.get(module_name, None)
if not entry:
self.load_cythonscope()
entry = self.entries.get(module_name, None)
if entry and entry.as_module:
return entry.as_module
else:
# TODO: fix find_submodule control flow so that we're not
# expected to create a submodule here (to protect CythonScope's
# possible immutability). Hack ourselves out of the situation
# for now.
raise error((StringSourceDescriptor(u"cython", u""), 0, 0),
"cython.%s is not available" % module_name)
def lookup_qualified_name(self, qname):
# ExprNode.as_cython_attribute generates qnames and we untangle it here...
name_path = qname.split(u'.')
scope = self
while len(name_path) > 1:
scope = scope.lookup_here(name_path[0])
if scope:
scope = scope.as_module
del name_path[0]
if scope is None:
return None
else:
return scope.lookup_here(name_path[0])
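# Resolution sketch: a dotted qname such as u'view.array' is resolved by
# walking submodule scopes:
#
#     scope.lookup_qualified_name(u'view.array')
#     # -> lookup_here('view').as_module.lookup_here('array')
#
# returning None as soon as an intermediate scope is missing.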
def populate_cython_scope(self):
# These are used to optimize isinstance in FinalOptimizePhase
type_object = self.declare_typedef(
'PyTypeObject',
base_type = c_void_type,
pos = None,
cname = 'PyTypeObject')
type_object.is_void = True
type_object_type = type_object.type
self.declare_cfunction(
'PyObject_TypeCheck',
CFuncType(c_bint_type, [CFuncTypeArg("o", py_object_type, None),
CFuncTypeArg("t", c_ptr_type(type_object_type), None)]),
pos = None,
defining = 1,
cname = 'PyObject_TypeCheck')
def load_cythonscope(self):
"""
Creates some entries for testing purposes and entries for
cython.array() and for cython.view.*.
"""
if self._cythonscope_initialized:
return
self._cythonscope_initialized = True
cython_testscope_utility_code.declare_in_scope(
self, cython_scope=self)
cython_test_extclass_utility_code.declare_in_scope(
self, cython_scope=self)
#
# The view sub-scope
#
self.viewscope = viewscope = ModuleScope(u'view', self, None)
self.declare_module('view', viewscope, None).as_module = viewscope
viewscope.is_cython_builtin = True
viewscope.pxd_file_loaded = True
cythonview_testscope_utility_code.declare_in_scope(
viewscope, cython_scope=self)
view_utility_scope = MemoryView.view_utility_code.declare_in_scope(
self.viewscope, cython_scope=self,
whitelist=MemoryView.view_utility_whitelist)
# self.entries["array"] = view_utility_scope.entries.pop("array")
def create_cython_scope(context):
# One could in fact probably make it a singleton,
# but not sure yet whether any code mutates it (which would kill reusing
# it across different contexts)
return CythonScope(context)
# Load test utilities for the cython scope
def load_testscope_utility(cy_util_name, **kwargs):
return CythonUtilityCode.load(cy_util_name, "TestCythonScope.pyx", **kwargs)
undecorated_methods_protos = UtilityCode(proto=u"""
/* These methods are undecorated and have therefore no prototype */
static PyObject *__pyx_TestClass_cdef_method(
struct __pyx_TestClass_obj *self, int value);
static PyObject *__pyx_TestClass_cpdef_method(
struct __pyx_TestClass_obj *self, int value, int skip_dispatch);
static PyObject *__pyx_TestClass_def_method(
PyObject *self, PyObject *value);
""")
cython_testscope_utility_code = load_testscope_utility("TestScope")
test_cython_utility_dep = load_testscope_utility("TestDep")
cython_test_extclass_utility_code = \
load_testscope_utility("TestClass", name="TestClass",
requires=[undecorated_methods_protos,
test_cython_utility_dep])
cythonview_testscope_utility_code = load_testscope_utility("View.TestScope")
# Cython/Compiler/DebugFlags.py
# Can be enabled at the command line with --debug-xxx.
debug_disposal_code = 0
debug_temp_alloc = 0
debug_coercion = 0
# Write comments into the C code that show where temporary variables
# are allocated and released.
debug_temp_code_comments = 0
# Write a call trace of the code generation phase into the C code.
debug_trace_code_generation = 0
# Do not replace exceptions with user-friendly error messages.
debug_no_exception_intercept = 0
# Print a message each time a new stage in the pipeline is entered.
debug_verbose_pipeline = 0
# Raise an exception when an error is encountered.
debug_exception_on_error = 0
# Cython/Compiler/Errors.py
#
# Errors
#
from __future__ import absolute_import
try:
from __builtin__ import basestring as any_string_type
except ImportError:
any_string_type = (bytes, str)
import sys
from contextlib import contextmanager
from ..Utils import open_new_file
from . import DebugFlags
from . import Options
class PyrexError(Exception):
pass
class PyrexWarning(Exception):
pass
def context(position):
source = position[0]
assert not (isinstance(source, any_string_type)), (
"Please replace filename strings with Scanning.FileSourceDescriptor instances %r" % source)
try:
F = source.get_lines()
except UnicodeDecodeError:
# file has an encoding problem
s = u"[unprintable code]\n"
else:
s = u''.join(F[max(0, position[1]-6):position[1]])
s = u'...\n%s%s^\n' % (s, u' '*(position[2]-1))
s = u'%s\n%s%s\n' % (u'-'*60, s, u'-'*60)
return s
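# Output sketch for a position at line 3, column 5 of some source file
# (illustrative framing only):
#
#     ------------------------------------------------------------
#     ...
#     line 1
#     line 2
#     line 3
#         ^
#     ------------------------------------------------------------
#
# i.e. up to the five preceding lines plus the offending line, with the
# caret placed under the reported column.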
def format_position(position):
if position:
return u"%s:%d:%d: " % (position[0].get_error_description(),
position[1], position[2])
return u''
def format_error(message, position):
if position:
pos_str = format_position(position)
cont = context(position)
message = u'\nError compiling Cython file:\n%s\n%s%s' % (cont, pos_str, message or u'')
return message
class CompileError(PyrexError):
def __init__(self, position = None, message = u""):
self.position = position
self.message_only = message
self.formatted_message = format_error(message, position)
self.reported = False
# Deprecated and withdrawn in 2.6:
# self.message = message
Exception.__init__(self, self.formatted_message)
# Python Exception subclass pickling is broken,
# see http://bugs.python.org/issue1692335
self.args = (position, message)
def __str__(self):
return self.formatted_message
class CompileWarning(PyrexWarning):
def __init__(self, position = None, message = ""):
self.position = position
# Deprecated and withdrawn in 2.6:
# self.message = message
Exception.__init__(self, format_position(position) + message)
class InternalError(Exception):
# If this is ever raised, there is a bug in the compiler.
def __init__(self, message):
self.message_only = message
Exception.__init__(self, u"Internal compiler error: %s"
% message)
class AbortError(Exception):
# Throw this to stop the compilation immediately.
def __init__(self, message):
self.message_only = message
Exception.__init__(self, u"Abort error: %s" % message)
class CompilerCrash(CompileError):
# raised when an unexpected exception occurs in a transform
def __init__(self, pos, context, message, cause, stacktrace=None):
if message:
message = u'\n' + message
else:
message = u'\n'
self.message_only = message
if context:
message = u"Compiler crash in %s%s" % (context, message)
if stacktrace:
import traceback
message += (
u'\n\nCompiler crash traceback from this point on:\n' +
u''.join(traceback.format_tb(stacktrace)))
if cause:
if not stacktrace:
message += u'\n'
message += u'%s: %s' % (cause.__class__.__name__, cause)
CompileError.__init__(self, pos, message)
# Python Exception subclass pickling is broken,
# see http://bugs.python.org/issue1692335
self.args = (pos, context, message, cause, stacktrace)
class NoElementTreeInstalledException(PyrexError):
"""raised when the user enabled options.gdb_debug but no ElementTree
implementation was found
"""
listing_file = None
num_errors = 0
echo_file = None
def open_listing_file(path, echo_to_stderr = 1):
# Begin a new error listing. If path is None, no file
# is opened, the error counter is just reset.
global listing_file, num_errors, echo_file
if path is not None:
listing_file = open_new_file(path)
else:
listing_file = None
if echo_to_stderr:
echo_file = sys.stderr
else:
echo_file = None
num_errors = 0
def close_listing_file():
global listing_file
if listing_file:
listing_file.close()
listing_file = None
def report_error(err, use_stack=True):
if error_stack and use_stack:
error_stack[-1].append(err)
else:
global num_errors
# See Main.py for why dual reporting occurs. Quick fix for now.
if err.reported: return
err.reported = True
try: line = u"%s\n" % err
except UnicodeEncodeError:
# Python <= 2.5 does this for non-ASCII Unicode exceptions
line = format_error(getattr(err, 'message_only', "[unprintable exception message]"),
getattr(err, 'position', None)) + u'\n'
if listing_file:
try: listing_file.write(line)
except UnicodeEncodeError:
listing_file.write(line.encode('ASCII', 'replace'))
if echo_file:
try: echo_file.write(line)
except UnicodeEncodeError:
echo_file.write(line.encode('ASCII', 'replace'))
num_errors += 1
if Options.fast_fail:
raise AbortError("fatal errors")
def error(position, message):
#print("Errors.error:", repr(position), repr(message)) ###
if position is None:
raise InternalError(message)
err = CompileError(position, message)
if DebugFlags.debug_exception_on_error: raise Exception(err) # debug
report_error(err)
return err
LEVEL = 1 # warn about all errors level 1 or higher
def message(position, message, level=1):
if level < LEVEL:
return
warn = CompileWarning(position, message)
line = "note: %s\n" % warn
if listing_file:
listing_file.write(line)
if echo_file:
echo_file.write(line)
return warn
def warning(position, message, level=0):
if level < LEVEL:
return
if Options.warning_errors and position:
return error(position, message)
warn = CompileWarning(position, message)
line = "warning: %s\n" % warn
if listing_file:
listing_file.write(line)
if echo_file:
echo_file.write(line)
return warn
_warn_once_seen = {}
def warn_once(position, message, level=0):
if level < LEVEL or message in _warn_once_seen:
return
warn = CompileWarning(position, message)
line = "warning: %s\n" % warn
if listing_file:
listing_file.write(line)
if echo_file:
echo_file.write(line)
_warn_once_seen[message] = True
return warn
# These functions can be used to momentarily suppress errors.
error_stack = []
def hold_errors():
error_stack.append([])
def release_errors(ignore=False):
held_errors = error_stack.pop()
if not ignore:
for err in held_errors:
report_error(err)
def held_errors():
return error_stack[-1]
# same as context manager:
@contextmanager
def local_errors(ignore=False):
errors = []
error_stack.append(errors)
try:
yield errors
finally:
release_errors(ignore=ignore)
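# Usage sketch: collect errors from a speculative analysis pass without
# reporting them (helper names hypothetical):
#
#     with local_errors(ignore=True) as errors:
#         try_something_speculative()
#     if errors:
#         fall_back_to_other_strategy()
#
# With ignore=False, the held errors are re-reported on exit instead.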
# this module needs a redesign to support parallel cythonisation, but
# for now, the following works at least in sequential compiler runs
def reset():
_warn_once_seen.clear()
del error_stack[:]
# Cython/Compiler/ExprNodes.py
#
# Parse tree nodes for expressions
#
from __future__ import absolute_import
import cython
cython.declare(error=object, warning=object, warn_once=object, InternalError=object,
CompileError=object, UtilityCode=object, TempitaUtilityCode=object,
StringEncoding=object, operator=object, local_errors=object, report_error=object,
Naming=object, Nodes=object, PyrexTypes=object, py_object_type=object,
list_type=object, tuple_type=object, set_type=object, dict_type=object,
unicode_type=object, str_type=object, bytes_type=object, type_type=object,
Builtin=object, Symtab=object, Utils=object, find_coercion_error=object,
debug_disposal_code=object, debug_temp_alloc=object, debug_coercion=object,
bytearray_type=object, slice_type=object, _py_int_types=object,
IS_PYTHON3=cython.bint)
import re
import sys
import copy
import os.path
import operator
from .Errors import (
error, warning, InternalError, CompileError, report_error, local_errors)
from .Code import UtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from . import Nodes
from .Nodes import Node, utility_code_for_imports, analyse_type_annotation
from . import PyrexTypes
from .PyrexTypes import py_object_type, c_long_type, typecast, error_type, \
unspecified_type
from . import TypeSlots
from .Builtin import list_type, tuple_type, set_type, dict_type, type_type, \
unicode_type, str_type, bytes_type, bytearray_type, basestring_type, slice_type
from . import Builtin
from . import Symtab
from .. import Utils
from .Annotate import AnnotationItem
from . import Future
from ..Debugging import print_call_chain
from .DebugFlags import debug_disposal_code, debug_temp_alloc, \
debug_coercion
from .Pythran import (to_pythran, is_pythran_supported_type, is_pythran_supported_operation_type,
is_pythran_expr, pythran_func_type, pythran_binop_type, pythran_unaryop_type, has_np_pythran,
pythran_indexing_code, pythran_indexing_type, is_pythran_supported_node_or_none, pythran_type,
pythran_is_numpy_func_supported, pythran_get_func_include_file, pythran_functor)
from .PyrexTypes import PythranExpr
try:
from __builtin__ import basestring
except ImportError:
# Python 3
basestring = str
any_string_type = (bytes, str)
else:
# Python 2
any_string_type = (bytes, unicode)
if sys.version_info[0] >= 3:
IS_PYTHON3 = True
_py_int_types = int
else:
IS_PYTHON3 = False
_py_int_types = (int, long)
class NotConstant(object):
_obj = None
def __new__(cls):
if NotConstant._obj is None:
NotConstant._obj = super(NotConstant, cls).__new__(cls)
return NotConstant._obj
def __repr__(self):
return ""
not_a_constant = NotConstant()
constant_value_not_set = object()
# error messages when coercing from key[0] to key[1]
coercion_error_dict = {
# string related errors
(unicode_type, str_type): ("Cannot convert Unicode string to 'str' implicitly."
" This is not portable and requires explicit encoding."),
(unicode_type, bytes_type): "Cannot convert Unicode string to 'bytes' implicitly, encoding required.",
(unicode_type, PyrexTypes.c_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_char_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(unicode_type, PyrexTypes.c_const_uchar_ptr_type): "Unicode objects only support coercion to Py_UNICODE*.",
(bytes_type, unicode_type): "Cannot convert 'bytes' object to unicode implicitly, decoding required",
(bytes_type, str_type): "Cannot convert 'bytes' object to str implicitly. This is not portable to Py3.",
(bytes_type, basestring_type): ("Cannot convert 'bytes' object to basestring implicitly."
" This is not portable to Py3."),
(bytes_type, PyrexTypes.c_py_unicode_ptr_type): "Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'.",
(bytes_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"Cannot convert 'bytes' object to Py_UNICODE*, use 'unicode'."),
(basestring_type, bytes_type): "Cannot convert 'basestring' object to bytes implicitly. This is not portable.",
(str_type, unicode_type): ("str objects do not support coercion to unicode,"
" use a unicode string literal instead (u'')"),
(str_type, bytes_type): "Cannot convert 'str' to 'bytes' implicitly. This is not portable.",
(str_type, PyrexTypes.c_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_char_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_const_uchar_ptr_type): "'str' objects do not support coercion to C types (use 'bytes'?).",
(str_type, PyrexTypes.c_py_unicode_ptr_type): "'str' objects do not support coercion to C types (use 'unicode'?).",
(str_type, PyrexTypes.c_const_py_unicode_ptr_type): (
"'str' objects do not support coercion to C types (use 'unicode'?)."),
(PyrexTypes.c_char_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_char_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
(PyrexTypes.c_uchar_ptr_type, unicode_type): "Cannot convert 'char*' to unicode implicitly, decoding required",
(PyrexTypes.c_const_uchar_ptr_type, unicode_type): (
"Cannot convert 'char*' to unicode implicitly, decoding required"),
}
def find_coercion_error(type_tuple, default, env):
err = coercion_error_dict.get(type_tuple)
if err is None:
return default
elif (env.directives['c_string_encoding'] and
any(t in type_tuple for t in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_uchar_ptr_type,
PyrexTypes.c_const_char_ptr_type, PyrexTypes.c_const_uchar_ptr_type))):
if type_tuple[1].is_pyobject:
return default
elif env.directives['c_string_encoding'] in ('ascii', 'default'):
return default
else:
return "'%s' objects do not support coercion to C types with non-ascii or non-default c_string_encoding" % type_tuple[0].name
else:
return err
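# --- Illustrative aside (not in the original source) ---
# Lookup sketch for find_coercion_error(): a (src_type, dst_type) pair either
# has a tailored message in coercion_error_dict above or falls back to
# `default`; for char*-related pairs, a c_string_encoding directive of
# 'ascii' or 'default' restores the default message, while any other
# encoding yields the non-ascii coercion error. Hypothetical call, assuming
# an `env` whose directives are populated:
#   find_coercion_error((bytes_type, unicode_type), None, env)
#   -> "Cannot convert 'bytes' object to unicode implicitly, decoding required"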
def default_str_type(env):
return {
'bytes': bytes_type,
'bytearray': bytearray_type,
'str': str_type,
'unicode': unicode_type
}.get(env.directives['c_string_type'])
def check_negative_indices(*nodes):
"""
Raise a warning on nodes that are known to have negative numeric values.
Used to find (potential) bugs inside of "wraparound=False" sections.
"""
for node in nodes:
if node is None or (
not isinstance(node.constant_result, _py_int_types) and
not isinstance(node.constant_result, float)):
continue
if node.constant_result < 0:
warning(node.pos,
"the result of using negative indices inside of "
"code sections marked as 'wraparound=False' is "
"undefined", level=1)
def infer_sequence_item_type(env, seq_node, index_node=None, seq_type=None):
if not seq_node.is_sequence_constructor:
if seq_type is None:
seq_type = seq_node.infer_type(env)
if seq_type is tuple_type:
# tuples are immutable => we can safely follow assignments
if seq_node.cf_state and len(seq_node.cf_state) == 1:
try:
seq_node = seq_node.cf_state[0].rhs
except AttributeError:
pass
if seq_node is not None and seq_node.is_sequence_constructor:
if index_node is not None and index_node.has_constant_result():
try:
item = seq_node.args[index_node.constant_result]
except (ValueError, TypeError, IndexError):
pass
else:
return item.infer_type(env)
# if we're lucky, all items have the same type
item_types = set([item.infer_type(env) for item in seq_node.args])
if len(item_types) == 1:
return item_types.pop()
return None
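# --- Illustrative aside (not in the original source) ---
# The two cases handled above, sketched on Cython-level code:
#   t = (1, 2, 3); x = t[i]    # no constant index: all items infer to the
#                              # same type, so that common type is returned
#   t = (1, "a"); x = t[0]     # constant index: the type of item 0 is used
# If neither applies (mixed item types, no usable index), None is returned.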
def make_dedup_key(outer_type, item_nodes):
"""
Recursively generate a deduplication key from a sequence of values.
Includes Cython node types to work around the fact that (1, 2.0) == (1.0, 2), for example.
@param outer_type: The type of the outer container.
@param item_nodes: A sequence of constant nodes that will be traversed recursively.
@return: A tuple that can be used as a dict key for deduplication.
"""
item_keys = [
(py_object_type, None, type(None)) if node is None
# For sequences and their "mult_factor", see TupleNode.
else make_dedup_key(node.type, [node.mult_factor if node.is_literal else None] + node.args) if node.is_sequence_constructor
else make_dedup_key(node.type, (node.start, node.stop, node.step)) if node.is_slice
# For constants, look at the Python value type if we don't know the concrete Cython type.
else (node.type, node.constant_result,
type(node.constant_result) if node.type is py_object_type else None) if node.has_constant_result()
else None # something we cannot handle => short-circuit below
for node in item_nodes
]
if None in item_keys:
return None
return outer_type, tuple(item_keys)
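# --- Illustrative aside (not in the original source) ---
# Why the key includes types: Python compares 1 == 1.0, so constant tuples
# built from different literals can collide. A minimal sketch of the same
# idea on plain values rather than Cython nodes:
def _dedup_key_sketch(values):
    # Pair every constant with its Python type so that (1, 2.0) and (1.0, 2)
    # map to distinct keys although the tuples compare equal.
    return tuple((type(v), v) for v in values)
# _dedup_key_sketch((1, 2.0)) != _dedup_key_sketch((1.0, 2)), even though
# (1, 2.0) == (1.0, 2) evaluates to True.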
# Returns a block of code to translate the exception,
# plus a boolean indicating whether to check for Python exceptions.
def get_exception_handler(exception_value):
if exception_value is None:
return "__Pyx_CppExn2PyErr();", False
elif (exception_value.type == PyrexTypes.c_char_type
and exception_value.value == '*'):
return "__Pyx_CppExn2PyErr();", True
elif exception_value.type.is_pyobject:
return (
'try { throw; } catch(const std::exception& exn) {'
'PyErr_SetString(%s, exn.what());'
'} catch(...) { PyErr_SetNone(%s); }' % (
exception_value.entry.cname,
exception_value.entry.cname),
False)
else:
return (
'%s(); if (!PyErr_Occurred())'
'PyErr_SetString(PyExc_RuntimeError, '
'"Error converting c++ exception.");' % (
exception_value.entry.cname),
False)
def maybe_check_py_error(code, check_py_exception, pos, nogil):
if check_py_exception:
if nogil:
code.putln(code.error_goto_if("__Pyx_ErrOccurredWithGIL()", pos))
else:
code.putln(code.error_goto_if("PyErr_Occurred()", pos))
def translate_cpp_exception(code, pos, inside, py_result, exception_value, nogil):
raise_py_exception, check_py_exception = get_exception_handler(exception_value)
code.putln("try {")
code.putln("%s" % inside)
if py_result:
code.putln(code.error_goto_if_null(py_result, pos))
maybe_check_py_error(code, check_py_exception, pos, nogil)
code.putln("} catch(...) {")
if nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(raise_py_exception)
if nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln("}")
# Used to handle the case where an lvalue expression and an overloaded assignment
# both have an exception declaration.
def translate_double_cpp_exception(code, pos, lhs_type, lhs_code, rhs_code,
lhs_exc_val, assign_exc_val, nogil):
handle_lhs_exc, lhs_check_py_exc = get_exception_handler(lhs_exc_val)
handle_assignment_exc, assignment_check_py_exc = get_exception_handler(assign_exc_val)
code.putln("try {")
code.putln(lhs_type.declaration_code("__pyx_local_lvalue = %s;" % lhs_code))
maybe_check_py_error(code, lhs_check_py_exc, pos, nogil)
code.putln("try {")
code.putln("__pyx_local_lvalue = %s;" % rhs_code)
maybe_check_py_error(code, assignment_check_py_exc, pos, nogil)
# Catch any exception from the overloaded assignment.
code.putln("} catch(...) {")
if nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(handle_assignment_exc)
if nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln("}")
# Catch any exception from evaluating lhs.
code.putln("} catch(...) {")
if nogil:
code.put_ensure_gil(declare_gilstate=True)
code.putln(handle_lhs_exc)
if nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(pos))
code.putln('}')
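# --- Illustrative aside (not in the original source) ---
# The emitted structure is two nested try/catch blocks: the outer one guards
# evaluation of the lvalue, the inner one the overloaded assignment, roughly:
#   try {                                  /* evaluate LHS */
#     LhsType __pyx_local_lvalue = <lhs>;
#     try {                                /* run the overloaded assignment */
#       __pyx_local_lvalue = <rhs>;
#     } catch(...) { <assignment handler>; goto error; }
#   } catch(...) { <lhs handler>; goto error; }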
class ExprNode(Node):
# subexprs [string] Class var holding names of subexpr node attrs
# type PyrexType Type of the result
# result_code string Code fragment
# result_ctype string C type of result_code if different from type
# is_temp boolean Result is in a temporary variable
# is_sequence_constructor
# boolean Is a list or tuple constructor expression
# is_starred boolean Is a starred expression (e.g. '*a')
# saved_subexpr_nodes
# [ExprNode or [ExprNode or None] or None]
# Cached result of subexpr_nodes()
# use_managed_ref boolean use ref-counted temps/assignments/etc.
# result_is_used boolean indicates that the result will be dropped and the
# result_code/temp_result can safely be set to None
# is_numpy_attribute boolean Is a Numpy module attribute
# annotation ExprNode or None PEP526 annotation for names or expressions
result_ctype = None
type = None
annotation = None
temp_code = None
old_temp = None # error checker for multiple frees etc.
use_managed_ref = True # can be set by optimisation transforms
result_is_used = True
is_numpy_attribute = False
# The Analyse Expressions phase for expressions is split
# into two sub-phases:
#
# Analyse Types
# Determines the result type of the expression based
# on the types of its sub-expressions, and inserts
# coercion nodes into the expression tree where needed.
# Marks nodes which will need to have temporary variables
# allocated.
#
# Allocate Temps
# Allocates temporary variables where needed, and fills
# in the result_code field of each node.
#
# ExprNode provides some convenience routines which
# perform both of the above phases. These should only
# be called from statement nodes, and only when no
# coercion nodes need to be added around the expression
# being analysed. In that case, the above two phases
# should be invoked separately.
#
# Framework code in ExprNode provides much of the common
# processing for the various phases. It makes use of the
# 'subexprs' class attribute of ExprNodes, which should
# contain a list of the names of attributes which can
# hold sub-nodes or sequences of sub-nodes.
#
# The framework makes use of a number of abstract methods.
# Their responsibilities are as follows.
#
# Declaration Analysis phase
#
# analyse_target_declaration
# Called during the Analyse Declarations phase to analyse
# the LHS of an assignment or argument of a del statement.
# Nodes which cannot be the LHS of an assignment need not
# implement it.
#
# Expression Analysis phase
#
# analyse_types
# - Call analyse_types on all sub-expressions.
# - Check operand types, and wrap coercion nodes around
# sub-expressions where needed.
# - Set the type of this node.
# - If a temporary variable will be required for the
# result, set the is_temp flag of this node.
#
# analyse_target_types
# Called during the Analyse Types phase to analyse
# the LHS of an assignment or argument of a del
# statement. Similar responsibilities to analyse_types.
#
# target_code
# Called by the default implementation of allocate_target_temps.
# Should return a C lvalue for assigning to the node. The default
# implementation calls calculate_result_code.
#
# check_const
# - Check that this node and its subnodes form a
# legal constant expression. If so, do nothing,
# otherwise call not_const.
#
# The default implementation of check_const
# assumes that the expression is not constant.
#
# check_const_addr
# - Same as check_const, except check that the
# expression is a C lvalue whose address is
# constant. Otherwise, call addr_not_const.
#
# The default implementation of calc_const_addr
# assumes that the expression is not a constant
# lvalue.
#
# Code Generation phase
#
# generate_evaluation_code
# - Call generate_evaluation_code for sub-expressions.
# - Perform the functions of generate_result_code
# (see below).
# - If result is temporary, call generate_disposal_code
# on all sub-expressions.
#
# A default implementation of generate_evaluation_code
# is provided which uses the following abstract methods:
#
# generate_result_code
# - Generate any C statements necessary to calculate
# the result of this node from the results of its
# sub-expressions.
#
# calculate_result_code
# - Should return a C code fragment evaluating to the
# result. This is only called when the result is not
# a temporary.
#
# generate_assignment_code
# Called on the LHS of an assignment.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the assignment.
# - If the assignment absorbed a reference, call
# generate_post_assignment_code on the RHS,
# otherwise call generate_disposal_code on it.
#
# generate_deletion_code
# Called on an argument of a del statement.
# - Call generate_evaluation_code for sub-expressions.
# - Generate code to perform the deletion.
# - Call generate_disposal_code on all sub-expressions.
#
#
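# --- Illustrative aside (not in the original source) ---
# A schematic subclass following the protocol above (not a real Cython node):
#
#   class AnswerNode(AtomicExprNode):
#       subexprs = []
#       def analyse_types(self, env):
#           self.type = PyrexTypes.c_long_type   # set the result type
#           return self                          # may return a coerced node
#       def calculate_result_code(self):
#           return "42"                          # C fragment, non-temp result
#       def generate_result_code(self, code):
#           pass                                 # no runtime work needed
#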
is_sequence_constructor = False
is_dict_literal = False
is_set_literal = False
is_string_literal = False
is_attribute = False
is_subscript = False
is_slice = False
is_buffer_access = False
is_memview_index = False
is_memview_slice = False
is_memview_broadcast = False
is_memview_copy_assignment = False
saved_subexpr_nodes = None
is_temp = False
is_target = False
is_starred = False
constant_result = constant_value_not_set
child_attrs = property(fget=operator.attrgetter('subexprs'))
def not_implemented(self, method_name):
print_call_chain(method_name, "not implemented") ###
raise InternalError(
"%s.%s not implemented" %
(self.__class__.__name__, method_name))
def is_lvalue(self):
return 0
def is_addressable(self):
return self.is_lvalue() and not self.type.is_memoryviewslice
def is_ephemeral(self):
# An ephemeral node is one whose result is in
# a Python temporary and we suspect there are no
# other references to it. Certain operations are
# disallowed on such values, since they are
# likely to result in a dangling pointer.
return self.type.is_pyobject and self.is_temp
def subexpr_nodes(self):
# Extract a list of subexpression nodes based
# on the contents of the subexprs class attribute.
nodes = []
for name in self.subexprs:
item = getattr(self, name)
if item is not None:
if type(item) is list:
nodes.extend(item)
else:
nodes.append(item)
return nodes
def result(self):
if self.is_temp:
#if not self.temp_code:
# pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
# raise RuntimeError("temp result name not set in %s at %r" % (
# self.__class__.__name__, pos))
return self.temp_code
else:
return self.calculate_result_code()
def pythran_result(self, type_=None):
if is_pythran_supported_node_or_none(self):
return to_pythran(self)
assert(type_ is not None)
return to_pythran(self, type_)
def is_c_result_required(self):
"""
Subtypes may return False here if result temp allocation can be skipped.
"""
return True
def result_as(self, type = None):
# Return the result code cast to the specified C type.
if (self.is_temp and self.type.is_pyobject and
type != py_object_type):
# Allocated temporaries are always PyObject *, which may not
# reflect the actual type (e.g. an extension type)
return typecast(type, py_object_type, self.result())
return typecast(type, self.ctype(), self.result())
def py_result(self):
# Return the result code cast to PyObject *.
return self.result_as(py_object_type)
def ctype(self):
# Return the native C type of the result (i.e. the
# C type of the result_code expression).
return self.result_ctype or self.type
def get_constant_c_result_code(self):
# Return the constant value of this node as a result code
# string, or None if the node is not constant. This method
# can be called when the constant result code is required
# before the code generation phase.
#
# The return value is a string that can represent a simple C
# value, a constant C name or a constant C expression. If the
# node type depends on Python code, this must return None.
return None
def calculate_constant_result(self):
# Calculate the constant compile time result value of this
# expression and store it in ``self.constant_result``. Does
# nothing by default, thus leaving ``self.constant_result``
# unknown. If valid, the result can be an arbitrary Python
# value.
#
# This must only be called when it is assured that all
# sub-expressions have a valid constant_result value. The
# ConstantFolding transform will do this.
pass
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def compile_time_value(self, denv):
# Return value of compile-time expression, or report error.
error(self.pos, "Invalid compile-time expression")
def compile_time_value_error(self, e):
error(self.pos, "Error in compile-time expression: %s: %s" % (
e.__class__.__name__, e))
# ------------- Declaration Analysis ----------------
def analyse_target_declaration(self, env):
error(self.pos, "Cannot assign to or delete this")
# ------------- Expression Analysis ----------------
def analyse_const_expression(self, env):
# Called during the analyse_declarations phase of a
# constant expression. Analyses the expression's type,
# checks whether it is a legal const expression,
# and determines its value.
node = self.analyse_types(env)
node.check_const()
return node
def analyse_expressions(self, env):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for a whole
# expression.
return self.analyse_types(env)
def analyse_target_expression(self, env, rhs):
# Convenience routine performing both the Type
# Analysis and Temp Allocation phases for the LHS of
# an assignment.
return self.analyse_target_types(env)
def analyse_boolean_expression(self, env):
# Analyse expression and coerce to a boolean.
node = self.analyse_types(env)
bool = node.coerce_to_boolean(env)
return bool
def analyse_temp_boolean_expression(self, env):
# Analyse boolean expression and coerce result into
# a temporary. This is used when a branch is to be
# performed on the result and we won't have an
# opportunity to ensure disposal code is executed
# afterwards. By forcing the result into a temporary,
# we ensure that all disposal has been done by the
# time we get the result.
node = self.analyse_types(env)
return node.coerce_to_boolean(env).coerce_to_simple(env)
# --------------- Type Inference -----------------
def type_dependencies(self, env):
# Returns the list of entries whose types must be determined
# before the type of self can be inferred.
if hasattr(self, 'type') and self.type is not None:
return ()
return sum([node.type_dependencies(env) for node in self.subexpr_nodes()], ())
def infer_type(self, env):
# Attempt to deduce the type of self.
# Differs from analyse_types as it avoids unnecessary
# analysis of subexpressions, but can assume everything
# in self.type_dependencies() has been resolved.
if hasattr(self, 'type') and self.type is not None:
return self.type
elif hasattr(self, 'entry') and self.entry is not None:
return self.entry.type
else:
self.not_implemented("infer_type")
def nonlocally_immutable(self):
# Returns whether this variable is a safe reference, i.e.
# can't be modified as part of globals or closures.
return self.is_literal or self.is_temp or self.type.is_array or self.type.is_cfunction
def inferable_item_node(self, index=0):
"""
Return a node that represents the (type) result of an indexing operation,
e.g. for tuple unpacking or iteration.
"""
return IndexNode(self.pos, base=self, index=IntNode(
self.pos, value=str(index), constant_result=index, type=PyrexTypes.c_py_ssize_t_type))
# --------------- Type Analysis ------------------
def analyse_as_module(self, env):
# If this node can be interpreted as a reference to a
# cimported module, return its scope, else None.
return None
def analyse_as_type(self, env):
# If this node can be interpreted as a reference to a
# type, return that type, else None.
return None
def analyse_as_extension_type(self, env):
# If this node can be interpreted as a reference to an
# extension type or builtin type, return its type, else None.
return None
def analyse_types(self, env):
self.not_implemented("analyse_types")
def analyse_target_types(self, env):
return self.analyse_types(env)
def nogil_check(self, env):
# By default, any expression based on Python objects is
# prevented in nogil environments. Subtypes must override
# this if they can work without the GIL.
if self.type and self.type.is_pyobject:
self.gil_error()
def gil_assignment_check(self, env):
if env.nogil and self.type.is_pyobject:
error(self.pos, "Assignment of Python object not allowed without gil")
def check_const(self):
self.not_const()
return False
def not_const(self):
error(self.pos, "Not allowed in a constant expression")
def check_const_addr(self):
self.addr_not_const()
return False
def addr_not_const(self):
error(self.pos, "Address is not constant")
# ----------------- Result Allocation -----------------
def result_in_temp(self):
# Return true if result is in a temporary owned by
# this node or one of its subexpressions. Overridden
# by certain nodes which can share the result of
# a subnode.
return self.is_temp
def target_code(self):
# Return code fragment for use as LHS of a C assignment.
return self.calculate_result_code()
def calculate_result_code(self):
self.not_implemented("calculate_result_code")
# def release_target_temp(self, env):
# # Release temporaries used by LHS of an assignment.
# self.release_subexpr_temps(env)
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("Temp allocated multiple times in %r: %r" % (self.__class__.__name__, self.pos))
type = self.type
if not type.is_void:
if type.is_pyobject:
type = PyrexTypes.py_object_type
elif not (self.result_is_used or type.is_memoryviewslice or self.is_c_result_required()):
self.temp_code = None
return
self.temp_code = code.funcstate.allocate_temp(
type, manage_ref=self.use_managed_ref)
else:
self.temp_code = None
def release_temp_result(self, code):
if not self.temp_code:
if not self.result_is_used:
# not used anyway, so ignore if not set up
return
pos = (os.path.basename(self.pos[0].get_description()),) + self.pos[1:] if self.pos else '(?)'
if self.old_temp:
raise RuntimeError("temp %s released multiple times in %s at %r" % (
self.old_temp, self.__class__.__name__, pos))
else:
raise RuntimeError("no temp, but release requested in %s at %r" % (
self.__class__.__name__, pos))
code.funcstate.release_temp(self.temp_code)
self.old_temp = self.temp_code
self.temp_code = None
# ---------------- Code Generation -----------------
def make_owned_reference(self, code):
"""
If result is a pyobject, make sure we own a reference to it.
If the result is in a temp, it is already a new reference.
"""
if self.type.is_pyobject and not self.result_in_temp():
code.put_incref(self.result(), self.ctype())
def make_owned_memoryviewslice(self, code):
"""
Make sure we own the reference to this memoryview slice.
"""
if not self.result_in_temp():
code.put_incref_memoryviewslice(self.result(),
have_gil=self.in_nogil_context)
def generate_evaluation_code(self, code):
# Generate code to evaluate this node and
# its sub-expressions, and dispose of any
# temporary results of its sub-expressions.
self.generate_subexpr_evaluation_code(code)
code.mark_pos(self.pos)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_result_code(code)
if self.is_temp and not (self.type.is_string or self.type.is_pyunicode_ptr):
# If we are temp we do not need to wait until this node is disposed
# before disposing children.
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def generate_subexpr_evaluation_code(self, code):
for node in self.subexpr_nodes():
node.generate_evaluation_code(code)
def generate_result_code(self, code):
self.not_implemented("generate_result_code")
def generate_disposal_code(self, code):
if self.is_temp:
if self.type.is_string or self.type.is_pyunicode_ptr:
# postponed from self.generate_evaluation_code()
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
if self.result():
if self.type.is_pyobject:
code.put_decref_clear(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_xdecref_memoryviewslice(
self.result(), have_gil=not self.in_nogil_context)
code.putln("%s.memview = NULL;" % self.result())
code.putln("%s.data = NULL;" % self.result())
else:
# Already done if self.is_temp
self.generate_subexpr_disposal_code(code)
def generate_subexpr_disposal_code(self, code):
# Generate code to dispose of temporary results
# of all sub-expressions.
for node in self.subexpr_nodes():
node.generate_disposal_code(code)
def generate_post_assignment_code(self, code):
if self.is_temp:
if self.type.is_string or self.type.is_pyunicode_ptr:
# postponed from self.generate_evaluation_code()
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
elif self.type.is_pyobject:
code.putln("%s = 0;" % self.result())
elif self.type.is_memoryviewslice:
code.putln("%s.memview = NULL;" % self.result())
code.putln("%s.data = NULL;" % self.result())
else:
self.generate_subexpr_disposal_code(code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
# Stub method for nodes which are not legal as
# the LHS of an assignment. An error will have
# been reported earlier.
pass
def generate_deletion_code(self, code, ignore_nonexisting=False):
# Stub method for nodes that are not legal as
# the argument of a del statement. An error
# will have been reported earlier.
pass
def free_temps(self, code):
if self.is_temp:
if not self.type.is_void:
self.release_temp_result(code)
else:
self.free_subexpr_temps(code)
def free_subexpr_temps(self, code):
for sub in self.subexpr_nodes():
sub.free_temps(code)
def generate_function_definitions(self, env, code):
pass
# ---------------- Annotation ---------------------
def annotate(self, code):
for node in self.subexpr_nodes():
node.annotate(code)
# ----------------- Coercion ----------------------
def coerce_to(self, dst_type, env):
# Coerce the result so that it can be assigned to
# something of type dst_type. If processing is necessary,
# wraps this node in a coercion node and returns that.
# Otherwise, returns this node unchanged.
#
# This method is called during the analyse_expressions
# phase of the src_node's processing.
#
# Note that subclasses that override this (especially
# ConstNodes) must not (re-)set their own .type attribute
# here. Since expression nodes may turn up in different
# places in the tree (e.g. inside of CloneNodes in cascaded
# assignments), this method must return a new node instance
# if it changes the type.
#
src = self
src_type = self.type
if self.check_for_coercion_error(dst_type, env):
return self
used_as_reference = dst_type.is_reference
if used_as_reference and not src_type.is_reference:
dst_type = dst_type.ref_base_type
if src_type.is_const:
src_type = src_type.const_base_type
if src_type.is_fused or dst_type.is_fused:
# See if we are coercing a fused function to a pointer to a
# specialized function
if (src_type.is_cfunction and not dst_type.is_fused and
dst_type.is_ptr and dst_type.base_type.is_cfunction):
dst_type = dst_type.base_type
for signature in src_type.get_all_specialized_function_types():
if signature.same_as(dst_type):
src.type = signature
src.entry = src.type.entry
src.entry.used = True
return self
if src_type.is_fused:
error(self.pos, "Type is not specialized")
elif src_type.is_null_ptr and dst_type.is_ptr:
# NULL can be implicitly cast to any pointer type
return self
else:
error(self.pos, "Cannot coerce to a type that is not specialized")
self.type = error_type
return self
if self.coercion_type is not None:
# This is purely for error checking purposes!
node = NameNode(self.pos, name='', type=self.coercion_type)
node.coerce_to(dst_type, env)
if dst_type.is_memoryviewslice:
from . import MemoryView
if not src.type.is_memoryviewslice:
if src.type.is_pyobject:
src = CoerceToMemViewSliceNode(src, dst_type, env)
elif src.type.is_array:
src = CythonArrayNode.from_carray(src, env).coerce_to(dst_type, env)
elif not src_type.is_error:
error(self.pos,
"Cannot convert '%s' to memoryviewslice" % (src_type,))
else:
if src.type.writable_needed:
dst_type.writable_needed = True
if not src.type.conforms_to(dst_type, broadcast=self.is_memview_broadcast,
copying=self.is_memview_copy_assignment):
if src.type.dtype.same_as(dst_type.dtype):
msg = "Memoryview '%s' not conformable to memoryview '%s'."
tup = src.type, dst_type
else:
msg = "Different base types for memoryviews (%s, %s)"
tup = src.type.dtype, dst_type.dtype
error(self.pos, msg % tup)
elif dst_type.is_pyobject:
if not src.type.is_pyobject:
if dst_type is bytes_type and src.type.is_int:
src = CoerceIntToBytesNode(src, env)
else:
src = CoerceToPyTypeNode(src, env, type=dst_type)
if not src.type.subtype_of(dst_type):
if src.constant_result is not None:
src = PyTypeTestNode(src, dst_type, env)
elif is_pythran_expr(dst_type) and is_pythran_supported_type(src.type):
# We let the compiler decide whether this is valid
return src
elif is_pythran_expr(src.type):
if is_pythran_supported_type(dst_type):
# Match the case where a pythran expr is assigned to a value, or vice versa.
# We let the C++ compiler decide whether this is valid or not!
return src
# Else, we need to convert the Pythran expression to a Python object
src = CoerceToPyTypeNode(src, env, type=dst_type)
elif src.type.is_pyobject:
if used_as_reference and dst_type.is_cpp_class:
warning(
self.pos,
"Cannot pass Python object as C++ data structure reference (%s &), will pass by copy." % dst_type)
src = CoerceFromPyTypeNode(dst_type, src, env)
elif (dst_type.is_complex
and src_type != dst_type
and dst_type.assignable_from(src_type)):
src = CoerceToComplexNode(src, dst_type, env)
else: # neither src nor dst are py types
# Added the string comparison, since for c types that
# is enough, but Cython gets confused when the types are
# in different pxi files.
# TODO: Remove this hack and require shared declarations.
if not (src.type == dst_type or str(src.type) == str(dst_type) or dst_type.assignable_from(src_type)):
self.fail_assignment(dst_type)
return src
def fail_assignment(self, dst_type):
error(self.pos, "Cannot assign type '%s' to '%s'" % (self.type, dst_type))
def check_for_coercion_error(self, dst_type, env, fail=False, default=None):
if fail and not default:
default = "Cannot assign type '%(FROM)s' to '%(TO)s'"
message = find_coercion_error((self.type, dst_type), default, env)
if message is not None:
error(self.pos, message % {'FROM': self.type, 'TO': dst_type})
return True
if fail:
self.fail_assignment(dst_type)
return True
return False
def coerce_to_pyobject(self, env):
return self.coerce_to(PyrexTypes.py_object_type, env)
def coerce_to_boolean(self, env):
# Coerce result to something acceptable as
# a boolean value.
# if it's constant, calculate the result now
if self.has_constant_result():
bool_value = bool(self.constant_result)
return BoolNode(self.pos, value=bool_value,
constant_result=bool_value)
type = self.type
if type.is_enum or type.is_error:
return self
elif type.is_pyobject or type.is_int or type.is_ptr or type.is_float:
return CoerceToBooleanNode(self, env)
elif type.is_cpp_class and type.scope and type.scope.lookup("operator bool"):
return SimpleCallNode(
self.pos,
function=AttributeNode(
self.pos, obj=self, attribute=StringEncoding.EncodedString('operator bool')),
args=[]).analyse_types(env)
elif type.is_ctuple:
bool_value = len(type.components) == 0
return BoolNode(self.pos, value=bool_value,
constant_result=bool_value)
else:
error(self.pos, "Type '%s' not acceptable as a boolean" % type)
return self
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to longint.
if self.type.is_int:
return self
else:
return self.coerce_to(PyrexTypes.c_long_type, env)
def coerce_to_temp(self, env):
# Ensure that the result is in a temporary.
if self.result_in_temp():
return self
else:
return CoerceToTempNode(self, env)
def coerce_to_simple(self, env):
# Ensure that the result is simple (see is_simple).
if self.is_simple():
return self
else:
return self.coerce_to_temp(env)
def is_simple(self):
# A node is simple if its result is something that can
# be referred to without performing any operations, e.g.
# a constant, local var, C global var, struct member
# reference, or temporary.
return self.result_in_temp()
def may_be_none(self):
if self.type and not (self.type.is_pyobject or
self.type.is_memoryviewslice):
return False
if self.has_constant_result():
return self.constant_result is not None
return True
def as_cython_attribute(self):
return None
def as_none_safe_node(self, message, error="PyExc_TypeError", format_args=()):
# Wraps the node in a NoneCheckNode if it is not known to be
# not-None (e.g. because it is a Python literal).
if self.may_be_none():
return NoneCheckNode(self, error, message, format_args)
else:
return self
@classmethod
def from_node(cls, node, **kwargs):
"""Instantiate this node class from another node, properly
copying over all attributes that one would forget otherwise.
"""
attributes = "cf_state cf_maybe_null cf_is_null constant_result".split()
for attr_name in attributes:
if attr_name in kwargs:
continue
try:
value = getattr(node, attr_name)
except AttributeError:
pass
else:
kwargs[attr_name] = value
return cls(node.pos, **kwargs)
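# --- Illustrative aside (not in the original source) ---
# Typical use in a transform: build a replacement node while carrying over
# the control-flow attributes listed above, e.g. (hypothetical values):
#   folded = BoolNode.from_node(old_node, value=True, constant_result=True)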
class AtomicExprNode(ExprNode):
# Abstract base class for expression nodes which have
# no sub-expressions.
subexprs = []
# Override to optimize -- we know we have no children
def generate_subexpr_evaluation_code(self, code):
pass
def generate_subexpr_disposal_code(self, code):
pass
class PyConstNode(AtomicExprNode):
# Abstract base class for constant Python values.
is_literal = 1
type = py_object_type
def is_simple(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self
def calculate_result_code(self):
return self.value
def generate_result_code(self, code):
pass
class NoneNode(PyConstNode):
# The constant value None
is_none = 1
value = "Py_None"
constant_result = None
nogil_check = None
def compile_time_value(self, denv):
return None
def may_be_none(self):
return True
def coerce_to(self, dst_type, env):
if not (dst_type.is_pyobject or dst_type.is_memoryviewslice or dst_type.is_error):
# Catch this error early and loudly.
error(self.pos, "Cannot assign None to %s" % dst_type)
return super(NoneNode, self).coerce_to(dst_type, env)
class EllipsisNode(PyConstNode):
# '...' in a subscript list.
value = "Py_Ellipsis"
constant_result = Ellipsis
def compile_time_value(self, denv):
return Ellipsis
class ConstNode(AtomicExprNode):
# Abstract base type for literal constant nodes.
#
# value string C code fragment
is_literal = 1
nogil_check = None
def is_simple(self):
return 1
def nonlocally_immutable(self):
return 1
def may_be_none(self):
return False
def analyse_types(self, env):
return self # Types are held in class variables
def check_const(self):
return True
def get_constant_c_result_code(self):
return self.calculate_result_code()
def calculate_result_code(self):
return str(self.value)
def generate_result_code(self, code):
pass
class BoolNode(ConstNode):
type = PyrexTypes.c_bint_type
# The constant value True or False
def calculate_constant_result(self):
self.constant_result = self.value
def compile_time_value(self, denv):
return self.value
def calculate_result_code(self):
if self.type.is_pyobject:
return self.value and 'Py_True' or 'Py_False'
else:
return str(int(self.value))
def coerce_to(self, dst_type, env):
if dst_type == self.type:
return self
if dst_type is py_object_type and self.type is Builtin.bool_type:
return self
if dst_type.is_pyobject and self.type.is_int:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.bool_type)
if dst_type.is_int and self.type.is_pyobject:
return BoolNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type)
return ConstNode.coerce_to(self, dst_type, env)
class NullNode(ConstNode):
type = PyrexTypes.c_null_ptr_type
value = "NULL"
constant_result = 0
def get_constant_c_result_code(self):
return self.value
class CharNode(ConstNode):
type = PyrexTypes.c_char_type
def calculate_constant_result(self):
self.constant_result = ord(self.value)
def compile_time_value(self, denv):
return ord(self.value)
def calculate_result_code(self):
return "'%s'" % StringEncoding.escape_char(self.value)
class IntNode(ConstNode):
# unsigned "" or "U"
# longness "" or "L" or "LL"
# is_c_literal True/False/None creator considers this a C integer literal
unsigned = ""
longness = ""
is_c_literal = None # unknown
def __init__(self, pos, **kwds):
ExprNode.__init__(self, pos, **kwds)
if 'type' not in kwds:
self.type = self.find_suitable_type_for_value()
def find_suitable_type_for_value(self):
if self.constant_result is constant_value_not_set:
try:
self.calculate_constant_result()
except ValueError:
pass
# we ignore 'is_c_literal = True' and instead map signed 32bit
# integers as C long values
if self.is_c_literal or \
not self.has_constant_result() or \
self.unsigned or self.longness == 'LL':
# clearly a C literal
rank = (self.longness == 'LL') and 2 or 1
suitable_type = PyrexTypes.modifiers_and_name_to_type[not self.unsigned, rank, "int"]
if self.type:
suitable_type = PyrexTypes.widest_numeric_type(suitable_type, self.type)
else:
# C literal or Python literal - split at 32bit boundary
if -2**31 <= self.constant_result < 2**31:
if self.type and self.type.is_int:
suitable_type = self.type
else:
suitable_type = PyrexTypes.c_long_type
else:
suitable_type = PyrexTypes.py_object_type
return suitable_type
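# --- Illustrative aside (not in the original source) ---
# Resulting type for a few literal spellings, per the rules above (type
# names assume PyrexTypes' usual modifier/rank mapping):
#   IntNode(pos, value="42")                 -> c_long_type (fits 32 bits)
#   IntNode(pos, value="2147483648")         -> py_object_type (too wide)
#   IntNode(pos, value="42", longness="LL")  -> a C long long type
#   IntNode(pos, value="42", unsigned="U")   -> an unsigned C long type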
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
elif dst_type.is_float:
if self.has_constant_result():
return FloatNode(self.pos, value='%d.0' % int(self.constant_result), type=dst_type,
constant_result=float(self.constant_result))
else:
return FloatNode(self.pos, value=self.value, type=dst_type,
constant_result=not_a_constant)
if dst_type.is_numeric and not dst_type.is_complex:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type=dst_type, is_c_literal=True,
unsigned=self.unsigned, longness=self.longness)
return node
elif dst_type.is_pyobject:
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
type=PyrexTypes.py_object_type, is_c_literal=False,
unsigned=self.unsigned, longness=self.longness)
else:
# FIXME: not setting the type here to keep it working with
# complex numbers. Should they be special cased?
node = IntNode(self.pos, value=self.value, constant_result=self.constant_result,
unsigned=self.unsigned, longness=self.longness)
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def coerce_to_boolean(self, env):
return IntNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=PyrexTypes.c_bint_type,
unsigned=self.unsigned, longness=self.longness)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
# pre-allocate a Python version of the number
plain_integer_string = str(Utils.str_to_number(self.value))
self.result_code = code.get_py_int(plain_integer_string, self.longness)
else:
self.result_code = self.get_constant_c_result_code()
def get_constant_c_result_code(self):
unsigned, longness = self.unsigned, self.longness
literal = self.value_as_c_integer_string()
if not (unsigned or longness) and self.type.is_int and literal[0] == '-' and literal[1] != '0':
# negative decimal literal => guess longness from type to prevent wrap-around
if self.type.rank >= PyrexTypes.c_longlong_type.rank:
longness = 'LL'
elif self.type.rank >= PyrexTypes.c_long_type.rank:
longness = 'L'
return literal + unsigned + longness
def value_as_c_integer_string(self):
value = self.value
if len(value) <= 2:
# too short to go wrong (and simplifies code below)
return value
neg_sign = ''
if value[0] == '-':
neg_sign = '-'
value = value[1:]
if value[0] == '0':
literal_type = value[1] # 0'o' - 0'b' - 0'x'
# 0x123 hex literals and 0123 octal literals work nicely in C
# but C-incompatible Py3 oct/bin notations need conversion
if neg_sign and literal_type in 'oOxX0123456789' and value[2:].isdigit():
# negative hex/octal literal => prevent C compiler from using
# unsigned integer types by converting to decimal (see C standard 6.4.4.1)
value = str(Utils.str_to_number(value))
elif literal_type in 'oO':
value = '0' + value[2:] # '0o123' => '0123'
elif literal_type in 'bB':
value = str(int(value[2:], 2))
elif value.isdigit() and not self.unsigned and not self.longness:
if not neg_sign:
# C compilers do not consider unsigned types for decimal literals,
# but they do for hex (see C standard 6.4.4.1)
value = '0x%X' % int(value)
return neg_sign + value
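# --- Illustrative aside (not in the original source) ---
# Expected rewrites, derived from the branches above:
#   '0o123'  -> '0123'   (Py3 octal spelled as a C octal literal)
#   '0b101'  -> '5'      (C has no binary literals; use decimal)
#   '-0x80'  -> '-128'   (negative hex to decimal, avoiding unsigned types)
#   '123'    -> '0x7B'   (positive decimal to hex so C may pick unsigned types)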
def calculate_result_code(self):
return self.result_code
def calculate_constant_result(self):
self.constant_result = Utils.str_to_number(self.value)
def compile_time_value(self, denv):
return Utils.str_to_number(self.value)
class FloatNode(ConstNode):
type = PyrexTypes.c_double_type
def calculate_constant_result(self):
self.constant_result = float(self.value)
def compile_time_value(self, denv):
return float(self.value)
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject and self.type.is_float:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=Builtin.float_type)
if dst_type.is_float and self.type.is_pyobject:
return FloatNode(
self.pos, value=self.value,
constant_result=self.constant_result,
type=dst_type)
return ConstNode.coerce_to(self, dst_type, env)
def calculate_result_code(self):
return self.result_code
def get_constant_c_result_code(self):
strval = self.value
assert isinstance(strval, basestring)
cmpval = repr(float(strval))
if cmpval == 'nan':
return "(Py_HUGE_VAL * 0)"
elif cmpval == 'inf':
return "Py_HUGE_VAL"
elif cmpval == '-inf':
return "(-Py_HUGE_VAL)"
else:
return strval
def generate_evaluation_code(self, code):
c_value = self.get_constant_c_result_code()
if self.type.is_pyobject:
self.result_code = code.get_py_float(self.value, c_value)
else:
self.result_code = c_value
def _analyse_name_as_type(name, pos, env):
type = PyrexTypes.parse_basic_type(name)
if type is not None:
return type
global_entry = env.global_scope().lookup(name)
if global_entry and global_entry.type and (
global_entry.type.is_extension_type
or global_entry.type.is_struct_or_union
or global_entry.type.is_builtin_type
or global_entry.type.is_cpp_class):
return global_entry.type
from .TreeFragment import TreeFragment
with local_errors(ignore=True):
pos = (pos[0], pos[1], pos[2]-7)
try:
declaration = TreeFragment(u"sizeof(%s)" % name, name=pos[0].filename, initial_pos=pos)
except CompileError:
pass
else:
sizeof_node = declaration.root.stats[0].expr
if isinstance(sizeof_node, SizeofTypeNode):
sizeof_node = sizeof_node.analyse_types(env)
if isinstance(sizeof_node, SizeofTypeNode):
return sizeof_node.arg_type
return None
class BytesNode(ConstNode):
# A char* or bytes literal
#
# value BytesLiteral
is_string_literal = True
# start off as Python 'bytes' to support len() in O(1)
type = bytes_type
def calculate_constant_result(self):
self.constant_result = self.value
def as_sliced_node(self, start, stop, step=None):
value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding)
return BytesNode(self.pos, value=value, constant_result=value)
def compile_time_value(self, denv):
return self.value.byteencode()
def analyse_as_type(self, env):
return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)
def can_coerce_to_char_literal(self):
return len(self.value) == 1
def coerce_to_boolean(self, env):
# This is special because testing a C char* for truth directly
# would yield the wrong result.
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def coerce_to(self, dst_type, env):
if self.type == dst_type:
return self
if dst_type.is_int:
if not self.can_coerce_to_char_literal():
error(self.pos, "Only single-character string literals can be coerced into ints.")
return self
if dst_type.is_unicode_char:
error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
return self
return CharNode(self.pos, value=self.value,
constant_result=ord(self.value))
node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result)
if dst_type.is_pyobject:
if dst_type in (py_object_type, Builtin.bytes_type):
node.type = Builtin.bytes_type
else:
self.check_for_coercion_error(dst_type, env, fail=True)
return node
elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
node.type = dst_type
return node
elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type):
node.type = (PyrexTypes.c_const_char_ptr_type if dst_type == PyrexTypes.c_const_uchar_ptr_type
else PyrexTypes.c_char_ptr_type)
return CastNode(node, dst_type)
elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
# Exclude the case of passing a C string literal into a non-const C++ string.
if not dst_type.is_cpp_class or dst_type.is_const:
node.type = dst_type
return node
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
result = code.get_py_string_const(self.value)
elif self.type.is_const:
result = code.get_string_const(self.value)
else:
# not const => use plain C string literal and cast to mutable type
literal = self.value.as_c_string_literal()
# C++ may require a cast
result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal)
self.result_code = result
def get_constant_c_result_code(self):
return None # FIXME
def calculate_result_code(self):
return self.result_code
class UnicodeNode(ConstNode):
# A Py_UNICODE* or unicode literal
#
# value EncodedString
# bytes_value BytesLiteral the literal parsed as bytes string
# ('-3' unicode literals only)
is_string_literal = True
bytes_value = None
type = unicode_type
def calculate_constant_result(self):
self.constant_result = self.value
def analyse_as_type(self, env):
return _analyse_name_as_type(self.value, self.pos, env)
def as_sliced_node(self, start, stop, step=None):
if StringEncoding.string_contains_surrogates(self.value[:stop]):
# this is unsafe as it may give different results
# in different runtimes
return None
value = StringEncoding.EncodedString(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.bytes_value is not None:
bytes_value = StringEncoding.bytes_literal(
self.bytes_value[start:stop:step], self.bytes_value.encoding)
else:
bytes_value = None
return UnicodeNode(
self.pos, value=value, bytes_value=bytes_value,
constant_result=value)
def coerce_to(self, dst_type, env):
if dst_type is self.type:
pass
elif dst_type.is_unicode_char:
if not self.can_coerce_to_char_literal():
error(self.pos,
"Only single-character Unicode string literals or "
"surrogate pairs can be coerced into Py_UCS4/Py_UNICODE.")
return self
int_value = ord(self.value)
return IntNode(self.pos, type=dst_type, value=str(int_value),
constant_result=int_value)
elif not dst_type.is_pyobject:
if dst_type.is_string and self.bytes_value is not None:
# special case: '-3' enforced unicode literal used in a
# C char* context
return BytesNode(self.pos, value=self.bytes_value
).coerce_to(dst_type, env)
if dst_type.is_pyunicode_ptr:
node = UnicodeNode(self.pos, value=self.value)
node.type = dst_type
return node
error(self.pos,
"Unicode literals do not support coercion to C types other "
"than Py_UNICODE/Py_UCS4 (for characters) or Py_UNICODE* "
"(for strings).")
elif dst_type not in (py_object_type, Builtin.basestring_type):
self.check_for_coercion_error(dst_type, env, fail=True)
return self
def can_coerce_to_char_literal(self):
return len(self.value) == 1
## or (len(self.value) == 2
## and (0xD800 <= self.value[0] <= 0xDBFF)
## and (0xDC00 <= self.value[1] <= 0xDFFF))
def coerce_to_boolean(self, env):
bool_value = bool(self.value)
return BoolNode(self.pos, value=bool_value, constant_result=bool_value)
def contains_surrogates(self):
return StringEncoding.string_contains_surrogates(self.value)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
# FIXME: this should go away entirely!
# Since string_contains_lone_surrogates() returns False for surrogate pairs in Py2/UCS2,
# Py2 can generate different code from Py3 here. Let's hope we get away with claiming that
# the processing of surrogate pairs in code was always ambiguous and led to different results
# on 16/32-bit Unicode platforms.
if StringEncoding.string_contains_lone_surrogates(self.value):
# lone (unpaired) surrogates are not really portable and cannot be
# decoded by the UTF-8 codec in Py3.3
self.result_code = code.get_py_const(py_object_type, 'ustring')
data_cname = code.get_string_const(
StringEncoding.BytesLiteral(self.value.encode('unicode_escape')))
const_code = code.get_cached_constants_writer(self.result_code)
if const_code is None:
return # already initialised
const_code.mark_pos(self.pos)
const_code.putln(
"%s = PyUnicode_DecodeUnicodeEscape(%s, sizeof(%s) - 1, NULL); %s" % (
self.result_code,
data_cname,
data_cname,
const_code.error_goto_if_null(self.result_code, self.pos)))
const_code.put_error_if_neg(
self.pos, "__Pyx_PyUnicode_READY(%s)" % self.result_code)
else:
self.result_code = code.get_py_string_const(self.value)
else:
self.result_code = code.get_pyunicode_ptr_const(self.value)
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
return self.value
class StringNode(PyConstNode):
# A Python str object, i.e. a byte string in Python 2.x and a
# unicode string in Python 3.x
#
# value BytesLiteral (or EncodedString with ASCII content)
# unicode_value EncodedString or None
# is_identifier boolean
type = str_type
is_string_literal = True
is_identifier = None
unicode_value = None
def calculate_constant_result(self):
if self.unicode_value is not None:
# only the Unicode value is portable across Py2/3
self.constant_result = self.unicode_value
def analyse_as_type(self, env):
return _analyse_name_as_type(self.unicode_value or self.value.decode('ISO8859-1'), self.pos, env)
def as_sliced_node(self, start, stop, step=None):
value = type(self.value)(self.value[start:stop:step])
value.encoding = self.value.encoding
if self.unicode_value is not None:
if StringEncoding.string_contains_surrogates(self.unicode_value[:stop]):
# this is unsafe as it may give different results in different runtimes
return None
unicode_value = StringEncoding.EncodedString(
self.unicode_value[start:stop:step])
else:
unicode_value = None
return StringNode(
self.pos, value=value, unicode_value=unicode_value,
constant_result=value, is_identifier=self.is_identifier)
def coerce_to(self, dst_type, env):
if dst_type is not py_object_type and not str_type.subtype_of(dst_type):
# if dst_type is Builtin.bytes_type:
# # special case: bytes = 'str literal'
# return BytesNode(self.pos, value=self.value)
if not dst_type.is_pyobject:
return BytesNode(self.pos, value=self.value).coerce_to(dst_type, env)
if dst_type is not Builtin.basestring_type:
self.check_for_coercion_error(dst_type, env, fail=True)
return self
def can_coerce_to_char_literal(self):
return not self.is_identifier and len(self.value) == 1
def generate_evaluation_code(self, code):
self.result_code = code.get_py_string_const(
self.value, identifier=self.is_identifier, is_str=True,
unicode_value=self.unicode_value)
def get_constant_c_result_code(self):
return None
def calculate_result_code(self):
return self.result_code
def compile_time_value(self, env):
if self.value.is_unicode:
return self.value
if not IS_PYTHON3:
# use plain str/bytes object in Py2
return self.value.byteencode()
# in Py3, always return a Unicode string
if self.unicode_value is not None:
return self.unicode_value
return self.value.decode('iso8859-1')
class IdentifierStringNode(StringNode):
# A special str value that represents an identifier (bytes in Py2,
# unicode in Py3).
is_identifier = True
class ImagNode(AtomicExprNode):
# Imaginary number literal
#
# value string imaginary part (float value)
type = PyrexTypes.c_double_complex_type
def calculate_constant_result(self):
self.constant_result = complex(0.0, float(self.value))
def compile_time_value(self, denv):
return complex(0.0, float(self.value))
def analyse_types(self, env):
self.type.create_declaration_utility_code(env)
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if self.type is dst_type:
return self
node = ImagNode(self.pos, value=self.value)
if dst_type.is_pyobject:
node.is_temp = 1
node.type = Builtin.complex_type
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return AtomicExprNode.coerce_to(node, dst_type, env)
gil_message = "Constructing complex number"
def calculate_result_code(self):
if self.type.is_pyobject:
return self.result()
else:
return "%s(0, %r)" % (self.type.from_parts, float(self.value))
def generate_result_code(self, code):
if self.type.is_pyobject:
code.putln(
"%s = PyComplex_FromDoubles(0.0, %r); %s" % (
self.result(),
float(self.value),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class NewExprNode(AtomicExprNode):
# C++ new statement
#
# cppclass node c++ class to create
type = None
def infer_type(self, env):
type = self.cppclass.analyse_as_type(env)
if type is None or not type.is_cpp_class:
error(self.pos, "new operator can only be applied to a C++ class")
self.type = error_type
return
self.cpp_check(env)
constructor = type.get_constructor(self.pos)
self.class_type = type
self.entry = constructor
self.type = constructor.type
return self.type
def analyse_types(self, env):
if self.type is None:
self.infer_type(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
pass
def calculate_result_code(self):
return "new " + self.class_type.empty_declaration_code()
class NameNode(AtomicExprNode):
# Reference to a local or global variable name.
#
# name string Python name of the variable
# entry Entry Symbol table entry
# type_entry Entry For extension type names, the original type entry
# cf_is_null boolean Is uninitialized before this node
# cf_maybe_null boolean Maybe uninitialized before this node
# allow_null boolean Don't raise UnboundLocalError
# nogil boolean Whether it is used in a nogil context
is_name = True
is_cython_module = False
cython_attribute = None
lhs_of_first_assignment = False # TODO: remove me
is_used_as_rvalue = 0
entry = None
type_entry = None
cf_maybe_null = True
cf_is_null = False
allow_null = False
nogil = False
inferred_type = None
def as_cython_attribute(self):
return self.cython_attribute
def type_dependencies(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is not None and self.entry.type.is_unspecified:
return (self,)
else:
return ()
def infer_type(self, env):
if self.entry is None:
self.entry = env.lookup(self.name)
if self.entry is None or self.entry.type is unspecified_type:
if self.inferred_type is not None:
return self.inferred_type
return py_object_type
elif (self.entry.type.is_extension_type or self.entry.type.is_builtin_type) and \
self.name == self.entry.type.name:
# Unfortunately the type attribute of type objects
# is used for the pointer to the type they represent.
return type_type
elif self.entry.type.is_cfunction:
if self.entry.scope.is_builtin_scope:
# special case: optimised builtin functions must be treated as Python objects
return py_object_type
else:
# special case: referring to a C function must return its pointer
return PyrexTypes.CPtrType(self.entry.type)
else:
            # If the entry is inferred as a Python object, it's safe to use
            # the local NameNode's inferred_type.
if self.entry.type.is_pyobject and self.inferred_type:
                # Overflow may happen if it is an integer type
if not (self.inferred_type.is_int and self.entry.might_overflow):
return self.inferred_type
return self.entry.type
def compile_time_value(self, denv):
try:
return denv.lookup(self.name)
except KeyError:
error(self.pos, "Compile-time name '%s' not defined" % self.name)
def get_constant_c_result_code(self):
if not self.entry or self.entry.type.is_pyobject:
return None
return self.entry.cname
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a builtin
# C function with a Python equivalent, manufacture a NameNode
# referring to the Python builtin.
#print "NameNode.coerce_to:", self.name, dst_type ###
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction:
var_entry = entry.as_variable
if var_entry:
if var_entry.is_builtin and var_entry.is_const:
var_entry = env.declare_builtin(var_entry.name, self.pos)
node = NameNode(self.pos, name = self.name)
node.entry = var_entry
node.analyse_rvalue_entry(env)
return node
return super(NameNode, self).coerce_to(dst_type, env)
def declare_from_annotation(self, env, as_target=False):
"""Implements PEP 526 annotation typing in a fairly relaxed way.
Annotations are ignored for global variables, Python class attributes and already declared variables.
String literals are allowed and ignored.
The ambiguous Python types 'int' and 'long' are ignored and the 'cython.int' form must be used instead.
"""
if not env.directives['annotation_typing']:
return
if env.is_module_scope or env.is_py_class_scope:
# annotations never create global cdef names and Python classes don't support them anyway
return
name = self.name
if self.entry or env.lookup_here(name) is not None:
# already declared => ignore annotation
return
annotation = self.annotation
if annotation.is_string_literal:
# name: "description" => not a type, but still a declared variable or attribute
atype = None
else:
_, atype = analyse_type_annotation(annotation, env)
if atype is None:
atype = unspecified_type if as_target and env.directives['infer_types'] != False else py_object_type
self.entry = env.declare_var(name, atype, self.pos, is_cdef=not as_target)
self.entry.annotation = annotation
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module.
# Returns the module scope, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.as_module:
return entry.as_module
return None
def analyse_as_type(self, env):
if self.cython_attribute:
type = PyrexTypes.parse_basic_type(self.cython_attribute)
else:
type = PyrexTypes.parse_basic_type(self.name)
if type:
return type
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
return entry.type
else:
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type.
# Returns the extension type, or None.
entry = self.entry
if not entry:
entry = env.lookup(self.name)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_target_declaration(self, env):
if not self.entry:
self.entry = env.lookup_here(self.name)
if not self.entry and self.annotation is not None:
# name : type = ...
self.declare_from_annotation(env, as_target=True)
if not self.entry:
if env.directives['warn.undeclared']:
warning(self.pos, "implicit declaration of '%s'" % self.name, 1)
if env.directives['infer_types'] != False:
type = unspecified_type
else:
type = py_object_type
self.entry = env.declare_var(self.name, type, self.pos)
if self.entry.is_declared_generic:
self.result_ctype = py_object_type
if self.entry.as_module:
            # a cimported module's namespace can shadow actual variables
self.entry.is_variable = 1
def analyse_types(self, env):
self.initialized_check = env.directives['initializedcheck']
entry = self.entry
if entry is None:
entry = env.lookup(self.name)
if not entry:
entry = env.declare_builtin(self.name, self.pos)
if entry and entry.is_builtin and entry.is_const:
self.is_literal = True
if not entry:
self.type = PyrexTypes.error_type
return self
self.entry = entry
entry.used = 1
if entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(entry)
self.analyse_rvalue_entry(env)
return self
def analyse_target_types(self, env):
self.analyse_entry(env, is_target=True)
entry = self.entry
if entry.is_cfunction and entry.as_variable:
# FIXME: unify "is_overridable" flags below
if (entry.is_overridable or entry.type.is_overridable) or not self.is_lvalue() and entry.fused_cfunction:
# We need this for assigning to cpdef names and for the fused 'def' TreeFragment
entry = self.entry = entry.as_variable
self.type = entry.type
if self.type.is_const:
error(self.pos, "Assignment to const '%s'" % self.name)
if self.type.is_reference:
error(self.pos, "Assignment to reference '%s'" % self.name)
if not self.is_lvalue():
error(self.pos, "Assignment to non-lvalue '%s'" % self.name)
self.type = PyrexTypes.error_type
entry.used = 1
if entry.type.is_buffer:
from . import Buffer
Buffer.used_buffer_aux_vars(entry)
return self
def analyse_rvalue_entry(self, env):
#print "NameNode.analyse_rvalue_entry:", self.name ###
#print "Entry:", self.entry.__dict__ ###
self.analyse_entry(env)
entry = self.entry
if entry.is_declared_generic:
self.result_ctype = py_object_type
if entry.is_pyglobal or entry.is_builtin:
if entry.is_builtin and entry.is_const:
self.is_temp = 0
else:
self.is_temp = 1
self.is_used_as_rvalue = 1
elif entry.type.is_memoryviewslice:
self.is_temp = False
self.is_used_as_rvalue = True
self.use_managed_ref = True
return self
def nogil_check(self, env):
self.nogil = True
if self.is_used_as_rvalue:
entry = self.entry
if entry.is_builtin:
if not entry.is_const: # cached builtins are ok
self.gil_error()
elif entry.is_pyglobal:
self.gil_error()
gil_message = "Accessing Python global or builtin"
def analyse_entry(self, env, is_target=False):
#print "NameNode.analyse_entry:", self.name ###
self.check_identifier_kind()
entry = self.entry
type = entry.type
if (not is_target and type.is_pyobject and self.inferred_type and
self.inferred_type.is_builtin_type):
# assume that type inference is smarter than the static entry
type = self.inferred_type
self.type = type
def check_identifier_kind(self):
# Check that this is an appropriate kind of name for use in an
# expression. Also finds the variable entry associated with
# an extension type.
entry = self.entry
if entry.is_type and entry.type.is_extension_type:
self.type_entry = entry
if entry.is_type and entry.type.is_enum:
py_entry = Symtab.Entry(self.name, None, py_object_type)
py_entry.is_pyglobal = True
py_entry.scope = self.entry.scope
self.entry = py_entry
elif not (entry.is_const or entry.is_variable or
entry.is_builtin or entry.is_cfunction or
entry.is_cpp_class):
if self.entry.as_variable:
self.entry = self.entry.as_variable
elif not self.is_cython_module:
error(self.pos, "'%s' is not a constant, variable or function identifier" % self.name)
def is_cimported_module_without_shadow(self, env):
if self.is_cython_module or self.cython_attribute:
return False
entry = self.entry or env.lookup(self.name)
return entry.as_module and not entry.is_variable
def is_simple(self):
# If it's not a C variable, it'll be in a temp.
return 1
def may_be_none(self):
if self.cf_state and self.type and (self.type.is_pyobject or
self.type.is_memoryviewslice):
            # guard against infinite recursion on self-dependencies
if getattr(self, '_none_checking', False):
# self-dependency - either this node receives a None
                # value from *another* node, or it cannot reference
# None at this point => safe to assume "not None"
return False
self._none_checking = True
# evaluate control flow state to see if there were any
# potential None values assigned to the node so far
may_be_none = False
for assignment in self.cf_state:
if assignment.rhs.may_be_none():
may_be_none = True
break
del self._none_checking
return may_be_none
return super(NameNode, self).may_be_none()
def nonlocally_immutable(self):
if ExprNode.nonlocally_immutable(self):
return True
entry = self.entry
if not entry or entry.in_closure:
return False
return entry.is_local or entry.is_arg or entry.is_builtin or entry.is_readonly
def calculate_target_results(self, env):
pass
def check_const(self):
entry = self.entry
if entry is not None and not (
entry.is_const or
entry.is_cfunction or
entry.is_builtin or
entry.type.is_const):
self.not_const()
return False
return True
def check_const_addr(self):
entry = self.entry
if not (entry.is_cglobal or entry.is_cfunction or entry.is_builtin):
self.addr_not_const()
return False
return True
def is_lvalue(self):
return (
self.entry.is_variable and
not self.entry.is_readonly
) or (
self.entry.is_cfunction and
self.entry.is_overridable
)
def is_addressable(self):
return self.entry.is_variable and not self.type.is_memoryviewslice
def is_ephemeral(self):
# Name nodes are never ephemeral, even if the
# result is in a temporary.
return 0
def calculate_result_code(self):
entry = self.entry
if not entry:
return "" # There was an error earlier
return entry.cname
def generate_result_code(self, code):
assert hasattr(self, 'entry')
entry = self.entry
if entry is None:
return # There was an error earlier
if entry.utility_code:
code.globalstate.use_utility_code(entry.utility_code)
if entry.is_builtin and entry.is_const:
return # Lookup already cached
elif entry.is_pyclass_attr:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.is_builtin:
namespace = Naming.builtins_cname
else: # entry.is_pyglobal
namespace = entry.scope.namespace_cname
if not self.cf_is_null:
code.putln(
'%s = PyObject_GetItem(%s, %s);' % (
self.result(),
namespace,
interned_cname))
code.putln('if (unlikely(!%s)) {' % self.result())
code.putln('PyErr_Clear();')
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'__Pyx_GetModuleGlobalName(%s, %s);' % (
self.result(),
interned_cname))
if not self.cf_is_null:
code.putln("}")
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
elif entry.is_builtin and not entry.scope.is_module_scope:
# known builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetBuiltinName", "ObjectHandling.c"))
code.putln(
'%s = __Pyx_GetBuiltinName(%s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_pyglobal or (entry.is_builtin and entry.scope.is_module_scope):
# name in class body, global name or unknown builtin
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
if entry.scope.is_module_scope:
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetModuleGlobalName", "ObjectHandling.c"))
code.putln(
'__Pyx_GetModuleGlobalName(%s, %s); %s' % (
self.result(),
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
else:
# FIXME: is_pyglobal is also used for class namespace
code.globalstate.use_utility_code(
UtilityCode.load_cached("GetNameInClass", "ObjectHandling.c"))
code.putln(
'__Pyx_GetNameInClass(%s, %s, %s); %s' % (
self.result(),
entry.scope.namespace_cname,
interned_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif entry.is_local or entry.in_closure or entry.from_closure or entry.type.is_memoryviewslice:
# Raise UnboundLocalError for objects and memoryviewslices
raise_unbound = (
(self.cf_maybe_null or self.cf_is_null) and not self.allow_null)
null_code = entry.type.check_for_null_code(entry.cname)
memslice_check = entry.type.is_memoryviewslice and self.initialized_check
if null_code and raise_unbound and (entry.type.is_pyobject or memslice_check):
code.put_error_if_unbound(self.pos, entry, self.in_nogil_context)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
#print "NameNode.generate_assignment_code:", self.name ###
entry = self.entry
if entry is None:
return # There was an error earlier
if (self.entry.type.is_ptr and isinstance(rhs, ListNode)
and not self.lhs_of_first_assignment and not rhs.in_module_scope):
error(self.pos, "Literal list must be assigned to pointer at time of declaration")
        # is_pyglobal seems to be True for module-level globals only.
# We use this to access class->tp_dict if necessary.
if entry.is_pyglobal:
assert entry.type.is_pyobject, "Python global or builtin not a Python object"
interned_cname = code.intern_identifier(self.entry.name)
namespace = self.entry.scope.namespace_cname
if entry.is_member:
# if the entry is a member we have to cheat: SetAttr does not work
# on types, so we create a descriptor which is then added to tp_dict
setter = 'PyDict_SetItem'
namespace = '%s->tp_dict' % namespace
elif entry.scope.is_module_scope:
setter = 'PyDict_SetItem'
namespace = Naming.moddict_cname
elif entry.is_pyclass_attr:
code.globalstate.use_utility_code(UtilityCode.load_cached("SetNameInClass", "ObjectHandling.c"))
setter = '__Pyx_SetNameInClass'
else:
assert False, repr(entry)
code.put_error_if_neg(
self.pos,
'%s(%s, %s, %s)' % (
setter,
namespace,
interned_cname,
rhs.py_result()))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating disposal code for %s" % rhs)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
if entry.is_member:
# in Py2.6+, we need to invalidate the method cache
code.putln("PyType_Modified(%s);" %
entry.scope.parent_type.typeptr_cname)
else:
if self.type.is_memoryviewslice:
self.generate_acquire_memoryviewslice(rhs, code)
elif self.type.is_buffer:
# Generate code for doing the buffer release/acquisition.
# This might raise an exception in which case the assignment (done
# below) will not happen.
#
                # The reason this is not in a typetest-like node is that the
                # variables that the acquired buffer info is stored to are
                # allocated per entry and coupled with it.
self.generate_acquire_buffer(rhs, code)
assigned = False
if self.type.is_pyobject:
#print "NameNode.generate_assignment_code: to", self.name ###
#print "...from", rhs ###
#print "...LHS type", self.type, "ctype", self.ctype() ###
#print "...RHS type", rhs.type, "ctype", rhs.ctype() ###
if self.use_managed_ref:
rhs.make_owned_reference(code)
is_external_ref = entry.is_cglobal or self.entry.in_closure or self.entry.from_closure
if is_external_ref:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xgotref(self.py_result())
else:
code.put_gotref(self.py_result())
assigned = True
if entry.is_cglobal:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
if not self.cf_is_null:
if self.cf_maybe_null:
code.put_xdecref_set(
self.result(), rhs.result_as(self.ctype()))
else:
code.put_decref_set(
self.result(), rhs.result_as(self.ctype()))
else:
assigned = False
if is_external_ref:
code.put_giveref(rhs.py_result())
if not self.type.is_memoryviewslice:
if not assigned:
if overloaded_assignment:
result = rhs.result()
if exception_check == '+':
translate_cpp_exception(
code, self.pos,
'%s = %s;' % (self.result(), result),
self.result() if self.type.is_pyobject else None,
exception_value, self.in_nogil_context)
else:
code.putln('%s = %s;' % (self.result(), result))
else:
result = rhs.result_as(self.ctype())
if is_pythran_expr(self.type):
code.putln('new (&%s) decltype(%s){%s};' % (self.result(), self.result(), result))
elif result != self.result():
code.putln('%s = %s;' % (self.result(), result))
if debug_disposal_code:
print("NameNode.generate_assignment_code:")
print("...generating post-assignment code for %s" % rhs)
rhs.generate_post_assignment_code(code)
elif rhs.result_in_temp():
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
def generate_acquire_memoryviewslice(self, rhs, code):
"""
Slices, coercions from objects, return values etc are new references.
We have a borrowed reference in case of dst = src
"""
from . import MemoryView
MemoryView.put_acquire_memoryviewslice(
lhs_cname=self.result(),
lhs_type=self.type,
lhs_pos=self.pos,
rhs=rhs,
code=code,
have_gil=not self.in_nogil_context,
first_assignment=self.cf_is_null)
def generate_acquire_buffer(self, rhs, code):
        # rhstmp is only used when the rhs is a complicated expression leading to
        # the object, to avoid repeating the same C expression for every reference
        # to the rhs. It does NOT hold a reference.
pretty_rhs = isinstance(rhs, NameNode) or rhs.is_temp
if pretty_rhs:
rhstmp = rhs.result_as(self.ctype())
else:
rhstmp = code.funcstate.allocate_temp(self.entry.type, manage_ref=False)
code.putln('%s = %s;' % (rhstmp, rhs.result_as(self.ctype())))
from . import Buffer
Buffer.put_assign_to_buffer(self.result(), rhstmp, self.entry,
is_initialized=not self.lhs_of_first_assignment,
pos=self.pos, code=code)
if not pretty_rhs:
code.putln("%s = 0;" % rhstmp)
code.funcstate.release_temp(rhstmp)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if self.entry is None:
return # There was an error earlier
elif self.entry.is_pyclass_attr:
namespace = self.entry.scope.namespace_cname
interned_cname = code.intern_identifier(self.entry.name)
if ignore_nonexisting:
key_error_code = 'PyErr_Clear(); else'
else:
# minor hack: fake a NameError on KeyError
key_error_code = (
'{ PyErr_Clear(); PyErr_Format(PyExc_NameError, "name \'%%s\' is not defined", "%s"); }' %
self.entry.name)
code.putln(
'if (unlikely(PyObject_DelItem(%s, %s) < 0)) {'
' if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) %s'
' %s '
'}' % (namespace, interned_cname,
key_error_code,
code.error_goto(self.pos)))
elif self.entry.is_pyglobal:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
interned_cname = code.intern_identifier(self.entry.name)
del_code = '__Pyx_PyObject_DelAttrStr(%s, %s)' % (
Naming.module_cname, interned_cname)
if ignore_nonexisting:
code.putln(
'if (unlikely(%s < 0)) {'
' if (likely(PyErr_ExceptionMatches(PyExc_AttributeError))) PyErr_Clear(); else %s '
'}' % (del_code, code.error_goto(self.pos)))
else:
code.put_error_if_neg(self.pos, del_code)
elif self.entry.type.is_pyobject or self.entry.type.is_memoryviewslice:
if not self.cf_is_null:
if self.cf_maybe_null and not ignore_nonexisting:
code.put_error_if_unbound(self.pos, self.entry)
if self.entry.type.is_pyobject:
if self.entry.in_closure:
# generator
if ignore_nonexisting and self.cf_maybe_null:
code.put_xgotref(self.result())
else:
code.put_gotref(self.result())
if ignore_nonexisting and self.cf_maybe_null:
code.put_xdecref(self.result(), self.ctype())
else:
code.put_decref(self.result(), self.ctype())
code.putln('%s = NULL;' % self.result())
else:
code.put_xdecref_memoryviewslice(self.entry.cname,
have_gil=not self.nogil)
else:
error(self.pos, "Deletion of C names not supported")
def annotate(self, code):
if hasattr(self, 'is_called') and self.is_called:
pos = (self.pos[0], self.pos[1], self.pos[2] - len(self.name) - 1)
if self.type.is_pyobject:
style, text = 'py_call', 'python function (%s)'
else:
style, text = 'c_call', 'c function (%s)'
code.annotate(pos, AnnotationItem(style, text % self.type, size=len(self.name)))
class BackquoteNode(ExprNode):
# `expr`
#
# arg ExprNode
type = py_object_type
subexprs = ['arg']
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env)
self.arg = self.arg.coerce_to_pyobject(env)
self.is_temp = 1
return self
gil_message = "Backquote expression"
def calculate_constant_result(self):
self.constant_result = repr(self.arg.constant_result)
def generate_result_code(self, code):
code.putln(
"%s = PyObject_Repr(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ImportNode(ExprNode):
# Used as part of import statement implementation.
# Implements result =
# __import__(module_name, globals(), None, name_list, level)
#
# module_name StringNode dotted name of module. Empty module
# name means importing the parent package according
# to level
# name_list ListNode or None list of names to be imported
# level int relative import level:
# -1: attempt both relative import and absolute import;
# 0: absolute import;
# >0: the number of parent directories to search
# relative to the current module.
# None: decide the level according to language level and
# directives
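    # Illustrative mapping of import statements onto these fields
    # (following the __import__() convention; exact nodes may differ):
    #
    #     import os             ->  module_name "os",   name_list None,   level 0 or -1
    #     from . import a       ->  module_name "",     name_list ["a"],  level 1
    #     from ..pkg import b   ->  module_name "pkg",  name_list ["b"],  level 2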
type = py_object_type
subexprs = ['module_name', 'name_list']
def analyse_types(self, env):
if self.level is None:
if (env.directives['py2_import'] or
Future.absolute_import not in env.global_scope().context.future_directives):
self.level = -1
else:
self.level = 0
module_name = self.module_name.analyse_types(env)
self.module_name = module_name.coerce_to_pyobject(env)
if self.name_list:
name_list = self.name_list.analyse_types(env)
self.name_list = name_list.coerce_to_pyobject(env)
self.is_temp = 1
return self
gil_message = "Python import"
def generate_result_code(self, code):
if self.name_list:
name_list_code = self.name_list.py_result()
else:
name_list_code = "0"
code.globalstate.use_utility_code(UtilityCode.load_cached("Import", "ImportExport.c"))
import_code = "__Pyx_Import(%s, %s, %d)" % (
self.module_name.py_result(),
name_list_code,
self.level)
if (self.level <= 0 and
self.module_name.is_string_literal and
self.module_name.value in utility_code_for_imports):
helper_func, code_name, code_file = utility_code_for_imports[self.module_name.value]
code.globalstate.use_utility_code(UtilityCode.load_cached(code_name, code_file))
import_code = '%s(%s)' % (helper_func, import_code)
code.putln("%s = %s; %s" % (
self.result(),
import_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class IteratorNode(ExprNode):
# Used as part of for statement implementation.
#
# Implements result = iter(sequence)
#
# sequence ExprNode
type = py_object_type
iter_func_ptr = None
counter_cname = None
cpp_iterator_cname = None
reversed = False # currently only used for list/tuple types (see Optimize.py)
is_async = False
subexprs = ['sequence']
def analyse_types(self, env):
self.sequence = self.sequence.analyse_types(env)
if (self.sequence.type.is_array or self.sequence.type.is_ptr) and \
not self.sequence.type.is_string:
# C array iteration will be transformed later on
self.type = self.sequence.type
elif self.sequence.type.is_cpp_class:
self.analyse_cpp_types(env)
else:
self.sequence = self.sequence.coerce_to_pyobject(env)
if self.sequence.type in (list_type, tuple_type):
self.sequence = self.sequence.as_none_safe_node("'NoneType' object is not iterable")
self.is_temp = 1
return self
gil_message = "Iterating over Python object"
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
def type_dependencies(self, env):
return self.sequence.type_dependencies(env)
def infer_type(self, env):
sequence_type = self.sequence.infer_type(env)
if sequence_type.is_array or sequence_type.is_ptr:
return sequence_type
elif sequence_type.is_cpp_class:
begin = sequence_type.scope.lookup("begin")
if begin is not None:
return begin.type.return_type
elif sequence_type.is_pyobject:
return sequence_type
return py_object_type
def analyse_cpp_types(self, env):
sequence_type = self.sequence.type
if sequence_type.is_ptr:
sequence_type = sequence_type.base_type
begin = sequence_type.scope.lookup("begin")
end = sequence_type.scope.lookup("end")
if (begin is None
or not begin.type.is_cfunction
or begin.type.args):
error(self.pos, "missing begin() on %s" % self.sequence.type)
self.type = error_type
return
if (end is None
or not end.type.is_cfunction
or end.type.args):
error(self.pos, "missing end() on %s" % self.sequence.type)
self.type = error_type
return
iter_type = begin.type.return_type
if iter_type.is_cpp_class:
if env.lookup_operator_for_types(
self.pos,
"!=",
[iter_type, end.type.return_type]) is None:
error(self.pos, "missing operator!= on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '++', [iter_type]) is None:
error(self.pos, "missing operator++ on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
if env.lookup_operator_for_types(self.pos, '*', [iter_type]) is None:
error(self.pos, "missing operator* on result of begin() on %s" % self.sequence.type)
self.type = error_type
return
self.type = iter_type
elif iter_type.is_ptr:
if not (iter_type == end.type.return_type):
error(self.pos, "incompatible types for begin() and end()")
self.type = iter_type
else:
error(self.pos, "result type of begin() on %s must be a C++ class or pointer" % self.sequence.type)
self.type = error_type
return
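    # Sketch of the protocol validated above: a C++ container type is
    # iterable from Cython if it declares something like
    #
    #     cdef extern from "intvec.h":
    #         cdef cppclass IntVec:
    #             cppclass iterator:
    #                 int operator*()
    #                 iterator operator++()
    #                 bint operator!=(iterator)
    #             iterator begin()
    #             iterator end()
    #
    # so that ``for x in vec: ...`` can be compiled into plain
    # begin()/end()/operator++/operator* calls (names here are made up).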
def generate_result_code(self, code):
sequence_type = self.sequence.type
if sequence_type.is_cpp_class:
if self.sequence.is_name:
# safe: C++ won't allow you to reassign to class references
begin_func = "%s.begin" % self.sequence.result()
else:
sequence_type = PyrexTypes.c_ptr_type(sequence_type)
self.cpp_iterator_cname = code.funcstate.allocate_temp(sequence_type, manage_ref=False)
code.putln("%s = &%s;" % (self.cpp_iterator_cname, self.sequence.result()))
begin_func = "%s->begin" % self.cpp_iterator_cname
# TODO: Limit scope.
code.putln("%s = %s();" % (self.result(), begin_func))
return
if sequence_type.is_array or sequence_type.is_ptr:
raise InternalError("for in carray slice not transformed")
is_builtin_sequence = sequence_type in (list_type, tuple_type)
if not is_builtin_sequence:
# reversed() not currently optimised (see Optimize.py)
assert not self.reversed, "internal error: reversed() only implemented for list/tuple objects"
self.may_be_a_sequence = not sequence_type.is_builtin_type
if self.may_be_a_sequence:
code.putln(
"if (likely(PyList_CheckExact(%s)) || PyTuple_CheckExact(%s)) {" % (
self.sequence.py_result(),
self.sequence.py_result()))
if is_builtin_sequence or self.may_be_a_sequence:
self.counter_cname = code.funcstate.allocate_temp(
PyrexTypes.c_py_ssize_t_type, manage_ref=False)
if self.reversed:
if sequence_type is list_type:
init_value = 'PyList_GET_SIZE(%s) - 1' % self.result()
else:
init_value = 'PyTuple_GET_SIZE(%s) - 1' % self.result()
else:
init_value = '0'
code.putln("%s = %s; __Pyx_INCREF(%s); %s = %s;" % (
self.result(),
self.sequence.py_result(),
self.result(),
self.counter_cname,
init_value))
if not is_builtin_sequence:
self.iter_func_ptr = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
if self.may_be_a_sequence:
code.putln("%s = NULL;" % self.iter_func_ptr)
code.putln("} else {")
code.put("%s = -1; " % self.counter_cname)
code.putln("%s = PyObject_GetIter(%s); %s" % (
self.result(),
self.sequence.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
# PyObject_GetIter() fails if "tp_iternext" is not set, but the check below
# makes it visible to the C compiler that the pointer really isn't NULL, so that
# it can distinguish between the special cases and the generic case
code.putln("%s = Py_TYPE(%s)->tp_iternext; %s" % (
self.iter_func_ptr, self.py_result(),
code.error_goto_if_null(self.iter_func_ptr, self.pos)))
if self.may_be_a_sequence:
code.putln("}")
def generate_next_sequence_item(self, test_name, result_name, code):
assert self.counter_cname, "internal error: counter_cname temp not prepared"
final_size = 'Py%s_GET_SIZE(%s)' % (test_name, self.py_result())
if self.sequence.is_sequence_constructor:
item_count = len(self.sequence.args)
if self.sequence.mult_factor is None:
final_size = item_count
elif isinstance(self.sequence.mult_factor.constant_result, _py_int_types):
final_size = item_count * self.sequence.mult_factor.constant_result
code.putln("if (%s >= %s) break;" % (self.counter_cname, final_size))
if self.reversed:
inc_dec = '--'
else:
inc_dec = '++'
code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
code.putln(
"%s = Py%s_GET_ITEM(%s, %s); __Pyx_INCREF(%s); %s%s; %s" % (
result_name,
test_name,
self.py_result(),
self.counter_cname,
result_name,
self.counter_cname,
inc_dec,
# use the error label to avoid C compiler warnings if we only use it below
code.error_goto_if_neg('0', self.pos)
))
code.putln("#else")
code.putln(
"%s = PySequence_ITEM(%s, %s); %s%s; %s" % (
result_name,
self.py_result(),
self.counter_cname,
self.counter_cname,
inc_dec,
code.error_goto_if_null(result_name, self.pos)))
code.put_gotref(result_name)
code.putln("#endif")
def generate_iter_next_result_code(self, result_name, code):
sequence_type = self.sequence.type
if self.reversed:
code.putln("if (%s < 0) break;" % self.counter_cname)
if sequence_type.is_cpp_class:
if self.cpp_iterator_cname:
end_func = "%s->end" % self.cpp_iterator_cname
else:
end_func = "%s.end" % self.sequence.result()
# TODO: Cache end() call?
code.putln("if (!(%s != %s())) break;" % (
self.result(),
end_func))
code.putln("%s = *%s;" % (
result_name,
self.result()))
code.putln("++%s;" % self.result())
return
elif sequence_type is list_type:
self.generate_next_sequence_item('List', result_name, code)
return
elif sequence_type is tuple_type:
self.generate_next_sequence_item('Tuple', result_name, code)
return
if self.may_be_a_sequence:
code.putln("if (likely(!%s)) {" % self.iter_func_ptr)
code.putln("if (likely(PyList_CheckExact(%s))) {" % self.py_result())
self.generate_next_sequence_item('List', result_name, code)
code.putln("} else {")
self.generate_next_sequence_item('Tuple', result_name, code)
code.putln("}")
code.put("} else ")
code.putln("{")
code.putln(
"%s = %s(%s);" % (
result_name,
self.iter_func_ptr,
self.py_result()))
code.putln("if (unlikely(!%s)) {" % result_name)
code.putln("PyObject* exc_type = PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
code.putln("break;")
code.putln("}")
code.put_gotref(result_name)
code.putln("}")
def free_temps(self, code):
if self.counter_cname:
code.funcstate.release_temp(self.counter_cname)
if self.iter_func_ptr:
code.funcstate.release_temp(self.iter_func_ptr)
self.iter_func_ptr = None
if self.cpp_iterator_cname:
code.funcstate.release_temp(self.cpp_iterator_cname)
ExprNode.free_temps(self, code)
class NextNode(AtomicExprNode):
# Used as part of for statement implementation.
# Implements result = next(iterator)
# Created during analyse_types phase.
# The iterator is not owned by this node.
#
# iterator IteratorNode
def __init__(self, iterator):
AtomicExprNode.__init__(self, iterator.pos)
self.iterator = iterator
def nogil_check(self, env):
# ignore - errors (if any) are already handled by IteratorNode
pass
def type_dependencies(self, env):
return self.iterator.type_dependencies(env)
def infer_type(self, env, iterator_type=None):
if iterator_type is None:
iterator_type = self.iterator.infer_type(env)
if iterator_type.is_ptr or iterator_type.is_array:
return iterator_type.base_type
elif iterator_type.is_cpp_class:
item_type = env.lookup_operator_for_types(self.pos, "*", [iterator_type]).type.return_type
if item_type.is_reference:
item_type = item_type.ref_base_type
if item_type.is_const:
item_type = item_type.const_base_type
return item_type
else:
# Avoid duplication of complicated logic.
fake_index_node = IndexNode(
self.pos,
base=self.iterator.sequence,
index=IntNode(self.pos, value='PY_SSIZE_T_MAX',
type=PyrexTypes.c_py_ssize_t_type))
return fake_index_node.infer_type(env)
def analyse_types(self, env):
self.type = self.infer_type(env, self.iterator.type)
self.is_temp = 1
return self
def generate_result_code(self, code):
self.iterator.generate_iter_next_result_code(self.result(), code)
class AsyncIteratorNode(ExprNode):
# Used as part of 'async for' statement implementation.
#
# Implements result = sequence.__aiter__()
#
# sequence ExprNode
subexprs = ['sequence']
is_async = True
type = py_object_type
is_temp = 1
def infer_type(self, env):
return py_object_type
def analyse_types(self, env):
self.sequence = self.sequence.analyse_types(env)
if not self.sequence.type.is_pyobject:
error(self.pos, "async for loops not allowed on C/C++ types")
self.sequence = self.sequence.coerce_to_pyobject(env)
return self
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
code.putln("%s = __Pyx_Coroutine_GetAsyncIter(%s); %s" % (
self.result(),
self.sequence.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class AsyncNextNode(AtomicExprNode):
# Used as part of 'async for' statement implementation.
# Implements result = iterator.__anext__()
# Created during analyse_types phase.
# The iterator is not owned by this node.
#
# iterator IteratorNode
type = py_object_type
is_temp = 1
def __init__(self, iterator):
AtomicExprNode.__init__(self, iterator.pos)
self.iterator = iterator
def infer_type(self, env):
return py_object_type
def analyse_types(self, env):
return self
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("AsyncIter", "Coroutine.c"))
code.putln("%s = __Pyx_Coroutine_AsyncIterNext(%s); %s" % (
self.result(),
self.iterator.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class WithExitCallNode(ExprNode):
# The __exit__() call of a 'with' statement. Used in both the
# except and finally clauses.
# with_stat WithStatNode the surrounding 'with' statement
# args TupleNode or ResultStatNode the exception info tuple
# await_expr AwaitExprNode the await expression of an 'async with' statement
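    # Roughly equivalent Python for the generated logic (illustrative):
    #
    #     if exit_fn is not None:            # test_if_run
    #         handled = bool(exit_fn(*exc))  # args = exception info tuple
    #
    # where a true result suppresses the exception that is being propagated.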
subexprs = ['args', 'await_expr']
test_if_run = True
await_expr = None
def analyse_types(self, env):
self.args = self.args.analyse_types(env)
if self.await_expr:
self.await_expr = self.await_expr.analyse_types(env)
self.type = PyrexTypes.c_bint_type
self.is_temp = True
return self
def generate_evaluation_code(self, code):
if self.test_if_run:
# call only if it was not already called (and decref-cleared)
code.putln("if (%s) {" % self.with_stat.exit_var)
self.args.generate_evaluation_code(code)
result_var = code.funcstate.allocate_temp(py_object_type, manage_ref=False)
code.mark_pos(self.pos)
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln("%s = __Pyx_PyObject_Call(%s, %s, NULL);" % (
result_var,
self.with_stat.exit_var,
self.args.result()))
code.put_decref_clear(self.with_stat.exit_var, type=py_object_type)
self.args.generate_disposal_code(code)
self.args.free_temps(code)
code.putln(code.error_goto_if_null(result_var, self.pos))
code.put_gotref(result_var)
if self.await_expr:
# FIXME: result_var temp currently leaks into the closure
self.await_expr.generate_evaluation_code(code, source_cname=result_var, decref_source=True)
code.putln("%s = %s;" % (result_var, self.await_expr.py_result()))
self.await_expr.generate_post_assignment_code(code)
self.await_expr.free_temps(code)
if self.result_is_used:
self.allocate_temp_result(code)
code.putln("%s = __Pyx_PyObject_IsTrue(%s);" % (self.result(), result_var))
code.put_decref_clear(result_var, type=py_object_type)
if self.result_is_used:
code.put_error_if_neg(self.pos, self.result())
code.funcstate.release_temp(result_var)
if self.test_if_run:
code.putln("}")
class ExcValueNode(AtomicExprNode):
# Node created during analyse_types phase
# of an ExceptClauseNode to fetch the current
# exception value.
type = py_object_type
def __init__(self, pos):
ExprNode.__init__(self, pos)
def set_var(self, var):
self.var = var
def calculate_result_code(self):
return self.var
def generate_result_code(self, code):
pass
def analyse_types(self, env):
return self
class TempNode(ExprNode):
# Node created during analyse_types phase
# of some nodes to hold a temporary value.
#
# Note: One must call "allocate" and "release" on
# the node during code generation to get/release the temp.
# This is because the temp result is often used outside of
# the regular cycle.
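    # Sketch of the usage pattern described above:
    #
    #     tmp = TempNode(node.pos, some_type)
    #     tmp.allocate(code)
    #     code.putln("%s = ...;" % tmp.result())
    #     tmp.release(code)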
subexprs = []
def __init__(self, pos, type, env=None):
ExprNode.__init__(self, pos)
self.type = type
if type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
def analyse_types(self, env):
return self
def analyse_target_declaration(self, env):
pass
def generate_result_code(self, code):
pass
def allocate(self, code):
self.temp_cname = code.funcstate.allocate_temp(self.type, manage_ref=True)
def release(self, code):
code.funcstate.release_temp(self.temp_cname)
self.temp_cname = None
def result(self):
try:
return self.temp_cname
        except AttributeError:
            assert False, "Remember to call allocate/release on TempNode"
            raise  # re-raises when assertions are disabled (python -O)
# Do not participate in normal temp alloc/dealloc:
def allocate_temp_result(self, code):
pass
def release_temp_result(self, code):
pass
class PyTempNode(TempNode):
# TempNode holding a Python value.
def __init__(self, pos, env):
TempNode.__init__(self, pos, PyrexTypes.py_object_type, env)
class RawCNameExprNode(ExprNode):
subexprs = []
def __init__(self, pos, type=None, cname=None):
ExprNode.__init__(self, pos, type=type)
if cname is not None:
self.cname = cname
def analyse_types(self, env):
return self
def set_cname(self, cname):
self.cname = cname
def result(self):
return self.cname
def generate_result_code(self, code):
pass
#-------------------------------------------------------------------
#
# F-strings
#
#-------------------------------------------------------------------
class JoinedStrNode(ExprNode):
# F-strings
#
# values [UnicodeNode|FormattedValueNode] Substrings of the f-string
#
type = unicode_type
is_temp = True
subexprs = ['values']
def analyse_types(self, env):
self.values = [v.analyse_types(env).coerce_to_pyobject(env) for v in self.values]
return self
def may_be_none(self):
# PyUnicode_Join() always returns a Unicode string or raises an exception
return False
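    # Simplified sketch of the code generated below: the substrings are
    # collected into a tuple while tracking the total length and the
    # widest character, then joined in a single allocation:
    #
    #     t = PyTuple_New(n); ulength = 0; max_char = 127
    #     for each value v:
    #         ulength += len(v); max_char = max(max_char, MAX_CHAR(v))
    #     result = __Pyx_PyUnicode_Join(t, n, ulength, max_char)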
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
num_items = len(self.values)
list_var = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
ulength_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
max_char_var = code.funcstate.allocate_temp(PyrexTypes.c_py_ucs4_type, manage_ref=False)
code.putln('%s = PyTuple_New(%s); %s' % (
list_var,
num_items,
code.error_goto_if_null(list_var, self.pos)))
code.put_gotref(list_var)
code.putln("%s = 0;" % ulength_var)
code.putln("%s = 127;" % max_char_var) # at least ASCII character range
for i, node in enumerate(self.values):
node.generate_evaluation_code(code)
node.make_owned_reference(code)
ulength = "__Pyx_PyUnicode_GET_LENGTH(%s)" % node.py_result()
max_char_value = "__Pyx_PyUnicode_MAX_CHAR_VALUE(%s)" % node.py_result()
is_ascii = False
if isinstance(node, UnicodeNode):
try:
# most strings will be ASCII or at least Latin-1
node.value.encode('iso8859-1')
max_char_value = '255'
node.value.encode('us-ascii')
is_ascii = True
except UnicodeEncodeError:
if max_char_value != '255':
# not ISO8859-1 => check BMP limit
max_char = max(map(ord, node.value))
if max_char < 0xD800:
# BMP-only, no surrogate pairs used
max_char_value = '65535'
ulength = str(len(node.value))
elif max_char >= 65536:
                            # clearly outside of the BMP, and not on a 16-bit Unicode system
max_char_value = '1114111'
ulength = str(len(node.value))
else:
# not really worth implementing a check for surrogate pairs here
# drawback: C code can differ when generating on Py2 with 2-byte Unicode
pass
else:
ulength = str(len(node.value))
elif isinstance(node, FormattedValueNode) and node.value.type.is_numeric:
is_ascii = True # formatted C numbers are always ASCII
if not is_ascii:
code.putln("%s = (%s > %s) ? %s : %s;" % (
max_char_var, max_char_value, max_char_var, max_char_value, max_char_var))
code.putln("%s += %s;" % (ulength_var, ulength))
code.put_giveref(node.py_result())
code.putln('PyTuple_SET_ITEM(%s, %s, %s);' % (list_var, i, node.py_result()))
node.generate_post_assignment_code(code)
node.free_temps(code)
code.mark_pos(self.pos)
self.allocate_temp_result(code)
code.globalstate.use_utility_code(UtilityCode.load_cached("JoinPyUnicode", "StringTools.c"))
code.putln('%s = __Pyx_PyUnicode_Join(%s, %d, %s, %s); %s' % (
self.result(),
list_var,
num_items,
ulength_var,
max_char_var,
code.error_goto_if_null(self.py_result(), self.pos)))
code.put_gotref(self.py_result())
code.put_decref_clear(list_var, py_object_type)
code.funcstate.release_temp(list_var)
code.funcstate.release_temp(ulength_var)
code.funcstate.release_temp(max_char_var)
class FormattedValueNode(ExprNode):
# {}-delimited portions of an f-string
#
# value ExprNode The expression itself
# conversion_char str or None Type conversion (!s, !r, !a, or none, or 'd' for integer conversion)
# format_spec JoinedStrNode or None Format string passed to __format__
# c_format_spec str or None If not None, formatting can be done at the C level
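    # For example, the f-string piece ``{x!r:>10}`` is represented with
    # value=x, conversion_char='r' and a format_spec node holding ">10"
    # (illustrative; the actual splitting is done by the parser).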
subexprs = ['value', 'format_spec']
type = unicode_type
is_temp = True
c_format_spec = None
find_conversion_func = {
's': 'PyObject_Unicode',
'r': 'PyObject_Repr',
'a': 'PyObject_ASCII', # NOTE: mapped to PyObject_Repr() in Py2
'd': '__Pyx_PyNumber_IntOrLong', # NOTE: internal mapping for '%d' formatting
}.get
def may_be_none(self):
# PyObject_Format() always returns a Unicode string or raises an exception
return False
def analyse_types(self, env):
self.value = self.value.analyse_types(env)
if not self.format_spec or self.format_spec.is_string_literal:
c_format_spec = self.format_spec.value if self.format_spec else self.value.type.default_format_spec
if self.value.type.can_coerce_to_pystring(env, format_spec=c_format_spec):
self.c_format_spec = c_format_spec
if self.format_spec:
self.format_spec = self.format_spec.analyse_types(env).coerce_to_pyobject(env)
if self.c_format_spec is None:
self.value = self.value.coerce_to_pyobject(env)
if not self.format_spec and (not self.conversion_char or self.conversion_char == 's'):
if self.value.type is unicode_type and not self.value.may_be_none():
                # value is definitely a unicode string and we don't apply any special formatting
return self.value
return self
def generate_result_code(self, code):
if self.c_format_spec is not None and not self.value.type.is_pyobject:
convert_func_call = self.value.type.convert_to_pystring(
self.value.result(), code, self.c_format_spec)
code.putln("%s = %s; %s" % (
self.result(),
convert_func_call,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
return
value_result = self.value.py_result()
value_is_unicode = self.value.type is unicode_type and not self.value.may_be_none()
if self.format_spec:
format_func = '__Pyx_PyObject_Format'
format_spec = self.format_spec.py_result()
else:
# common case: expect simple Unicode pass-through if no format spec
format_func = '__Pyx_PyObject_FormatSimple'
# passing a Unicode format string in Py2 forces PyObject_Format() to also return a Unicode string
format_spec = Naming.empty_unicode
conversion_char = self.conversion_char
if conversion_char == 's' and value_is_unicode:
# no need to pipe unicode strings through str()
conversion_char = None
if conversion_char:
fn = self.find_conversion_func(conversion_char)
assert fn is not None, "invalid conversion character found: '%s'" % conversion_char
value_result = '%s(%s)' % (fn, value_result)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectFormatAndDecref", "StringTools.c"))
format_func += 'AndDecref'
elif self.format_spec:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectFormat", "StringTools.c"))
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectFormatSimple", "StringTools.c"))
code.putln("%s = %s(%s, %s); %s" % (
self.result(),
format_func,
value_result,
format_spec,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
#-------------------------------------------------------------------
#
# Parallel nodes (cython.parallel.threadsavailable / cython.parallel.threadid)
#
#-------------------------------------------------------------------
class ParallelThreadsAvailableNode(AtomicExprNode):
"""
    Note: this is disabled and not available as a function at this moment.
Implements cython.parallel.threadsavailable(). If we are called from the
sequential part of the application, we need to call omp_get_max_threads(),
and in the parallel part we can just call omp_get_num_threads()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("if (omp_in_parallel()) %s = omp_get_max_threads();" %
self.temp_code)
code.putln("else %s = omp_get_num_threads();" % self.temp_code)
code.putln("#else")
code.putln("%s = 1;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
class ParallelThreadIdNode(AtomicExprNode): #, Nodes.ParallelNode):
"""
Implements cython.parallel.threadid()
"""
type = PyrexTypes.c_int_type
def analyse_types(self, env):
self.is_temp = True
# env.add_include_file("omp.h")
return self
def generate_result_code(self, code):
code.putln("#ifdef _OPENMP")
code.putln("%s = omp_get_thread_num();" % self.temp_code)
code.putln("#else")
code.putln("%s = 0;" % self.temp_code)
code.putln("#endif")
def result(self):
return self.temp_code
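    # Illustrative use from Cython code (requires compiling with OpenMP):
    #
    #     from cython.parallel import prange, threadid
    #     for i in prange(n, nogil=True):
    #         tid = threadid()   # becomes omp_get_thread_num() as above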
#-------------------------------------------------------------------
#
# Trailer nodes
#
#-------------------------------------------------------------------
class _IndexingBaseNode(ExprNode):
# Base class for indexing nodes.
#
# base ExprNode the value being indexed
def is_ephemeral(self):
# in most cases, indexing will return a safe reference to an object in a container,
# so we consider the result safe if the base object is
return self.base.is_ephemeral() or self.base.type in (
basestring_type, str_type, bytes_type, bytearray_type, unicode_type)
def check_const_addr(self):
return self.base.check_const_addr() and self.index.check_const()
def is_lvalue(self):
# NOTE: references currently have both is_reference and is_ptr
# set. Since pointers and references have different lvalue
# rules, we must be careful to separate the two.
if self.type.is_reference:
if self.type.ref_base_type.is_array:
                # fixed-size arrays aren't l-values
return False
elif self.type.is_ptr:
# non-const pointers can always be reassigned
return True
# Just about everything else returned by the index operator
# can be an lvalue.
return True
class IndexNode(_IndexingBaseNode):
# Sequence indexing.
#
# base ExprNode
# index ExprNode
# type_indices [PyrexType]
#
# is_fused_index boolean Whether the index is used to specialize a
# c(p)def function
subexprs = ['base', 'index']
type_indices = None
is_subscript = True
is_fused_index = False
def calculate_constant_result(self):
self.constant_result = self.base.constant_result[self.index.constant_result]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
index = self.index.compile_time_value(denv)
try:
return base[index]
except Exception as e:
self.compile_time_value_error(e)
def is_simple(self):
base = self.base
return (base.is_simple() and self.index.is_simple()
and base.type and (base.type.is_ptr or base.type.is_array))
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if isinstance(self.index, SliceNode):
# slicing!
if base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def analyse_target_declaration(self, env):
pass
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
if base_type.is_cpp_class:
if isinstance(self.index, TupleNode):
template_values = self.index.args
else:
template_values = [self.index]
type_node = Nodes.TemplatedTypeNode(
pos=self.pos,
positional_args=template_values,
keyword_args=None)
return type_node.analyse(env, base_type=base_type)
elif self.index.is_slice or self.index.is_sequence_constructor:
# memory view
from . import MemoryView
env.use_utility_code(MemoryView.view_utility_code)
axes = [self.index] if self.index.is_slice else list(self.index.args)
return PyrexTypes.MemoryViewSliceType(base_type, MemoryView.get_axes_specs(env, axes))
else:
# C array
index = self.index.compile_time_value(env)
if index is not None:
try:
index = int(index)
except (ValueError, TypeError):
pass
else:
return PyrexTypes.CArrayType(base_type, index)
error(self.pos, "Array size must be a compile time constant")
return None
def type_dependencies(self, env):
return self.base.type_dependencies(env) + self.index.type_dependencies(env)
def infer_type(self, env):
base_type = self.base.infer_type(env)
if self.index.is_slice:
# slicing!
if base_type.is_string:
# sliced C strings must coerce to Python
return bytes_type
elif base_type.is_pyunicode_ptr:
# sliced Py_UNICODE* strings must coerce to Python
return unicode_type
elif base_type in (unicode_type, bytes_type, str_type,
bytearray_type, list_type, tuple_type):
# slicing these returns the same type
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
index_type = self.index.infer_type(env)
if index_type and index_type.is_int or isinstance(self.index, IntNode):
# indexing!
if base_type is unicode_type:
# Py_UCS4 will automatically coerce to a unicode string
# if required, so this is safe. We only infer Py_UCS4
# when the index is a C integer type. Otherwise, we may
# need to use normal Python item access, in which case
# it's faster to return the one-char unicode string than
# to receive it, throw it away, and potentially rebuild it
# on a subsequent PyObject coercion.
return PyrexTypes.c_py_ucs4_type
elif base_type is str_type:
# always returns str - Py2: bytes, Py3: unicode
return base_type
elif base_type is bytearray_type:
return PyrexTypes.c_uchar_type
elif isinstance(self.base, BytesNode):
#if env.global_scope().context.language_level >= 3:
# # inferring 'char' can be made to work in Python 3 mode
# return PyrexTypes.c_char_type
# Py2/3 return different types on indexing bytes objects
return py_object_type
elif base_type in (tuple_type, list_type):
# if base is a literal, take a look at its values
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is not None:
return item_type
elif base_type.is_ptr or base_type.is_array:
return base_type.base_type
elif base_type.is_ctuple and isinstance(self.index, IntNode):
if self.index.has_constant_result():
index = self.index.constant_result
if index < 0:
index += base_type.size
if 0 <= index < base_type.size:
return base_type.components[index]
if base_type.is_cpp_class:
class FakeOperand:
def __init__(self, **kwds):
self.__dict__.update(kwds)
operands = [
FakeOperand(pos=self.pos, type=base_type),
FakeOperand(pos=self.pos, type=index_type),
]
index_func = env.lookup_operator('[]', operands)
if index_func is not None:
return index_func.type.return_type
if is_pythran_expr(base_type) and is_pythran_expr(index_type):
index_with_type = (self.index, index_type)
return PythranExpr(pythran_indexing_type(base_type, [index_with_type]))
# may be slicing or indexing, we don't know
if base_type in (unicode_type, str_type):
            # these types always return their own type on Python indexing/slicing
return base_type
else:
# TODO: Handle buffers (hopefully without too much redundancy).
return py_object_type
def analyse_types(self, env):
return self.analyse_base_and_index_types(env, getting=True)
def analyse_target_types(self, env):
node = self.analyse_base_and_index_types(env, setting=True)
if node.type.is_const:
error(self.pos, "Assignment to const dereference")
if node is self and not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % node.type)
return node
def analyse_base_and_index_types(self, env, getting=False, setting=False,
analyse_base=True):
# Note: This might be cleaned up by having IndexNode
# parsed in a saner way and only construct the tuple if
# needed.
if analyse_base:
self.base = self.base.analyse_types(env)
if self.base.type.is_error:
# Do not visit child tree if base is undeclared to avoid confusing
# error messages
self.type = PyrexTypes.error_type
return self
is_slice = self.index.is_slice
if not env.directives['wraparound']:
if is_slice:
check_negative_indices(self.index.start, self.index.stop)
else:
check_negative_indices(self.index)
# Potentially overflowing index value.
if not is_slice and isinstance(self.index, IntNode) and Utils.long_literal(self.index.value):
self.index = self.index.coerce_to_pyobject(env)
is_memslice = self.base.type.is_memoryviewslice
# Handle the case where base is a literal char* (and we expect a string, not an int)
if not is_memslice and (isinstance(self.base, BytesNode) or is_slice):
if self.base.type.is_string or not (self.base.type.is_ptr or self.base.type.is_array):
self.base = self.base.coerce_to_pyobject(env)
replacement_node = self.analyse_as_buffer_operation(env, getting)
if replacement_node is not None:
return replacement_node
self.nogil = env.nogil
base_type = self.base.type
if not base_type.is_cfunction:
self.index = self.index.analyse_types(env)
self.original_index_type = self.index.type
if base_type.is_unicode_char:
# we infer Py_UNICODE/Py_UCS4 for unicode strings in some
# cases, but indexing must still work for them
if setting:
warning(self.pos, "cannot assign to Unicode string index", level=1)
elif self.index.constant_result in (0, -1):
# uchar[0] => uchar
return self.base
self.base = self.base.coerce_to_pyobject(env)
base_type = self.base.type
if base_type.is_pyobject:
return self.analyse_as_pyobject(env, is_slice, getting, setting)
elif base_type.is_ptr or base_type.is_array:
return self.analyse_as_c_array(env, is_slice)
elif base_type.is_cpp_class:
return self.analyse_as_cpp(env, setting)
elif base_type.is_cfunction:
return self.analyse_as_c_function(env)
elif base_type.is_ctuple:
return self.analyse_as_c_tuple(env, getting, setting)
else:
error(self.pos,
"Attempting to index non-array type '%s'" %
base_type)
self.type = PyrexTypes.error_type
return self
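# Illustrative dispatch summary (hypothetical names; a sketch, not
# normative) of which analyse_as_*() branch above applies:
#     obj[i]    Python object      -> analyse_as_pyobject()
#     arr[i]    C pointer/array    -> analyse_as_c_array()
#     vec[i]    C++ class          -> analyse_as_cpp()
#     func[T]   C function         -> analyse_as_c_function() (templates/fused)
#     ctup[0]   ctuple             -> analyse_as_c_tuple()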
def analyse_as_pyobject(self, env, is_slice, getting, setting):
base_type = self.base.type
if self.index.type.is_unicode_char and base_type is not dict_type:
# TODO: eventually fold into case below and remove warning, once people have adapted their code
warning(self.pos,
"Item lookup of unicode character codes now always converts to a Unicode string. "
"Use an explicit C integer cast to get back the previous integer lookup behaviour.", level=1)
self.index = self.index.coerce_to_pyobject(env)
self.is_temp = 1
elif self.index.type.is_int and base_type is not dict_type:
if (getting
and (base_type in (list_type, tuple_type, bytearray_type))
and (not self.index.type.signed
or not env.directives['wraparound']
or (isinstance(self.index, IntNode) and
self.index.has_constant_result() and self.index.constant_result >= 0))
and not env.directives['boundscheck']):
self.is_temp = 0
else:
self.is_temp = 1
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env).coerce_to_simple(env)
self.original_index_type.create_to_py_utility_code(env)
else:
self.index = self.index.coerce_to_pyobject(env)
self.is_temp = 1
if self.index.type.is_int and base_type is unicode_type:
# Py_UNICODE/Py_UCS4 will automatically coerce to a unicode string
# if required, so this is fast and safe
self.type = PyrexTypes.c_py_ucs4_type
elif self.index.type.is_int and base_type is bytearray_type:
if setting:
self.type = PyrexTypes.c_uchar_type
else:
# not using 'uchar' to enable fast and safe error reporting as '-1'
self.type = PyrexTypes.c_int_type
elif is_slice and base_type in (bytes_type, bytearray_type, str_type, unicode_type, list_type, tuple_type):
self.type = base_type
else:
item_type = None
if base_type in (list_type, tuple_type) and self.index.type.is_int:
item_type = infer_sequence_item_type(
env, self.base, self.index, seq_type=base_type)
if item_type is None:
item_type = py_object_type
self.type = item_type
if base_type in (list_type, tuple_type, dict_type):
# do the None check explicitly (not in a helper) to allow optimising it away
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
self.wrap_in_nonecheck_node(env, getting)
return self
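# Illustrative examples of the result types chosen above (hypothetical
# declarations; a sketch, not normative):
#     cdef unicode u;    u[i]  -> Py_UCS4 (coerces back to unicode if needed)
#     cdef bytearray b;  b[i]  -> int when getting (allows '-1' error value)
#     cdef list l;       l[0]  -> no temp needed with boundscheck(False) and
#                                 a constant, non-negative index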
def analyse_as_c_array(self, env, is_slice):
base_type = self.base.type
self.type = base_type.base_type
if is_slice:
self.type = base_type
elif self.index.type.is_pyobject:
self.index = self.index.coerce_to(PyrexTypes.c_py_ssize_t_type, env)
elif not self.index.type.is_int:
error(self.pos, "Invalid index type '%s'" % self.index.type)
return self
def analyse_as_cpp(self, env, setting):
base_type = self.base.type
function = env.lookup_operator("[]", [self.base, self.index])
if function is None:
error(self.pos, "Indexing '%s' not supported for index type '%s'" % (base_type, self.index.type))
self.type = PyrexTypes.error_type
self.result_code = ""
return self
func_type = function.type
if func_type.is_ptr:
func_type = func_type.base_type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check:
if not setting:
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
self.index = self.index.coerce_to(func_type.args[0].type, env)
self.type = func_type.return_type
if setting and not func_type.return_type.is_reference:
error(self.pos, "Can't set non-reference result '%s'" % self.type)
return self
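# Illustrative example, assuming libcpp.vector (a sketch, not normative):
#     from libcpp.vector cimport vector
#     cdef vector[int] v
#     x = v[0]      # resolves operator[], whose result is a reference (int&)
#     v[0] = 1      # allowed only because the result type is a reference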
def analyse_as_c_function(self, env):
base_type = self.base.type
if base_type.is_fused:
self.parse_indexed_fused_cdef(env)
else:
self.type_indices = self.parse_index_as_types(env)
self.index = None # FIXME: use a dedicated Node class instead of generic IndexNode
if base_type.templates is None:
error(self.pos, "Can only parameterize template functions.")
self.type = error_type
elif self.type_indices is None:
# Error recorded earlier.
self.type = error_type
elif len(base_type.templates) != len(self.type_indices):
error(self.pos, "Wrong number of template arguments: expected %s, got %s" % (
(len(base_type.templates), len(self.type_indices))))
self.type = error_type
else:
self.type = base_type.specialize(dict(zip(base_type.templates, self.type_indices)))
# FIXME: use a dedicated Node class instead of generic IndexNode
return self
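# Illustrative example of indexing a template function with types
# (hypothetical declaration; a sketch, not normative):
#     cdef extern from "<algorithm>" namespace "std":
#         T max[T](T a, T b)
#     max[int](1, 2)    # explicit template parameterization via indexing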
def analyse_as_c_tuple(self, env, getting, setting):
base_type = self.base.type
if isinstance(self.index, IntNode) and self.index.has_constant_result():
index = self.index.constant_result
if -base_type.size <= index < base_type.size:
if index < 0:
index += base_type.size
self.type = base_type.components[index]
else:
error(self.pos,
"Index %s out of bounds for '%s'" %
(index, base_type))
self.type = PyrexTypes.error_type
return self
else:
self.base = self.base.coerce_to_pyobject(env)
return self.analyse_base_and_index_types(env, getting=getting, setting=setting, analyse_base=False)
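# Illustrative ctuple examples (hypothetical declarations; a sketch):
#     cdef (int, double) t = (1, 2.0)
#     t[0]     # constant index          -> C access 't.f0' of type int
#     t[-1]    # negative constant wraps -> 't.f1' of type double
#     t[i]     # non-constant index      -> falls back to Python indexing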
def analyse_as_buffer_operation(self, env, getting):
"""
Analyse buffer indexing and memoryview indexing/slicing
"""
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
base = self.base
base_type = base.type
replacement_node = None
if base_type.is_memoryviewslice:
# memoryviewslice indexing or slicing
from . import MemoryView
if base.is_memview_slice:
# For memory views, "view[i][j]" is the same as "view[i, j]" => use the latter for speed.
merged_indices = base.merged_indices(indices)
if merged_indices is not None:
base = base.base
base_type = base.type
indices = merged_indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, base_type.ndim)
if have_slices:
replacement_node = MemoryViewSliceNode(self.pos, indices=indices, base=base)
else:
replacement_node = MemoryViewIndexNode(self.pos, indices=indices, base=base)
elif base_type.is_buffer or base_type.is_pythran_expr:
if base_type.is_pythran_expr or len(indices) == base_type.ndim:
# Buffer indexing
is_buffer_access = True
indices = [index.analyse_types(env) for index in indices]
if base_type.is_pythran_expr:
do_replacement = all(
index.type.is_int or index.is_slice or index.type.is_pythran_expr
for index in indices)
if do_replacement:
for i, index in enumerate(indices):
if index.is_slice:
index = SliceIntNode(index.pos, start=index.start, stop=index.stop, step=index.step)
index = index.analyse_types(env)
indices[i] = index
else:
do_replacement = all(index.type.is_int for index in indices)
if do_replacement:
replacement_node = BufferIndexNode(self.pos, indices=indices, base=base)
# On cloning, indices is cloned. Otherwise, unpack index into indices.
assert not isinstance(self.index, CloneNode)
if replacement_node is not None:
replacement_node = replacement_node.analyse_types(env, getting)
return replacement_node
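# Illustrative examples of the replacements above (hypothetical 'm';
# a sketch, not normative):
#     cdef double[:, :] m
#     m[0, 1]    # all-integer index -> MemoryViewIndexNode (element access)
#     m[0, 1:]   # contains a slice  -> MemoryViewSliceNode
#     m[i][j]    # merged into m[i, j] for a single, faster lookup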
def wrap_in_nonecheck_node(self, env, getting):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
def parse_index_as_types(self, env, required=True):
if isinstance(self.index, TupleNode):
indices = self.index.args
else:
indices = [self.index]
type_indices = []
for index in indices:
type_indices.append(index.analyse_as_type(env))
if type_indices[-1] is None:
if required:
error(index.pos, "not parsable as a type")
return None
return type_indices
def parse_indexed_fused_cdef(self, env):
"""
Interpret fused_cdef_func[specific_type1, ...]
Note that if this method is called, we are an indexed cdef function
with fused argument types, and this IndexNode will be replaced by the
NameNode with specific entry just after analysis of expressions by
AnalyseExpressionsTransform.
"""
self.type = PyrexTypes.error_type
self.is_fused_index = True
base_type = self.base.type
positions = []
if self.index.is_name or self.index.is_attribute:
positions.append(self.index.pos)
elif isinstance(self.index, TupleNode):
for arg in self.index.args:
positions.append(arg.pos)
specific_types = self.parse_index_as_types(env, required=False)
if specific_types is None:
self.index = self.index.analyse_types(env)
if not self.base.entry.as_variable:
error(self.pos, "Can only index fused functions with types")
else:
# A cpdef function indexed with Python objects
self.base.entry = self.entry = self.base.entry.as_variable
self.base.type = self.type = self.entry.type
self.base.is_temp = True
self.is_temp = True
self.entry.used = True
self.is_fused_index = False
return
for i, type in enumerate(specific_types):
specific_types[i] = type.specialize_fused(env)
fused_types = base_type.get_fused_types()
if len(specific_types) > len(fused_types):
return error(self.pos, "Too many types specified")
elif len(specific_types) < len(fused_types):
t = fused_types[len(specific_types)]
return error(self.pos, "Not enough types specified to specialize "
"the function, %s is still fused" % t)
# See if our index types form valid specializations
for pos, specific_type, fused_type in zip(positions,
specific_types,
fused_types):
if not any([specific_type.same_as(t) for t in fused_type.types]):
return error(pos, "Type not in fused type")
if specific_type is None or specific_type.is_error:
return
fused_to_specific = dict(zip(fused_types, specific_types))
type = base_type.specialize(fused_to_specific)
if type.is_fused:
# Only partially specific, this is invalid
error(self.pos,
"Index operation makes function only partially specific")
else:
# Fully specific, find the signature with the specialized entry
for signature in self.base.type.get_all_specialized_function_types():
if type.same_as(signature):
self.type = signature
if self.base.is_attribute:
# Pretend to be a normal attribute, for cdef extension
# methods
self.entry = signature.entry
self.is_attribute = True
self.obj = self.base.obj
self.type.entry.used = True
self.base.type = signature
self.base.entry = signature.entry
break
else:
# This is a bug
raise InternalError("Couldn't find the right signature")
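# Illustrative example of indexing a fused function (hypothetical names;
# a sketch, not normative):
#     ctypedef fused number:
#         int
#         double
#     cdef number twice(number x): return 2 * x
#     twice[int](3)      # selects the fully specialized 'int' signature
#     twice[float](3)    # error: "Type not in fused type"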
gil_message = "Indexing Python object"
def calculate_result_code(self):
if self.base.type in (list_type, tuple_type, bytearray_type):
if self.base.type is list_type:
index_code = "PyList_GET_ITEM(%s, %s)"
elif self.base.type is tuple_type:
index_code = "PyTuple_GET_ITEM(%s, %s)"
elif self.base.type is bytearray_type:
index_code = "((unsigned char)(PyByteArray_AS_STRING(%s)[%s]))"
else:
assert False, "unexpected base type in indexing: %s" % self.base.type
elif self.base.type.is_cfunction:
return "%s<%s>" % (
self.base.result(),
",".join([param.empty_declaration_code() for param in self.type_indices]))
elif self.base.type.is_ctuple:
index = self.index.constant_result
if index < 0:
index += self.base.type.size
return "%s.f%s" % (self.base.result(), index)
else:
if (self.type.is_ptr or self.type.is_array) and self.type == self.base.type:
error(self.pos, "Invalid use of pointer slice")
return
index_code = "(%s[%s])"
return index_code % (self.base.result(), self.index.result())
def extra_index_params(self, code):
if self.index.type.is_int:
is_list = self.base.type is list_type
wraparound = (
bool(code.globalstate.directives['wraparound']) and
self.original_index_type.signed and
not (isinstance(self.index.constant_result, _py_int_types)
and self.index.constant_result >= 0))
boundscheck = bool(code.globalstate.directives['boundscheck'])
return ", %s, %d, %s, %d, %d, %d" % (
self.original_index_type.empty_declaration_code(),
self.original_index_type.signed and 1 or 0,
self.original_index_type.to_py_function,
is_list, wraparound, boundscheck)
else:
return ""
def generate_result_code(self, code):
if not self.is_temp:
# all handled in self.calculate_result_code()
return
utility_code = None
if self.type.is_pyobject:
error_value = 'NULL'
if self.index.type.is_int:
if self.base.type is list_type:
function = "__Pyx_GetItemInt_List"
elif self.base.type is tuple_type:
function = "__Pyx_GetItemInt_Tuple"
else:
function = "__Pyx_GetItemInt"
utility_code = TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c")
else:
if self.base.type is dict_type:
function = "__Pyx_PyDict_GetItem"
utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
elif self.base.type is py_object_type and self.index.type in (str_type, unicode_type):
# obj[str] is probably doing a dict lookup
function = "__Pyx_PyObject_Dict_GetItem"
utility_code = UtilityCode.load_cached("DictGetItem", "ObjectHandling.c")
else:
function = "__Pyx_PyObject_GetItem"
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("GetItemInt", "ObjectHandling.c"))
utility_code = UtilityCode.load_cached("ObjectGetItem", "ObjectHandling.c")
elif self.type.is_unicode_char and self.base.type is unicode_type:
assert self.index.type.is_int
function = "__Pyx_GetItemInt_Unicode"
error_value = '(Py_UCS4)-1'
utility_code = UtilityCode.load_cached("GetItemIntUnicode", "StringTools.c")
elif self.base.type is bytearray_type:
assert self.index.type.is_int
assert self.type.is_int
function = "__Pyx_GetItemInt_ByteArray"
error_value = '-1'
utility_code = UtilityCode.load_cached("GetItemIntByteArray", "StringTools.c")
elif not (self.base.type.is_cpp_class and self.exception_check):
assert False, "unexpected type %s and base type %s for indexing" % (
self.type, self.base.type)
if utility_code is not None:
code.globalstate.use_utility_code(utility_code)
if self.index.type.is_int:
index_code = self.index.result()
else:
index_code = self.index.py_result()
if self.base.type.is_cpp_class and self.exception_check:
translate_cpp_exception(code, self.pos,
"%s = %s[%s];" % (self.result(), self.base.result(),
self.index.result()),
self.result() if self.type.is_pyobject else None,
self.exception_value, self.in_nogil_context)
else:
error_check = '!%s' if error_value == 'NULL' else '%%s == %s' % error_value
code.putln(
"%s = %s(%s, %s%s); %s" % (
self.result(),
function,
self.base.py_result(),
index_code,
self.extra_index_params(code),
code.error_goto_if(error_check % self.result(), self.pos)))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def generate_setitem_code(self, value_code, code):
if self.index.type.is_int:
if self.base.type is bytearray_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemIntByteArray", "StringTools.c"))
function = "__Pyx_SetItemInt_ByteArray"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("SetItemInt", "ObjectHandling.c"))
function = "__Pyx_SetItemInt"
index_code = self.index.result()
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_SetItem"
# It would seem that we could specialize lists/tuples, but that
# shouldn't be done here.
# Both PyList_SetItem() and PyTuple_SetItem() take a Py_ssize_t as
# index instead of an object, and bad conversion here would give
# the wrong exception. Also, tuples are supposed to be immutable,
# and raise a TypeError when trying to set their entries
# (PyTuple_SetItem() is for creating new tuples from scratch).
else:
function = "PyObject_SetItem"
code.putln(code.error_goto_if_neg(
"%s(%s, %s, %s%s)" % (
function,
self.base.py_result(),
index_code,
value_code,
self.extra_index_params(code)),
self.pos))
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
self.generate_setitem_code(rhs.py_result(), code)
elif self.base.type is bytearray_type:
value_code = self._check_byte_value(code, rhs)
self.generate_setitem_code(value_code, code)
elif self.base.type.is_cpp_class and self.exception_check and self.exception_check == '+':
if overloaded_assignment and exception_check and \
self.exception_value != exception_value:
# Handle the case that both the index operator and the assignment
# operator have a c++ exception handler and they are not the same.
translate_double_cpp_exception(code, self.pos, self.type,
self.result(), rhs.result(), self.exception_value,
exception_value, self.in_nogil_context)
else:
# Handle the case that only the index operator has a
# c++ exception handler, or that
# both exception handlers are the same.
translate_cpp_exception(code, self.pos,
"%s = %s;" % (self.result(), rhs.result()),
self.result() if self.type.is_pyobject else None,
self.exception_value, self.in_nogil_context)
else:
code.putln(
"%s = %s;" % (self.result(), rhs.result()))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def _check_byte_value(self, code, rhs):
# TODO: should we do this generally on downcasts, or just here?
assert rhs.type.is_int, repr(rhs.type)
value_code = rhs.result()
if rhs.has_constant_result():
if 0 <= rhs.constant_result < 256:
return value_code
needs_cast = True # make at least the C compiler happy
warning(rhs.pos,
"value outside of range(0, 256)"
" when assigning to byte: %s" % rhs.constant_result,
level=1)
else:
needs_cast = rhs.type != PyrexTypes.c_uchar_type
if not self.nogil:
conditions = []
if rhs.is_literal or rhs.type.signed:
conditions.append('%s < 0' % value_code)
if (rhs.is_literal or not
(rhs.is_temp and rhs.type in (
PyrexTypes.c_uchar_type, PyrexTypes.c_char_type,
PyrexTypes.c_schar_type))):
conditions.append('%s > 255' % value_code)
if conditions:
code.putln("if (unlikely(%s)) {" % ' || '.join(conditions))
code.putln(
'PyErr_SetString(PyExc_ValueError,'
' "byte must be in range(0, 256)"); %s' %
code.error_goto(self.pos))
code.putln("}")
if needs_cast:
value_code = '((unsigned char)%s)' % value_code
return value_code
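# Illustrative behaviour of the byte-range check (hypothetical values;
# a sketch, not normative):
#     cdef bytearray b = bytearray(b"abc")
#     b[0] = 65     # constant in range(0, 256): stored without checks
#     b[1] = 300    # constant out of range: compile-time warning + cast
#     b[2] = c      # non-constant: runtime range check unless in nogil code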
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.generate_subexpr_evaluation_code(code)
#if self.type.is_pyobject:
if self.index.type.is_int:
function = "__Pyx_DelItemInt"
index_code = self.index.result()
code.globalstate.use_utility_code(
UtilityCode.load_cached("DelItemInt", "ObjectHandling.c"))
else:
index_code = self.index.py_result()
if self.base.type is dict_type:
function = "PyDict_DelItem"
else:
function = "PyObject_DelItem"
code.putln(code.error_goto_if_neg(
"%s(%s, %s%s)" % (
function,
self.base.py_result(),
index_code,
self.extra_index_params(code)),
self.pos))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
class BufferIndexNode(_IndexingBaseNode):
"""
Indexing of buffers and memoryviews. This node is created during type
analysis from IndexNode and replaces it.
Attributes:
base - base node being indexed
indices - list of indexing expressions
"""
subexprs = ['base', 'indices']
is_buffer_access = True
# Whether we're assigning to a buffer (in that case it needs to be writable)
writable_needed = False
# Any indexing temp variables that we need to clean up.
index_temps = ()
def analyse_target_types(self, env):
return self.analyse_types(env, getting=False)
def analyse_types(self, env, getting=True):
"""
Analyse types for buffer indexing only. Overridden by memoryview
indexing and slicing subclasses
"""
# self.indices are already analyzed
if not self.base.is_name and not is_pythran_expr(self.base.type):
error(self.pos, "Can only index buffer variables")
self.type = error_type
return self
if not getting:
if not self.base.entry.type.writable:
error(self.pos, "Writing to readonly buffer")
else:
self.writable_needed = True
if self.base.type.is_buffer:
self.base.entry.buffer_aux.writable_needed = True
self.none_error_message = "'NoneType' object is not subscriptable"
self.analyse_buffer_index(env, getting)
self.wrap_in_nonecheck_node(env)
return self
def analyse_buffer_index(self, env, getting):
if is_pythran_expr(self.base.type):
index_with_type_list = [(idx, idx.type) for idx in self.indices]
self.type = PythranExpr(pythran_indexing_type(self.base.type, index_with_type_list))
else:
self.base = self.base.coerce_to_simple(env)
self.type = self.base.type.dtype
self.buffer_type = self.base.type
if getting and (self.type.is_pyobject or self.type.is_pythran_expr):
self.is_temp = True
def analyse_assignment(self, rhs):
"""
Called by IndexNode when this node is assigned to,
with the rhs of the assignment
"""
def wrap_in_nonecheck_node(self, env):
if not env.directives['nonecheck'] or not self.base.may_be_none():
return
self.base = self.base.as_none_safe_node(self.none_error_message)
def nogil_check(self, env):
if self.is_buffer_access or self.is_memview_index:
if self.type.is_pyobject:
error(self.pos, "Cannot access buffer with object dtype without gil")
self.type = error_type
def calculate_result_code(self):
return "(*%s)" % self.buffer_ptr_code
def buffer_entry(self):
base = self.base
if self.base.is_nonecheck:
base = base.arg
return base.type.get_entry(base)
def get_index_in_temp(self, code, ivar):
ret = code.funcstate.allocate_temp(
PyrexTypes.widest_numeric_type(
ivar.type,
PyrexTypes.c_ssize_t_type if ivar.type.signed else PyrexTypes.c_size_t_type),
manage_ref=False)
code.putln("%s = %s;" % (ret, ivar.result()))
return ret
def buffer_lookup_code(self, code):
"""
ndarray[1, 2, 3] and memslice[1, 2, 3]
"""
if self.in_nogil_context:
if self.is_buffer_access or self.is_memview_index:
if code.globalstate.directives['boundscheck']:
warning(self.pos, "Use boundscheck(False) for faster access", level=1)
# Assign indices to temps of at least (s)size_t to allow further index calculations.
self.index_temps = index_temps = [self.get_index_in_temp(code, ivar) for ivar in self.indices]
# Generate buffer access code using these temps
from . import Buffer
buffer_entry = self.buffer_entry()
if buffer_entry.type.is_buffer:
negative_indices = buffer_entry.type.negative_indices
else:
negative_indices = Buffer.buffer_defaults['negative_indices']
return buffer_entry, Buffer.put_buffer_lookup_code(
entry=buffer_entry,
index_signeds=[ivar.type.signed for ivar in self.indices],
index_cnames=index_temps,
directives=code.globalstate.directives,
pos=self.pos, code=code,
negative_indices=negative_indices,
in_nogil_context=self.in_nogil_context)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
self.generate_subexpr_evaluation_code(code)
self.generate_buffer_setitem_code(rhs, code)
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_buffer_setitem_code(self, rhs, code, op=""):
base_type = self.base.type
if is_pythran_expr(base_type) and is_pythran_supported_type(rhs.type):
obj = code.funcstate.allocate_temp(PythranExpr(pythran_type(self.base.type)), manage_ref=False)
# We have to do this because Pythran objects have to be declared
# at the beginning of the function.
# Indeed, Cython uses "goto" statements for error management, and
# RAII doesn't work with that kind of construct.
# Moreover, Pythran expressions don't easily support
# move-assignment.
# Thus, we explicitly destroy and then in-place construct new
# objects in this case.
code.putln("__Pyx_call_destructor(%s);" % obj)
code.putln("new (&%s) decltype(%s){%s};" % (obj, obj, self.base.pythran_result()))
code.putln("%s%s %s= %s;" % (
obj,
pythran_indexing_code(self.indices),
op,
rhs.pythran_result()))
code.funcstate.release_temp(obj)
return
# Used from generate_assignment_code and InPlaceAssignmentNode
buffer_entry, ptrexpr = self.buffer_lookup_code(code)
if self.buffer_type.dtype.is_pyobject:
# Must manage refcounts. Decref what is already there
# and incref what we put in.
ptr = code.funcstate.allocate_temp(buffer_entry.buf_ptr_type,
manage_ref=False)
rhs_code = rhs.result()
code.putln("%s = %s;" % (ptr, ptrexpr))
code.put_gotref("*%s" % ptr)
code.putln("__Pyx_INCREF(%s); __Pyx_DECREF(*%s);" % (
rhs_code, ptr))
code.putln("*%s %s= %s;" % (ptr, op, rhs_code))
code.put_giveref("*%s" % ptr)
code.funcstate.release_temp(ptr)
else:
# Simple case
code.putln("*%s %s= %s;" % (ptrexpr, op, rhs.result()))
def generate_result_code(self, code):
if is_pythran_expr(self.base.type):
res = self.result()
code.putln("__Pyx_call_destructor(%s);" % res)
code.putln("new (&%s) decltype(%s){%s%s};" % (
res,
res,
self.base.pythran_result(),
pythran_indexing_code(self.indices)))
return
buffer_entry, self.buffer_ptr_code = self.buffer_lookup_code(code)
if self.type.is_pyobject:
# is_temp is True, so must pull out value and incref it.
# NOTE: object temporary results for nodes are declared
# as PyObject *, so we need a cast
code.putln("%s = (PyObject *) *%s;" % (self.result(), self.buffer_ptr_code))
code.putln("__Pyx_INCREF((PyObject*)%s);" % self.result())
def free_subexpr_temps(self, code):
for temp in self.index_temps:
code.funcstate.release_temp(temp)
self.index_temps = ()
super(BufferIndexNode, self).free_subexpr_temps(code)
class MemoryViewIndexNode(BufferIndexNode):
is_memview_index = True
is_buffer_access = False
warned_untyped_idx = False
def analyse_types(self, env, getting=True):
# memoryviewslice indexing or slicing
from . import MemoryView
self.is_pythran_mode = has_np_pythran(env)
indices = self.indices
have_slices, indices, newaxes = MemoryView.unellipsify(indices, self.base.type.ndim)
if not getting:
self.writable_needed = True
if self.base.is_name or self.base.is_attribute:
self.base.entry.type.writable_needed = True
self.memslice_index = (not newaxes and len(indices) == self.base.type.ndim)
axes = []
index_type = PyrexTypes.c_py_ssize_t_type
new_indices = []
if len(indices) - len(newaxes) > self.base.type.ndim:
self.type = error_type
error(indices[self.base.type.ndim].pos,
"Too many indices specified for type %s" % self.base.type)
return self
axis_idx = 0
for i, index in enumerate(indices[:]):
index = index.analyse_types(env)
if index.is_none:
self.is_memview_slice = True
new_indices.append(index)
axes.append(('direct', 'strided'))
continue
access, packing = self.base.type.axes[axis_idx]
axis_idx += 1
if index.is_slice:
self.is_memview_slice = True
if index.step.is_none:
axes.append((access, packing))
else:
axes.append((access, 'strided'))
# Coerce start, stop and step to temps of the right type
for attr in ('start', 'stop', 'step'):
value = getattr(index, attr)
if not value.is_none:
value = value.coerce_to(index_type, env)
#value = value.coerce_to_temp(env)
setattr(index, attr, value)
new_indices.append(value)
elif index.type.is_int or index.type.is_pyobject:
if index.type.is_pyobject and not self.warned_untyped_idx:
warning(index.pos, "Index should be typed for more efficient access", level=2)
MemoryViewIndexNode.warned_untyped_idx = True
self.is_memview_index = True
index = index.coerce_to(index_type, env)
indices[i] = index
new_indices.append(index)
else:
self.type = error_type
error(index.pos, "Invalid index for memoryview specified, type %s" % index.type)
return self
### FIXME: replace by MemoryViewSliceNode if is_memview_slice ?
self.is_memview_index = self.is_memview_index and not self.is_memview_slice
self.indices = new_indices
# All indices with all start/stop/step for slices.
# We need to keep this around.
self.original_indices = indices
self.nogil = env.nogil
self.analyse_operation(env, getting, axes)
self.wrap_in_nonecheck_node(env)
return self
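# Illustrative classification of memoryview indices (hypothetical 'm';
# a sketch, not normative):
#     cdef double[:, :] m
#     m[0, 1]         # all integers -> element access (is_memview_index)
#     m[0, 1:]        # contains a slice -> memoryview slicing
#     m[None, :, :]   # None inserts a new ('direct', 'strided') axis
#     m[0, pyidx]     # untyped index: coerced, with a performance warning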
def analyse_operation(self, env, getting, axes):
self.none_error_message = "Cannot index None memoryview slice"
self.analyse_buffer_index(env, getting)
def analyse_broadcast_operation(self, rhs):
"""
Support broadcasting for slice assignment.
E.g.
m_2d[...] = m_1d # or,
m_1d[...] = m_2d # if the leading dimension has extent 1
"""
if self.type.is_memoryviewslice:
lhs = self
if lhs.is_memview_broadcast or rhs.is_memview_broadcast:
lhs.is_memview_broadcast = True
rhs.is_memview_broadcast = True
def analyse_as_memview_scalar_assignment(self, rhs):
lhs = self.analyse_assignment(rhs)
if lhs:
rhs.is_memview_copy_assignment = lhs.is_memview_copy_assignment
return lhs
return self
class MemoryViewSliceNode(MemoryViewIndexNode):
is_memview_slice = True
# No-op slicing operation, this node will be replaced
is_ellipsis_noop = False
is_memview_scalar_assignment = False
is_memview_index = False
is_memview_broadcast = False
def analyse_ellipsis_noop(self, env, getting):
"""Slicing operations needing no evaluation, i.e. m[...] or m[:, :]"""
### FIXME: replace directly
self.is_ellipsis_noop = all(
index.is_slice and index.start.is_none and index.stop.is_none and index.step.is_none
for index in self.indices)
if self.is_ellipsis_noop:
self.type = self.base.type
def analyse_operation(self, env, getting, axes):
from . import MemoryView
if not getting:
self.is_memview_broadcast = True
self.none_error_message = "Cannot assign to None memoryview slice"
else:
self.none_error_message = "Cannot slice None memoryview slice"
self.analyse_ellipsis_noop(env, getting)
if self.is_ellipsis_noop:
return
self.index = None
self.is_temp = True
self.use_managed_ref = True
if not MemoryView.validate_axes(self.pos, axes):
self.type = error_type
return
self.type = PyrexTypes.MemoryViewSliceType(self.base.type.dtype, axes)
if not (self.base.is_simple() or self.base.result_in_temp()):
self.base = self.base.coerce_to_temp(env)
def analyse_assignment(self, rhs):
if not rhs.type.is_memoryviewslice and (
self.type.dtype.assignable_from(rhs.type) or
rhs.type.is_pyobject):
# scalar assignment
return MemoryCopyScalar(self.pos, self)
else:
return MemoryCopySlice(self.pos, self)
def merged_indices(self, indices):
"""Return a new list of indices/slices with 'indices' merged into the current ones
according to slicing rules.
Is used to implement "view[i][j]" => "view[i, j]".
Return None if the indices cannot (easily) be merged at compile time.
"""
if not indices:
return None
# NOTE: Need to evaluate "self.original_indices" here as they might differ from "self.indices".
new_indices = self.original_indices[:]
indices = indices[:]
for i, s in enumerate(self.original_indices):
if s.is_slice:
if s.start.is_none and s.stop.is_none and s.step.is_none:
# Full slice found, replace by index.
new_indices[i] = indices[0]
indices.pop(0)
if not indices:
return new_indices
else:
# Found something non-trivial, e.g. a partial slice.
return None
elif not s.type.is_int:
# Not a slice, not an integer index => could be anything...
return None
if indices:
if len(new_indices) + len(indices) > self.base.type.ndim:
return None
new_indices += indices
return new_indices
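# Illustrative merge (hypothetical 'm'; a sketch, not normative): for a 2-D
# view, "m[i][j]" first analyses "m[i]" with original indices [i, :]; merging
# [j] fills the trailing full slice, yielding "m[i, j]". A partial slice such
# as "m[1:][j]" cannot be merged and returns None here.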
def is_simple(self):
if self.is_ellipsis_noop:
# TODO: fix SimpleCallNode.is_simple()
return self.base.is_simple() or self.base.result_in_temp()
return self.result_in_temp()
def calculate_result_code(self):
"""This is called in case this is a no-op slicing node"""
return self.base.result()
def generate_result_code(self, code):
if self.is_ellipsis_noop:
return ### FIXME: remove
buffer_entry = self.buffer_entry()
have_gil = not self.in_nogil_context
# TODO Mark: this is insane, do it better
have_slices = False
it = iter(self.indices)
for index in self.original_indices:
if index.is_slice:
have_slices = True
if not index.start.is_none:
index.start = next(it)
if not index.stop.is_none:
index.stop = next(it)
if not index.step.is_none:
index.step = next(it)
else:
next(it)
assert not list(it)
buffer_entry.generate_buffer_slice_code(
code, self.original_indices, self.result(),
have_gil=have_gil, have_slices=have_slices,
directives=code.globalstate.directives)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
if self.is_ellipsis_noop:
self.generate_subexpr_evaluation_code(code)
else:
self.generate_evaluation_code(code)
if self.is_memview_scalar_assignment:
self.generate_memoryviewslice_assign_scalar_code(rhs, code)
else:
self.generate_memoryviewslice_setslice_code(rhs, code)
if self.is_ellipsis_noop:
self.generate_subexpr_disposal_code(code)
else:
self.generate_disposal_code(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
class MemoryCopyNode(ExprNode):
"""
Wraps a memoryview slice for slice assignment.
dst: destination memoryview slice
"""
subexprs = ['dst']
def __init__(self, pos, dst):
super(MemoryCopyNode, self).__init__(pos)
self.dst = dst
self.type = dst.type
def generate_assignment_code(self, rhs, code, overloaded_assignment=False):
self.dst.generate_evaluation_code(code)
self._generate_assignment_code(rhs, code)
self.dst.generate_disposal_code(code)
self.dst.free_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
class MemoryCopySlice(MemoryCopyNode):
"""
Copy the contents of slice src to slice dst. Does not support indirect
slices.
memslice1[...] = memslice2
memslice1[:] = memslice2
"""
is_memview_copy_assignment = True
copy_slice_cname = "__pyx_memoryview_copy_contents"
def _generate_assignment_code(self, src, code):
dst = self.dst
src.type.assert_direct_dims(src.pos)
dst.type.assert_direct_dims(dst.pos)
code.putln(code.error_goto_if_neg(
"%s(%s, %s, %d, %d, %d)" % (self.copy_slice_cname,
src.result(), dst.result(),
src.type.ndim, dst.type.ndim,
dst.type.dtype.is_pyobject),
dst.pos))
class MemoryCopyScalar(MemoryCopyNode):
"""
Assign a scalar to a slice. dst must be simple; the scalar is
coerced to the exact dtype of dst, not just to something assignable to it.
memslice1[...] = 0.0
memslice1[:] = 0.0
"""
def __init__(self, pos, dst):
super(MemoryCopyScalar, self).__init__(pos, dst)
self.type = dst.type.dtype
def _generate_assignment_code(self, scalar, code):
from . import MemoryView
self.dst.type.assert_direct_dims(self.dst.pos)
dtype = self.dst.type.dtype
type_decl = dtype.declaration_code("")
slice_decl = self.dst.type.declaration_code("")
code.begin_block()
code.putln("%s __pyx_temp_scalar = %s;" % (type_decl, scalar.result()))
if self.dst.result_in_temp() or self.dst.is_simple():
dst_temp = self.dst.result()
else:
code.putln("%s __pyx_temp_slice = %s;" % (slice_decl, self.dst.result()))
dst_temp = "__pyx_temp_slice"
slice_iter_obj = MemoryView.slice_iter(self.dst.type, dst_temp,
self.dst.type.ndim, code)
p = slice_iter_obj.start_loops()
if dtype.is_pyobject:
code.putln("Py_DECREF(*(PyObject **) %s);" % p)
code.putln("*((%s *) %s) = __pyx_temp_scalar;" % (type_decl, p))
if dtype.is_pyobject:
code.putln("Py_INCREF(__pyx_temp_scalar);")
slice_iter_obj.end_loops()
code.end_block()
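# Illustrative usage (hypothetical 'm'; a sketch, not normative):
#     cdef double[:, :] m
#     m[:, :] = 0.0
# assigns the scalar once to '__pyx_temp_scalar' and then loops over all
# elements; for object dtypes each old element is DECREF'ed before the
# store and the scalar INCREF'ed after it, as generated above.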
class SliceIndexNode(ExprNode):
# 2-element slice indexing
#
# base ExprNode
# start ExprNode or None
# stop ExprNode or None
# slice ExprNode or None constant slice object
subexprs = ['base', 'start', 'stop', 'slice']
slice = None
def infer_type(self, env):
base_type = self.base.infer_type(env)
if base_type.is_string or base_type.is_cpp_class:
return bytes_type
elif base_type.is_pyunicode_ptr:
return unicode_type
elif base_type in (bytes_type, bytearray_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return base_type
elif base_type.is_ptr or base_type.is_array:
return PyrexTypes.c_array_type(base_type.base_type, None)
return py_object_type
def inferable_item_node(self, index=0):
# slicing shouldn't change the result type of the base, but the index might
if index is not not_a_constant and self.start:
if self.start.has_constant_result():
index += self.start.constant_result
else:
index = not_a_constant
return self.base.inferable_item_node(index)
def may_be_none(self):
base_type = self.base.type
if base_type:
if base_type.is_string:
return False
if base_type in (bytes_type, str_type, unicode_type,
basestring_type, list_type, tuple_type):
return False
return ExprNode.may_be_none(self)
def calculate_constant_result(self):
if self.start is None:
start = None
else:
start = self.start.constant_result
if self.stop is None:
stop = None
else:
stop = self.stop.constant_result
self.constant_result = self.base.constant_result[start:stop]
def compile_time_value(self, denv):
base = self.base.compile_time_value(denv)
if self.start is None:
start = 0
else:
start = self.start.compile_time_value(denv)
if self.stop is None:
stop = None
else:
stop = self.stop.compile_time_value(denv)
try:
return base[start:stop]
except Exception as e:
self.compile_time_value_error(e)
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, getting=False)
# when assigning, we must accept any Python type
if node.type.is_pyobject:
node.type = py_object_type
return node
def analyse_types(self, env, getting=True):
self.base = self.base.analyse_types(env)
if self.base.type.is_buffer or self.base.type.is_pythran_expr or self.base.type.is_memoryviewslice:
none_node = NoneNode(self.pos)
index = SliceNode(self.pos,
start=self.start or none_node,
stop=self.stop or none_node,
step=none_node)
index_node = IndexNode(self.pos, index=index, base=self.base)
return index_node.analyse_base_and_index_types(
env, getting=getting, setting=not getting,
analyse_base=False)
if self.start:
self.start = self.start.analyse_types(env)
if self.stop:
self.stop = self.stop.analyse_types(env)
if not env.directives['wraparound']:
check_negative_indices(self.start, self.stop)
base_type = self.base.type
if base_type.is_array and not getting:
# cannot assign directly to C array => try to assign by making a copy
if not self.start and not self.stop:
self.type = base_type
else:
self.type = PyrexTypes.CPtrType(base_type.base_type)
elif base_type.is_string or base_type.is_cpp_string:
self.type = default_str_type(env)
elif base_type.is_pyunicode_ptr:
self.type = unicode_type
elif base_type.is_ptr:
self.type = base_type
elif base_type.is_array:
# we need a ptr type here instead of an array type, as
# array types can result in invalid type casts in the C
# code
self.type = PyrexTypes.CPtrType(base_type.base_type)
else:
self.base = self.base.coerce_to_pyobject(env)
self.type = py_object_type
if base_type.is_builtin_type:
# slicing builtin types returns something of the same type
self.type = base_type
self.base = self.base.as_none_safe_node("'NoneType' object is not subscriptable")
if self.type is py_object_type:
if (not self.start or self.start.is_literal) and \
(not self.stop or self.stop.is_literal):
# cache the constant slice object, in case we need it
none_node = NoneNode(self.pos)
self.slice = SliceNode(
self.pos,
start=copy.deepcopy(self.start or none_node),
stop=copy.deepcopy(self.stop or none_node),
step=none_node
).analyse_types(env)
else:
c_int = PyrexTypes.c_py_ssize_t_type
def allow_none(node, default_value, env):
# Coerce to Py_ssize_t, but allow None as meaning the default slice bound.
from .UtilNodes import EvalWithTempExprNode, ResultRefNode
node_ref = ResultRefNode(node)
new_expr = CondExprNode(
node.pos,
true_val=IntNode(
node.pos,
type=c_int,
value=default_value,
constant_result=int(default_value) if default_value.isdigit() else not_a_constant,
),
false_val=node_ref.coerce_to(c_int, env),
test=PrimaryCmpNode(
node.pos,
operand1=node_ref,
operator='is',
operand2=NoneNode(node.pos),
).analyse_types(env)
).analyse_result_type(env)
return EvalWithTempExprNode(node_ref, new_expr)
if self.start:
if self.start.type.is_pyobject:
self.start = allow_none(self.start, '0', env)
self.start = self.start.coerce_to(c_int, env)
if self.stop:
if self.stop.type.is_pyobject:
self.stop = allow_none(self.stop, 'PY_SSIZE_T_MAX', env)
self.stop = self.stop.coerce_to(c_int, env)
self.is_temp = 1
return self
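# Illustrative bound handling (a sketch, not normative): for "obj[i:j]" with
# Python-object bounds, allow_none() conceptually rewrites each bound as
#     start = 0              if i is None else <Py_ssize_t> i
#     stop  = PY_SSIZE_T_MAX if j is None else <Py_ssize_t> j
# evaluating i/j only once via a temp (EvalWithTempExprNode).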
def analyse_as_type(self, env):
base_type = self.base.analyse_as_type(env)
if base_type and not base_type.is_pyobject:
if not self.start and not self.stop:
# memory view
from . import MemoryView
env.use_utility_code(MemoryView.view_utility_code)
none_node = NoneNode(self.pos)
slice_node = SliceNode(
self.pos,
start=none_node,
stop=none_node,
step=none_node,
)
return PyrexTypes.MemoryViewSliceType(
base_type, MemoryView.get_axes_specs(env, [slice_node]))
return None
nogil_check = Node.gil_error
gil_message = "Slicing Python object"
get_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Get'})
set_slice_utility_code = TempitaUtilityCode.load(
"SliceObject", "ObjectHandling.c", context={'access': 'Set'})
def coerce_to(self, dst_type, env):
if ((self.base.type.is_string or self.base.type.is_cpp_string)
and dst_type in (bytes_type, bytearray_type, str_type, unicode_type)):
if (dst_type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(self.pos,
"default encoding required for conversion from '%s' to '%s'" %
(self.base.type, dst_type))
self.type = dst_type
if dst_type.is_array and self.base.type.is_array:
if not self.start and not self.stop:
# redundant slice building, copy C arrays directly
return self.base.coerce_to(dst_type, env)
# else: check array size if possible
return super(SliceIndexNode, self).coerce_to(dst_type, env)
def generate_result_code(self, code):
if not self.type.is_pyobject:
error(self.pos,
"Slicing is not currently supported for '%s'." % self.type)
return
base_result = self.base.result()
result = self.result()
start_code = self.start_code()
stop_code = self.stop_code()
if self.base.type.is_string:
base_result = self.base.result()
if self.base.type not in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
base_result = '((const char*)%s)' % base_result
if self.type is bytearray_type:
type_name = 'ByteArray'
else:
type_name = self.type.name.title()
if self.stop is None:
code.putln(
"%s = __Pyx_Py%s_FromString(%s + %s); %s" % (
result,
type_name,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_Py%s_FromStringAndSize(%s + %s, %s - %s); %s" % (
result,
type_name,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type.is_pyunicode_ptr:
base_result = self.base.result()
if self.base.type != PyrexTypes.c_py_unicode_ptr_type:
base_result = '((const Py_UNICODE*)%s)' % base_result
if self.stop is None:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicode(%s + %s); %s" % (
result,
base_result,
start_code,
code.error_goto_if_null(result, self.pos)))
else:
code.putln(
"%s = __Pyx_PyUnicode_FromUnicodeAndLength(%s + %s, %s - %s); %s" % (
result,
base_result,
start_code,
stop_code,
start_code,
code.error_goto_if_null(result, self.pos)))
elif self.base.type is unicode_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyUnicode_Substring", "StringTools.c"))
code.putln(
"%s = __Pyx_PyUnicode_Substring(%s, %s, %s); %s" % (
result,
base_result,
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
elif self.type is py_object_type:
code.globalstate.use_utility_code(self.get_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.putln(
"%s = __Pyx_PyObject_GetSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d); %s" % (
result,
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound']),
code.error_goto_if_null(result, self.pos)))
else:
if self.base.type is list_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyList_GetSlice'
elif self.base.type is tuple_type:
code.globalstate.use_utility_code(
TempitaUtilityCode.load_cached("SliceTupleAndList", "ObjectHandling.c"))
cfunc = '__Pyx_PyTuple_GetSlice'
else:
cfunc = 'PySequence_GetSlice'
code.putln(
"%s = %s(%s, %s, %s); %s" % (
result,
cfunc,
self.base.py_result(),
start_code,
stop_code,
code.error_goto_if_null(result, self.pos)))
code.put_gotref(self.py_result())
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
self.generate_subexpr_evaluation_code(code)
if self.type.is_pyobject:
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_SetSlice(%s, %s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
rhs.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
else:
start_offset = self.start_code() if self.start else '0'
if rhs.type.is_array:
array_length = rhs.type.size
self.generate_slice_guard_code(code, array_length)
else:
array_length = '%s - %s' % (self.stop_code(), start_offset)
code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
code.putln("memcpy(&(%s[%s]), %s, sizeof(%s[0]) * (%s));" % (
self.base.result(), start_offset,
rhs.result(),
self.base.result(), array_length
))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
rhs.generate_disposal_code(code)
rhs.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
if not self.base.type.is_pyobject:
error(self.pos,
"Deleting slices is only supported for Python types, not '%s'." % self.type)
return
self.generate_subexpr_evaluation_code(code)
code.globalstate.use_utility_code(self.set_slice_utility_code)
(has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice) = self.get_slice_config()
code.put_error_if_neg(self.pos,
"__Pyx_PyObject_DelSlice(%s, %s, %s, %s, %s, %s, %d, %d, %d)" % (
self.base.py_result(),
c_start, c_stop,
py_start, py_stop, py_slice,
has_c_start, has_c_stop,
bool(code.globalstate.directives['wraparound'])))
self.generate_subexpr_disposal_code(code)
self.free_subexpr_temps(code)
def get_slice_config(self):
has_c_start, c_start, py_start = False, '0', 'NULL'
if self.start:
has_c_start = not self.start.type.is_pyobject
if has_c_start:
c_start = self.start.result()
else:
py_start = '&%s' % self.start.py_result()
has_c_stop, c_stop, py_stop = False, '0', 'NULL'
if self.stop:
has_c_stop = not self.stop.type.is_pyobject
if has_c_stop:
c_stop = self.stop.result()
else:
py_stop = '&%s' % self.stop.py_result()
py_slice = self.slice and '&%s' % self.slice.py_result() or 'NULL'
return (has_c_start, has_c_stop, c_start, c_stop,
py_start, py_stop, py_slice)
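# Illustrative result (hypothetical 's' and 'n'; a sketch, not normative):
# for "s[2:n]" with a C-typed start and a Python-object stop, this returns
#     has_c_start=True,  c_start='2',  py_start='NULL',
#     has_c_stop=False,  c_stop='0',   py_stop='&<result of n>',
#     py_slice='NULL'    (no cached constant slice object)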
def generate_slice_guard_code(self, code, target_size):
if not self.base.type.is_array:
return
slice_size = self.base.type.size
try:
total_length = slice_size = int(slice_size)
except ValueError:
total_length = None
start = stop = None
if self.stop:
stop = self.stop.result()
try:
stop = int(stop)
if stop < 0:
if total_length is None:
slice_size = '%s + %d' % (slice_size, stop)
else:
slice_size += stop
else:
slice_size = stop
stop = None
except ValueError:
pass
if self.start:
start = self.start.result()
try:
start = int(start)
if start < 0:
if total_length is None:
start = '%s + %d' % (self.base.type.size, start)
else:
start += total_length
if isinstance(slice_size, _py_int_types):
slice_size -= start
else:
slice_size = '%s - (%s)' % (slice_size, start)
start = None
except ValueError:
pass
runtime_check = None
compile_time_check = False
try:
int_target_size = int(target_size)
except ValueError:
int_target_size = None
else:
compile_time_check = isinstance(slice_size, _py_int_types)
if compile_time_check and slice_size < 0:
if int_target_size > 0:
error(self.pos, "Assignment to empty slice.")
elif compile_time_check and start is None and stop is None:
# we know the exact slice length
if int_target_size != slice_size:
error(self.pos, "Assignment to slice of wrong length, expected %s, got %s" % (
slice_size, target_size))
elif start is not None:
if stop is None:
stop = slice_size
runtime_check = "(%s)-(%s)" % (stop, start)
elif stop is not None:
runtime_check = stop
else:
runtime_check = slice_size
if runtime_check:
code.putln("if (unlikely((%s) != (%s))) {" % (runtime_check, target_size))
code.putln(
'PyErr_Format(PyExc_ValueError, "Assignment to slice of wrong length,'
' expected %%" CYTHON_FORMAT_SSIZE_T "d, got %%" CYTHON_FORMAT_SSIZE_T "d",'
' (Py_ssize_t)(%s), (Py_ssize_t)(%s));' % (
target_size, runtime_check))
code.putln(code.error_goto(self.pos))
code.putln("}")
def start_code(self):
if self.start:
return self.start.result()
else:
return "0"
def stop_code(self):
if self.stop:
return self.stop.result()
elif self.base.type.is_array:
return self.base.type.size
else:
return "PY_SSIZE_T_MAX"
def calculate_result_code(self):
# self.result() is not used, but this method must exist
return ""
class SliceNode(ExprNode):
# start:stop:step in subscript list
#
# start ExprNode
# stop ExprNode
# step ExprNode
subexprs = ['start', 'stop', 'step']
is_slice = True
type = slice_type
is_temp = 1
def calculate_constant_result(self):
self.constant_result = slice(
self.start.constant_result,
self.stop.constant_result,
self.step.constant_result)
def compile_time_value(self, denv):
start = self.start.compile_time_value(denv)
stop = self.stop.compile_time_value(denv)
step = self.step.compile_time_value(denv)
try:
return slice(start, stop, step)
except Exception as e:
self.compile_time_value_error(e)
def may_be_none(self):
return False
def analyse_types(self, env):
start = self.start.analyse_types(env)
stop = self.stop.analyse_types(env)
step = self.step.analyse_types(env)
self.start = start.coerce_to_pyobject(env)
self.stop = stop.coerce_to_pyobject(env)
self.step = step.coerce_to_pyobject(env)
if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
self.is_literal = True
self.is_temp = False
return self
gil_message = "Constructing Python slice object"
def calculate_result_code(self):
return self.result_code
def generate_result_code(self, code):
if self.is_literal:
dedup_key = make_dedup_key(self.type, (self,))
self.result_code = code.get_py_const(py_object_type, 'slice', cleanup_level=2, dedup_key=dedup_key)
code = code.get_cached_constants_writer(self.result_code)
if code is None:
return # already initialised
code.mark_pos(self.pos)
code.putln(
"%s = PySlice_New(%s, %s, %s); %s" % (
self.result(),
self.start.py_result(),
self.stop.py_result(),
self.step.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if self.is_literal:
code.put_giveref(self.py_result())
class SliceIntNode(SliceNode):
# start:stop:step in subscript list
# This is just a node to hold the start, stop and step nodes, each of which
# may be converted to an integer. It does not generate a Python slice object.
#
# start ExprNode
# stop ExprNode
# step ExprNode
is_temp = 0
def calculate_constant_result(self):
self.constant_result = slice(
self.start.constant_result,
self.stop.constant_result,
self.step.constant_result)
def compile_time_value(self, denv):
start = self.start.compile_time_value(denv)
stop = self.stop.compile_time_value(denv)
step = self.step.compile_time_value(denv)
try:
return slice(start, stop, step)
except Exception as e:
self.compile_time_value_error(e)
def may_be_none(self):
return False
def analyse_types(self, env):
self.start = self.start.analyse_types(env)
self.stop = self.stop.analyse_types(env)
self.step = self.step.analyse_types(env)
if not self.start.is_none:
self.start = self.start.coerce_to_integer(env)
if not self.stop.is_none:
self.stop = self.stop.coerce_to_integer(env)
if not self.step.is_none:
self.step = self.step.coerce_to_integer(env)
if self.start.is_literal and self.stop.is_literal and self.step.is_literal:
self.is_literal = True
self.is_temp = False
return self
def calculate_result_code(self):
pass
def generate_result_code(self, code):
for a in (self.start, self.stop, self.step):
if isinstance(a, CloneNode):
a.arg.result()
class CallNode(ExprNode):
# allow overriding the default 'may_be_none' behaviour
may_return_none = None
def infer_type(self, env):
# TODO(robertwb): Reduce redundancy with analyse_types.
function = self.function
func_type = function.infer_type(env)
if isinstance(function, NewExprNode):
# note: needs call to infer_type() above
return PyrexTypes.CPtrType(function.class_type)
if func_type is py_object_type:
# function might have lied for safety => try to find better type
entry = getattr(function, 'entry', None)
if entry is not None:
func_type = entry.type or func_type
if func_type.is_ptr:
func_type = func_type.base_type
if func_type.is_cfunction:
if getattr(self.function, 'entry', None) and hasattr(self, 'args'):
alternatives = self.function.entry.all_alternatives()
arg_types = [arg.infer_type(env) for arg in self.args]
func_entry = PyrexTypes.best_match(arg_types, alternatives)
if func_entry:
func_type = func_entry.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type.return_type
return func_type.return_type
elif func_type is type_type:
if function.is_name and function.entry and function.entry.type:
result_type = function.entry.type
if result_type.is_extension_type:
return result_type
elif result_type.is_builtin_type:
if function.entry.name == 'float':
return PyrexTypes.c_double_type
elif function.entry.name in Builtin.types_that_construct_their_instance:
return result_type
return py_object_type
def type_dependencies(self, env):
# TODO: Update when Danilo's C++ code is merged in, to handle the
# case of function overloading.
return self.function.type_dependencies(env)
def is_simple(self):
# C function calls could be considered simple, but they may
# have side effects that matter when multiple operations must
# be executed in order, e.g. when constructing the argument
# sequence for a function call or comparing values.
return False
def may_be_none(self):
if self.may_return_none is not None:
return self.may_return_none
func_type = self.function.type
if func_type is type_type and self.function.is_name:
entry = self.function.entry
if entry.type.is_extension_type:
return False
if (entry.type.is_builtin_type and
entry.name in Builtin.types_that_construct_their_instance):
return False
return ExprNode.may_be_none(self)
def set_py_result_type(self, function, func_type=None):
if func_type is None:
func_type = function.type
if func_type is Builtin.type_type and (
function.is_name and
function.entry and
function.entry.is_builtin and
function.entry.name in Builtin.types_that_construct_their_instance):
# calling a builtin type that returns a specific object type
if function.entry.name == 'float':
# the following will come true later on in a transform
self.type = PyrexTypes.c_double_type
self.result_ctype = PyrexTypes.c_double_type
else:
self.type = Builtin.builtin_types[function.entry.name]
self.result_ctype = py_object_type
self.may_return_none = False
elif function.is_name and function.type_entry:
# We are calling an extension type constructor. As long as we do not
# support __new__(), the result type is clear
self.type = function.type_entry.type
self.result_ctype = py_object_type
self.may_return_none = False
else:
self.type = py_object_type
def analyse_as_type_constructor(self, env):
type = self.function.analyse_as_type(env)
if type and type.is_struct_or_union:
args, kwds = self.explicit_args_kwds()
items = []
for arg, member in zip(args, type.scope.var_entries):
items.append(DictItemNode(pos=arg.pos, key=StringNode(pos=arg.pos, value=member.name), value=arg))
if kwds:
items += kwds.key_value_pairs
self.key_value_pairs = items
self.__class__ = DictNode
self.analyse_types(env) # FIXME
self.coerce_to(type, env)
return True
elif type and type.is_cpp_class:
self.args = [ arg.analyse_types(env) for arg in self.args ]
constructor = type.scope.lookup("")
if not constructor:
error(self.function.pos, "no constructor found for C++ type '%s'" % self.function.name)
self.type = error_type
return self
self.function = RawCNameExprNode(self.function.pos, constructor.type)
self.function.entry = constructor
self.function.set_cname(type.empty_declaration_code())
self.analyse_c_function_call(env)
self.type = type
return True
def is_lvalue(self):
return self.type.is_reference
def nogil_check(self, env):
func_type = self.function_type()
if func_type.is_pyobject:
self.gil_error()
elif not func_type.is_error and not getattr(func_type, 'nogil', False):
self.gil_error()
gil_message = "Calling gil-requiring function"
class SimpleCallNode(CallNode):
# Function call without keyword, * or ** args.
#
# function ExprNode
# args [ExprNode]
# arg_tuple ExprNode or None used internally
# self ExprNode or None used internally
# coerced_self ExprNode or None used internally
# wrapper_call bool used internally
# has_optional_args bool used internally
# nogil bool used internally
subexprs = ['self', 'coerced_self', 'function', 'args', 'arg_tuple']
self = None
coerced_self = None
arg_tuple = None
wrapper_call = False
has_optional_args = False
nogil = False
analysed = False
overflowcheck = False
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
args = [arg.compile_time_value(denv) for arg in self.args]
try:
return function(*args)
except Exception as e:
self.compile_time_value_error(e)
def analyse_as_type(self, env):
attr = self.function.as_cython_attribute()
if attr == 'pointer':
if len(self.args) != 1:
error(self.args.pos, "only one type allowed.")
else:
type = self.args[0].analyse_as_type(env)
if not type:
error(self.args[0].pos, "Unknown type")
else:
return PyrexTypes.CPtrType(type)
elif attr == 'typeof':
if len(self.args) != 1:
error(self.args.pos, "only one type allowed.")
operand = self.args[0].analyse_types(env)
return operand.type
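# Illustrative uses of the Cython attributes handled above (a sketch,
# not normative):
#     cython.pointer(cython.int)   # analysed as the C type 'int *'
#     cython.typeof(expr)          # yields the static type of 'expr'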
def explicit_args_kwds(self):
return self.args, None
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
if self.analysed:
return self
self.analysed = True
self.function.is_called = 1
self.function = self.function.analyse_types(env)
function = self.function
if function.is_attribute and function.entry and function.entry.is_cmethod:
# Take ownership of the object from which the attribute
# was obtained, because we need to pass it as 'self'.
self.self = function.obj
function.obj = CloneNode(self.self)
func_type = self.function_type()
self.is_numpy_call_with_exprs = False
if (has_np_pythran(env) and function.is_numpy_attribute and
pythran_is_numpy_func_supported(function)):
has_pythran_args = True
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env)
for arg in self.arg_tuple.args:
has_pythran_args &= is_pythran_supported_node_or_none(arg)
self.is_numpy_call_with_exprs = bool(has_pythran_args)
if self.is_numpy_call_with_exprs:
env.add_include_file(pythran_get_func_include_file(function))
return NumPyMethodCallNode.from_node(
self,
function_cname=pythran_functor(function),
arg_tuple=self.arg_tuple,
type=PythranExpr(pythran_func_type(function, self.arg_tuple.args)),
)
elif func_type.is_pyobject:
self.arg_tuple = TupleNode(self.pos, args = self.args)
self.arg_tuple = self.arg_tuple.analyse_types(env).coerce_to_pyobject(env)
self.args = None
self.set_py_result_type(function, func_type)
self.is_temp = 1
else:
self.args = [ arg.analyse_types(env) for arg in self.args ]
self.analyse_c_function_call(env)
if func_type.exception_check == '+':
self.is_temp = True
return self
def function_type(self):
# Return the type of the function being called, coercing a function
# pointer to a function if necessary. If the function has fused
# arguments, return the specific type.
func_type = self.function.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type
def analyse_c_function_call(self, env):
func_type = self.function.type
if func_type is error_type:
self.type = error_type
return
if func_type.is_cfunction and func_type.is_static_method:
if self.self and self.self.type.is_extension_type:
# To support this we'd need to pass self to determine whether
# it was overloaded in Python space (possibly via a Cython
# superclass turning a cdef method into a cpdef one).
error(self.pos, "Cannot call a static method on an instance variable.")
args = self.args
elif self.self:
args = [self.self] + self.args
else:
args = self.args
if func_type.is_cpp_class:
overloaded_entry = self.function.type.scope.lookup("operator()")
if overloaded_entry is None:
self.type = PyrexTypes.error_type
self.result_code = ""
return
elif hasattr(self.function, 'entry'):
overloaded_entry = self.function.entry
elif self.function.is_subscript and self.function.is_fused_index:
overloaded_entry = self.function.type.entry
else:
overloaded_entry = None
if overloaded_entry:
if self.function.type.is_fused:
functypes = self.function.type.get_all_specialized_function_types()
alternatives = [f.entry for f in functypes]
else:
alternatives = overloaded_entry.all_alternatives()
entry = PyrexTypes.best_match(
[arg.type for arg in args], alternatives, self.pos, env, args)
if not entry:
self.type = PyrexTypes.error_type
self.result_code = ""
return
entry.used = True
if not func_type.is_cpp_class:
self.function.entry = entry
self.function.type = entry.type
func_type = self.function_type()
else:
entry = None
func_type = self.function_type()
if not func_type.is_cfunction:
error(self.pos, "Calling non-function type '%s'" % func_type)
self.type = PyrexTypes.error_type
self.result_code = ""
return
# Check no. of args
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(args)
if func_type.optional_arg_count and expected_nargs != actual_nargs:
self.has_optional_args = 1
self.is_temp = 1
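            # (Illustration: optional C arguments come from cdef signatures
            # with defaults, e.g. "cdef int f(int a, int b=1)".  Calling
            # "f(2, 3)" passes the optional 'b' through the struct built in
            # generate_result_code(); calling "f(2)" passes NULL instead.)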
# check 'self' argument
if entry and entry.is_cmethod and func_type.args and not func_type.is_static_method:
formal_arg = func_type.args[0]
arg = args[0]
if formal_arg.not_none:
if self.self:
self.self = self.self.as_none_safe_node(
"'NoneType' object has no attribute '%{0}s'".format('.30' if len(entry.name) <= 30 else ''),
error='PyExc_AttributeError',
format_args=[entry.name])
else:
# unbound method
arg = arg.as_none_safe_node(
"descriptor '%s' requires a '%s' object but received a 'NoneType'",
format_args=[entry.name, formal_arg.type.name])
if self.self:
if formal_arg.accept_builtin_subtypes:
arg = CMethodSelfCloneNode(self.self)
else:
arg = CloneNode(self.self)
arg = self.coerced_self = arg.coerce_to(formal_arg.type, env)
elif formal_arg.type.is_builtin_type:
# special case: unbound methods of builtins accept subtypes
arg = arg.coerce_to(formal_arg.type, env)
if arg.type.is_builtin_type and isinstance(arg, PyTypeTestNode):
arg.exact_builtin_type = False
args[0] = arg
# Coerce arguments
some_args_in_temps = False
for i in range(min(max_nargs, actual_nargs)):
formal_arg = func_type.args[i]
formal_type = formal_arg.type
arg = args[i].coerce_to(formal_type, env)
if formal_arg.not_none:
# C methods must do the None checks at *call* time
arg = arg.as_none_safe_node(
"cannot pass None into a C function argument that is declared 'not None'")
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if i == 0 and self.self is not None:
# a method's cloned "self" argument is ok
pass
elif arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
args[i] = arg
# handle additional varargs parameters
for i in range(max_nargs, actual_nargs):
arg = args[i]
if arg.type.is_pyobject:
if arg.type is str_type:
arg_ctype = PyrexTypes.c_char_ptr_type
else:
arg_ctype = arg.type.default_coerced_ctype()
if arg_ctype is None:
error(self.args[i].pos,
"Python object cannot be passed as a varargs parameter")
else:
args[i] = arg = arg.coerce_to(arg_ctype, env)
if arg.is_temp and i > 0:
some_args_in_temps = True
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
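            # (Illustration: in "f(a[i], g())" where g() needs a temp, the
            # generated code evaluates temps first, so g() could run before
            # a[i] is read, inverting the source evaluation order.)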
for i in range(actual_nargs-1):
if i == 0 and self.self is not None:
continue # self is ok
arg = args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0 or i == 1 and self.self is not None: # skip first arg
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
self.args[:] = args
# Calc result type and code fragment
if isinstance(self.function, NewExprNode):
self.type = PyrexTypes.CPtrType(self.function.class_type)
else:
self.type = func_type.return_type
if self.function.is_name or self.function.is_attribute:
func_entry = self.function.entry
if func_entry and (func_entry.utility_code or func_entry.utility_code_definition):
self.is_temp = 1 # currently doesn't work for self.calculate_result_code()
if self.type.is_pyobject:
self.result_ctype = py_object_type
self.is_temp = 1
elif func_type.exception_value is not None or func_type.exception_check:
self.is_temp = 1
elif self.type.is_memoryviewslice:
self.is_temp = 1
# func_type.exception_check = True
if self.is_temp and self.type.is_reference:
self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
# Called in 'nogil' context?
self.nogil = env.nogil
if (self.nogil and
func_type.exception_check and
func_type.exception_check != '+'):
env.use_utility_code(pyerr_occurred_withgil_utility_code)
# C++ exception handler
if func_type.exception_check == '+':
if func_type.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
self.overflowcheck = env.directives['overflowcheck']
def calculate_result_code(self):
return self.c_call_code()
def c_call_code(self):
func_type = self.function_type()
if self.type is PyrexTypes.error_type or not func_type.is_cfunction:
return ""
formal_args = func_type.args
arg_list_code = []
args = list(zip(formal_args, self.args))
max_nargs = len(func_type.args)
expected_nargs = max_nargs - func_type.optional_arg_count
actual_nargs = len(self.args)
for formal_arg, actual_arg in args[:expected_nargs]:
arg_code = actual_arg.result_as(formal_arg.type)
arg_list_code.append(arg_code)
if func_type.is_overridable:
arg_list_code.append(str(int(self.wrapper_call or self.function.entry.is_unbound_cmethod)))
if func_type.optional_arg_count:
if expected_nargs == actual_nargs:
optional_args = 'NULL'
else:
optional_args = "&%s" % self.opt_arg_struct
arg_list_code.append(optional_args)
for actual_arg in self.args[len(formal_args):]:
arg_list_code.append(actual_arg.result())
result = "%s(%s)" % (self.function.result(), ', '.join(arg_list_code))
return result
def is_c_result_required(self):
func_type = self.function_type()
if not func_type.exception_value or func_type.exception_check == '+':
return False # skip allocation of unused result temp
return True
def generate_evaluation_code(self, code):
function = self.function
if function.is_name or function.is_attribute:
code.globalstate.use_entry_utility_code(function.entry)
abs_function_cnames = ('abs', 'labs', '__Pyx_abs_longlong')
is_signed_int = self.type.is_int and self.type.signed
if self.overflowcheck and is_signed_int and function.result() in abs_function_cnames:
code.globalstate.use_utility_code(UtilityCode.load_cached("Common", "Overflow.c"))
code.putln('if (unlikely(%s == __PYX_MIN(%s))) {\
PyErr_SetString(PyExc_OverflowError,\
"Trying to take the absolute value of the most negative integer is not defined."); %s; }' % (
self.args[0].result(),
self.args[0].type.empty_declaration_code(),
code.error_goto(self.pos)))
if not function.type.is_pyobject or len(self.arg_tuple.args) > 1 or (
self.arg_tuple.args and self.arg_tuple.is_literal):
super(SimpleCallNode, self).generate_evaluation_code(code)
return
# Special case 0-args and try to avoid explicit tuple creation for Python calls with 1 arg.
arg = self.arg_tuple.args[0] if self.arg_tuple.args else None
subexprs = (self.self, self.coerced_self, function, arg)
for subexpr in subexprs:
if subexpr is not None:
subexpr.generate_evaluation_code(code)
code.mark_pos(self.pos)
assert self.is_temp
self.allocate_temp_result(code)
if arg is None:
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCallNoArg", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_CallNoArg(%s); %s" % (
self.result(),
function.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
else:
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCallOneArg", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_CallOneArg(%s, %s); %s" % (
self.result(),
function.py_result(),
arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for subexpr in subexprs:
if subexpr is not None:
subexpr.generate_disposal_code(code)
subexpr.free_temps(code)
def generate_result_code(self, code):
func_type = self.function_type()
if func_type.is_pyobject:
arg_code = self.arg_tuple.py_result()
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
self.function.py_result(),
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif func_type.is_cfunction:
if self.has_optional_args:
actual_nargs = len(self.args)
expected_nargs = len(func_type.args) - func_type.optional_arg_count
self.opt_arg_struct = code.funcstate.allocate_temp(
func_type.op_arg_struct.base_type, manage_ref=True)
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
Naming.pyrex_prefix + "n",
len(self.args) - expected_nargs))
args = list(zip(func_type.args, self.args))
for formal_arg, actual_arg in args[expected_nargs:actual_nargs]:
code.putln("%s.%s = %s;" % (
self.opt_arg_struct,
func_type.opt_arg_cname(formal_arg.name),
actual_arg.result_as(formal_arg.type)))
exc_checks = []
if self.type.is_pyobject and self.is_temp:
exc_checks.append("!%s" % self.result())
elif self.type.is_memoryviewslice:
assert self.is_temp
exc_checks.append(self.type.error_condition(self.result()))
elif func_type.exception_check != '+':
exc_val = func_type.exception_value
exc_check = func_type.exception_check
if exc_val is not None:
exc_checks.append("%s == %s" % (self.result(), func_type.return_type.cast_code(exc_val)))
if exc_check:
if self.nogil:
exc_checks.append("__Pyx_ErrOccurredWithGIL()")
else:
exc_checks.append("PyErr_Occurred()")
if self.is_temp or exc_checks:
rhs = self.c_call_code()
if self.result():
lhs = "%s = " % self.result()
if self.is_temp and self.type.is_pyobject:
#return_type = self.type # func_type.return_type
#print "SimpleCallNode.generate_result_code: casting", rhs, \
# "from", return_type, "to pyobject" ###
rhs = typecast(py_object_type, self.type, rhs)
else:
lhs = ""
if func_type.exception_check == '+':
translate_cpp_exception(code, self.pos, '%s%s;' % (lhs, rhs),
self.result() if self.type.is_pyobject else None,
func_type.exception_value, self.nogil)
else:
if exc_checks:
goto_error = code.error_goto_if(" && ".join(exc_checks), self.pos)
else:
goto_error = ""
code.putln("%s%s; %s" % (lhs, rhs, goto_error))
if self.type.is_pyobject and self.result():
code.put_gotref(self.py_result())
if self.has_optional_args:
code.funcstate.release_temp(self.opt_arg_struct)
class NumPyMethodCallNode(ExprNode):
# Pythran call to a NumPy function or method.
#
    #  function_cname  string     the function/method to call
    #  arg_tuple       TupleNode  the arguments as an args tuple
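    #
    #  Rough sketch (informal): for a supported call like "np.add(a, b)"
    #  under np_pythran, generate_evaluation_code() below placement-news a
    #  Pythran functor expression into the result temp instead of going
    #  through a Python call.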
subexprs = ['arg_tuple']
is_temp = True
may_return_none = True
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
assert self.arg_tuple.mult_factor is None
args = self.arg_tuple.args
for arg in args:
arg.generate_evaluation_code(code)
code.putln("// function evaluation code for numpy function")
code.putln("__Pyx_call_destructor(%s);" % self.result())
code.putln("new (&%s) decltype(%s){%s{}(%s)};" % (
self.result(),
self.result(),
self.function_cname,
", ".join(a.pythran_result() for a in args)))
class PyMethodCallNode(SimpleCallNode):
# Specialised call to a (potential) PyMethodObject with non-constant argument tuple.
# Allows the self argument to be injected directly instead of repacking a tuple for it.
#
    #  function    ExprNode   the function/method object to call
    #  arg_tuple   TupleNode  the arguments for the args tuple
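    #
    #  Informal sketch: for "obj.meth(x)" the generated C code checks at
    #  runtime whether the callable is a PyMethodObject; if so, it unpacks
    #  the underlying function and passes the bound object as an extra
    #  first argument, avoiding re-packing an argument tuple on the fast
    #  paths.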
subexprs = ['function', 'arg_tuple']
is_temp = True
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.function.generate_evaluation_code(code)
assert self.arg_tuple.mult_factor is None
args = self.arg_tuple.args
for arg in args:
arg.generate_evaluation_code(code)
# make sure function is in temp so that we can replace the reference below if it's a method
reuse_function_temp = self.function.is_temp
if reuse_function_temp:
function = self.function.result()
else:
function = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
self.function.make_owned_reference(code)
code.put("%s = %s; " % (function, self.function.py_result()))
self.function.generate_disposal_code(code)
self.function.free_temps(code)
self_arg = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln("%s = NULL;" % self_arg)
arg_offset_cname = None
if len(args) > 1:
arg_offset_cname = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = 0;" % arg_offset_cname)
def attribute_is_likely_method(attr):
obj = attr.obj
if obj.is_name and obj.entry.is_pyglobal:
return False # more likely to be a function
return True
if self.function.is_attribute:
likely_method = 'likely' if attribute_is_likely_method(self.function) else 'unlikely'
elif self.function.is_name and self.function.cf_state:
# not an attribute itself, but might have been assigned from one (e.g. bound method)
for assignment in self.function.cf_state:
value = assignment.rhs
if value and value.is_attribute and value.obj.type and value.obj.type.is_pyobject:
if attribute_is_likely_method(value):
likely_method = 'likely'
break
else:
likely_method = 'unlikely'
else:
likely_method = 'unlikely'
code.putln("if (CYTHON_UNPACK_METHODS && %s(PyMethod_Check(%s))) {" % (likely_method, function))
code.putln("%s = PyMethod_GET_SELF(%s);" % (self_arg, function))
# the following is always true in Py3 (kept only for safety),
# but is false for unbound methods in Py2
code.putln("if (likely(%s)) {" % self_arg)
code.putln("PyObject* function = PyMethod_GET_FUNCTION(%s);" % function)
code.put_incref(self_arg, py_object_type)
code.put_incref("function", py_object_type)
        # free the method object as early as possible to enable reuse from CPython's freelist
code.put_decref_set(function, "function")
if len(args) > 1:
code.putln("%s = 1;" % arg_offset_cname)
code.putln("}")
code.putln("}")
if not args:
# fastest special case: try to avoid tuple creation
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCallNoArg", "ObjectHandling.c"))
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
code.putln(
"%s = (%s) ? __Pyx_PyObject_CallOneArg(%s, %s) : __Pyx_PyObject_CallNoArg(%s);" % (
self.result(), self_arg,
function, self_arg,
function))
code.put_xdecref_clear(self_arg, py_object_type)
code.funcstate.release_temp(self_arg)
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
elif len(args) == 1:
# fastest special case: try to avoid tuple creation
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCall2Args", "ObjectHandling.c"))
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCallOneArg", "ObjectHandling.c"))
arg = args[0]
code.putln(
"%s = (%s) ? __Pyx_PyObject_Call2Args(%s, %s, %s) : __Pyx_PyObject_CallOneArg(%s, %s);" % (
self.result(), self_arg,
function, self_arg, arg.py_result(),
function, arg.py_result()))
code.put_xdecref_clear(self_arg, py_object_type)
code.funcstate.release_temp(self_arg)
arg.generate_disposal_code(code)
arg.free_temps(code)
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyFunctionFastCall", "ObjectHandling.c"))
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyCFunctionFastCall", "ObjectHandling.c"))
for test_func, call_prefix in [('PyFunction_Check', 'Py'), ('__Pyx_PyFastCFunction_Check', 'PyC')]:
code.putln("#if CYTHON_FAST_%sCALL" % call_prefix.upper())
code.putln("if (%s(%s)) {" % (test_func, function))
code.putln("PyObject *%s[%d] = {%s, %s};" % (
Naming.quick_temp_cname,
len(args)+1,
self_arg,
', '.join(arg.py_result() for arg in args)))
code.putln("%s = __Pyx_%sFunction_FastCall(%s, %s+1-%s, %d+%s); %s" % (
self.result(),
call_prefix,
function,
Naming.quick_temp_cname,
arg_offset_cname,
len(args),
arg_offset_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_xdecref_clear(self_arg, py_object_type)
code.put_gotref(self.py_result())
for arg in args:
arg.generate_disposal_code(code)
code.putln("} else")
code.putln("#endif")
code.putln("{")
args_tuple = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln("%s = PyTuple_New(%d+%s); %s" % (
args_tuple, len(args), arg_offset_cname,
code.error_goto_if_null(args_tuple, self.pos)))
code.put_gotref(args_tuple)
if len(args) > 1:
code.putln("if (%s) {" % self_arg)
code.putln("__Pyx_GIVEREF(%s); PyTuple_SET_ITEM(%s, 0, %s); %s = NULL;" % (
self_arg, args_tuple, self_arg, self_arg)) # stealing owned ref in this case
code.funcstate.release_temp(self_arg)
if len(args) > 1:
code.putln("}")
for i, arg in enumerate(args):
arg.make_owned_reference(code)
code.put_giveref(arg.py_result())
code.putln("PyTuple_SET_ITEM(%s, %d+%s, %s);" % (
args_tuple, i, arg_offset_cname, arg.py_result()))
if len(args) > 1:
code.funcstate.release_temp(arg_offset_cname)
for arg in args:
arg.generate_post_assignment_code(code)
arg.free_temps(code)
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, NULL); %s" % (
self.result(),
function, args_tuple,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.put_decref_clear(args_tuple, py_object_type)
code.funcstate.release_temp(args_tuple)
if len(args) == 1:
code.putln("}")
code.putln("}") # !CYTHON_FAST_PYCALL
if reuse_function_temp:
self.function.generate_disposal_code(code)
self.function.free_temps(code)
else:
code.put_decref_clear(function, py_object_type)
code.funcstate.release_temp(function)
class InlinedDefNodeCallNode(CallNode):
# Inline call to defnode
#
    #  function       PyCFunctionNode
    #  function_name  NameNode
    #  args           [ExprNode]
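    #
    #  Informal example: a call "f(x, y)" to a known module-level
    #  "def f(a, b)" can bypass the Python call protocol and jump straight
    #  to the underlying C implementation (entry.pyfunc_cname) when the
    #  signature matches exactly (see can_be_inlined() below).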
subexprs = ['args', 'function_name']
is_temp = 1
type = py_object_type
function = None
function_name = None
def can_be_inlined(self):
        func_type = self.function.def_node
if func_type.star_arg or func_type.starstar_arg:
return False
if len(func_type.args) != len(self.args):
return False
if func_type.num_kwonly_args:
return False # actually wrong number of arguments
return True
def analyse_types(self, env):
self.function_name = self.function_name.analyse_types(env)
self.args = [ arg.analyse_types(env) for arg in self.args ]
func_type = self.function.def_node
actual_nargs = len(self.args)
# Coerce arguments
some_args_in_temps = False
for i in range(actual_nargs):
formal_type = func_type.args[i].type
arg = self.args[i].coerce_to(formal_type, env)
if arg.is_temp:
if i > 0:
# first argument in temp doesn't impact subsequent arguments
some_args_in_temps = True
elif arg.type.is_pyobject and not env.nogil:
if arg.nonlocally_immutable():
# plain local variables are ok
pass
else:
# we do not safely own the argument's reference,
# but we must make sure it cannot be collected
# before we return from the function, so we create
# an owned temp reference to it
if i > 0: # first argument doesn't matter
some_args_in_temps = True
arg = arg.coerce_to_temp(env)
self.args[i] = arg
if some_args_in_temps:
# if some args are temps and others are not, they may get
# constructed in the wrong order (temps first) => make
# sure they are either all temps or all not temps (except
# for the last argument, which is evaluated last in any
# case)
for i in range(actual_nargs-1):
arg = self.args[i]
if arg.nonlocally_immutable():
# locals, C functions, unassignable types are safe.
pass
elif arg.type.is_cpp_class:
# Assignment has side effects, avoid.
pass
elif env.nogil and arg.type.is_pyobject:
# can't copy a Python reference into a temp in nogil
# env (this is safe: a construction would fail in
# nogil anyway)
pass
else:
#self.args[i] = arg.coerce_to_temp(env)
# instead: issue a warning
if i > 0:
warning(arg.pos, "Argument evaluation order in C function call is undefined and may not be as expected", 0)
break
return self
def generate_result_code(self, code):
arg_code = [self.function_name.py_result()]
func_type = self.function.def_node
for arg, proto_arg in zip(self.args, func_type.args):
if arg.type.is_pyobject:
arg_code.append(arg.result_as(proto_arg.type))
else:
arg_code.append(arg.result())
arg_code = ', '.join(arg_code)
code.putln(
"%s = %s(%s); %s" % (
self.result(),
self.function.def_node.entry.pyfunc_cname,
arg_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PythonCapiFunctionNode(ExprNode):
subexprs = []
def __init__(self, pos, py_name, cname, func_type, utility_code = None):
ExprNode.__init__(self, pos, name=py_name, cname=cname,
type=func_type, utility_code=utility_code)
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if self.utility_code:
code.globalstate.use_utility_code(self.utility_code)
def calculate_result_code(self):
return self.cname
class PythonCapiCallNode(SimpleCallNode):
# Python C-API Function call (only created in transforms)
# By default, we assume that the call never returns None, as this
# is true for most C-API functions in CPython. If this does not
# apply to a call, set the following to True (or None to inherit
# the default behaviour).
may_return_none = False
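    #
    #  Informal construction sketch (the names 'append_func_type',
    #  'list_node' and 'value_node' are illustrative, not taken from the
    #  real transforms):
    #
    #      node = PythonCapiCallNode(pos, "PyList_Append", append_func_type,
    #                                args=[list_node, value_node])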
def __init__(self, pos, function_name, func_type,
utility_code = None, py_name=None, **kwargs):
self.type = func_type.return_type
self.result_ctype = self.type
self.function = PythonCapiFunctionNode(
pos, py_name, function_name, func_type,
utility_code = utility_code)
# call this last so that we can override the constructed
# attributes above with explicit keyword arguments if required
SimpleCallNode.__init__(self, pos, **kwargs)
class CachedBuiltinMethodCallNode(CallNode):
# Python call to a method of a known Python builtin (only created in transforms)
subexprs = ['obj', 'args']
is_temp = True
def __init__(self, call_node, obj, method_name, args):
super(CachedBuiltinMethodCallNode, self).__init__(
call_node.pos,
obj=obj, method_name=method_name, args=args,
may_return_none=call_node.may_return_none,
type=call_node.type)
def may_be_none(self):
if self.may_return_none is not None:
return self.may_return_none
return ExprNode.may_be_none(self)
def generate_result_code(self, code):
type_cname = self.obj.type.cname
obj_cname = self.obj.py_result()
args = [arg.py_result() for arg in self.args]
call_code = code.globalstate.cached_unbound_method_call_code(
obj_cname, type_cname, self.method_name, args)
code.putln("%s = %s; %s" % (
self.result(), call_code,
code.error_goto_if_null(self.result(), self.pos)
))
code.put_gotref(self.result())
class GeneralCallNode(CallNode):
# General Python function call, including keyword,
# * and ** arguments.
#
    #  function          ExprNode
    #  positional_args   ExprNode          Tuple of positional arguments
    #  keyword_args      ExprNode or None  Dict of keyword arguments
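    #
    #  Informal example: for "f(a, *more, b=1, **opts)", 'positional_args'
    #  holds the merged positional tuple and 'keyword_args' the merged
    #  keyword dict.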
type = py_object_type
subexprs = ['function', 'positional_args', 'keyword_args']
nogil_check = Node.gil_error
def compile_time_value(self, denv):
function = self.function.compile_time_value(denv)
positional_args = self.positional_args.compile_time_value(denv)
keyword_args = self.keyword_args.compile_time_value(denv)
try:
return function(*positional_args, **keyword_args)
except Exception as e:
self.compile_time_value_error(e)
def explicit_args_kwds(self):
if (self.keyword_args and not self.keyword_args.is_dict_literal or
not self.positional_args.is_sequence_constructor):
raise CompileError(self.pos,
'Compile-time keyword arguments must be explicit.')
return self.positional_args.args, self.keyword_args
def analyse_types(self, env):
if self.analyse_as_type_constructor(env):
return self
self.function = self.function.analyse_types(env)
if not self.function.type.is_pyobject:
if self.function.type.is_error:
self.type = error_type
return self
if hasattr(self.function, 'entry'):
node = self.map_to_simple_call_node()
if node is not None and node is not self:
return node.analyse_types(env)
elif self.function.entry.as_variable:
self.function = self.function.coerce_to_pyobject(env)
elif node is self:
error(self.pos,
"Non-trivial keyword arguments and starred "
"arguments not allowed in cdef functions.")
else:
# error was already reported
pass
else:
self.function = self.function.coerce_to_pyobject(env)
if self.keyword_args:
self.keyword_args = self.keyword_args.analyse_types(env)
self.positional_args = self.positional_args.analyse_types(env)
self.positional_args = \
self.positional_args.coerce_to_pyobject(env)
self.set_py_result_type(self.function)
self.is_temp = 1
return self
def map_to_simple_call_node(self):
"""
Tries to map keyword arguments to declared positional arguments.
        Returns self to fall back to a generic Python call, None if an
        error was already reported, or a SimpleCallNode if the mapping
        succeeds.
"""
if not isinstance(self.positional_args, TupleNode):
# has starred argument
return self
if not self.keyword_args.is_dict_literal:
# keywords come from arbitrary expression => nothing to do here
return self
function = self.function
entry = getattr(function, 'entry', None)
if not entry:
return self
function_type = entry.type
if function_type.is_ptr:
function_type = function_type.base_type
if not function_type.is_cfunction:
return self
pos_args = self.positional_args.args
kwargs = self.keyword_args
declared_args = function_type.args
if entry.is_cmethod:
declared_args = declared_args[1:] # skip 'self'
if len(pos_args) > len(declared_args):
error(self.pos, "function call got too many positional arguments, "
"expected %d, got %s" % (len(declared_args),
len(pos_args)))
return None
matched_args = set([ arg.name for arg in declared_args[:len(pos_args)]
if arg.name ])
unmatched_args = declared_args[len(pos_args):]
matched_kwargs_count = 0
args = list(pos_args)
# check for duplicate keywords
seen = set(matched_args)
has_errors = False
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name in seen:
error(arg.pos, "argument '%s' passed twice" % name)
has_errors = True
# continue to report more errors if there are any
seen.add(name)
# match keywords that are passed in order
for decl_arg, arg in zip(unmatched_args, kwargs.key_value_pairs):
name = arg.key.value
if decl_arg.name == name:
matched_args.add(name)
matched_kwargs_count += 1
args.append(arg.value)
else:
break
# match keyword arguments that are passed out-of-order, but keep
# the evaluation of non-simple arguments in order by moving them
# into temps
from .UtilNodes import EvalWithTempExprNode, LetRefNode
temps = []
if len(kwargs.key_value_pairs) > matched_kwargs_count:
unmatched_args = declared_args[len(args):]
keywords = dict([ (arg.key.value, (i+len(pos_args), arg))
for i, arg in enumerate(kwargs.key_value_pairs) ])
first_missing_keyword = None
for decl_arg in unmatched_args:
name = decl_arg.name
if name not in keywords:
# missing keyword argument => either done or error
if not first_missing_keyword:
first_missing_keyword = name
continue
elif first_missing_keyword:
if entry.as_variable:
# we might be able to convert the function to a Python
# object, which then allows full calling semantics
# with default values in gaps - currently, we only
# support optional arguments at the end
return self
# wasn't the last keyword => gaps are not supported
error(self.pos, "C function call is missing "
"argument '%s'" % first_missing_keyword)
return None
pos, arg = keywords[name]
matched_args.add(name)
matched_kwargs_count += 1
if arg.value.is_simple():
args.append(arg.value)
else:
temp = LetRefNode(arg.value)
assert temp.is_simple()
args.append(temp)
temps.append((pos, temp))
if temps:
# may have to move preceding non-simple args into temps
final_args = []
new_temps = []
first_temp_arg = temps[0][-1]
for arg_value in args:
if arg_value is first_temp_arg:
break # done
if arg_value.is_simple():
final_args.append(arg_value)
else:
temp = LetRefNode(arg_value)
new_temps.append(temp)
final_args.append(temp)
if new_temps:
args = final_args
temps = new_temps + [ arg for i,arg in sorted(temps) ]
# check for unexpected keywords
for arg in kwargs.key_value_pairs:
name = arg.key.value
if name not in matched_args:
has_errors = True
error(arg.pos,
"C function got unexpected keyword argument '%s'" %
name)
if has_errors:
# error was reported already
return None
# all keywords mapped to positional arguments
# if we are missing arguments, SimpleCallNode will figure it out
node = SimpleCallNode(self.pos, function=function, args=args)
for temp in temps[::-1]:
node = EvalWithTempExprNode(temp, node)
return node
def generate_result_code(self, code):
if self.type.is_error: return
if self.keyword_args:
kwargs = self.keyword_args.py_result()
else:
kwargs = 'NULL'
code.globalstate.use_utility_code(UtilityCode.load_cached(
"PyObjectCall", "ObjectHandling.c"))
code.putln(
"%s = __Pyx_PyObject_Call(%s, %s, %s); %s" % (
self.result(),
self.function.py_result(),
self.positional_args.py_result(),
kwargs,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class AsTupleNode(ExprNode):
# Convert argument to tuple. Used for normalising
# the * argument of a function call.
#
    #  arg    ExprNode
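    #
    #  Informal example: for "f(*arg)", 'arg' is normalised into a real
    #  tuple via PySequence_Tuple() unless it already is one.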
subexprs = ['arg']
is_temp = 1
def calculate_constant_result(self):
self.constant_result = tuple(self.arg.constant_result)
def compile_time_value(self, denv):
arg = self.arg.compile_time_value(denv)
try:
return tuple(arg)
except Exception as e:
self.compile_time_value_error(e)
def analyse_types(self, env):
self.arg = self.arg.analyse_types(env).coerce_to_pyobject(env)
if self.arg.type is tuple_type:
return self.arg.as_none_safe_node("'NoneType' object is not iterable")
self.type = tuple_type
return self
def may_be_none(self):
return False
nogil_check = Node.gil_error
gil_message = "Constructing Python tuple"
def generate_result_code(self, code):
cfunc = "__Pyx_PySequence_Tuple" if self.arg.type in (py_object_type, tuple_type) else "PySequence_Tuple"
code.putln(
"%s = %s(%s); %s" % (
self.result(),
cfunc, self.arg.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class MergedDictNode(ExprNode):
# Helper class for keyword arguments and other merged dicts.
#
    #  keyword_args  [DictNode or other ExprNode]
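    #
    #  Informal example: for "f(**a, **b)", 'a' and 'b' are merged into a
    #  single dict, with duplicate keys rejected when used for keyword
    #  arguments.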
subexprs = ['keyword_args']
is_temp = 1
type = dict_type
reject_duplicates = True
def calculate_constant_result(self):
result = {}
reject_duplicates = self.reject_duplicates
for item in self.keyword_args:
if item.is_dict_literal:
# process items in order
items = ((key.constant_result, value.constant_result)
for key, value in item.key_value_pairs)
else:
                items = item.constant_result.items()
for key, value in items:
if reject_duplicates and key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
self.constant_result = result
def compile_time_value(self, denv):
result = {}
reject_duplicates = self.reject_duplicates
for item in self.keyword_args:
if item.is_dict_literal:
# process items in order
items = [(key.compile_time_value(denv), value.compile_time_value(denv))
for key, value in item.key_value_pairs]
else:
                items = item.compile_time_value(denv).items()
try:
for key, value in items:
if reject_duplicates and key in result:
raise ValueError("duplicate keyword argument found: %s" % key)
result[key] = value
except Exception as e:
self.compile_time_value_error(e)
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return dict_type
def analyse_types(self, env):
self.keyword_args = [
arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after ** must be a mapping, not NoneType')
for arg in self.keyword_args
]
return self
def may_be_none(self):
return False
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
args = iter(self.keyword_args)
item = next(args)
item.generate_evaluation_code(code)
if item.type is not dict_type:
# CPython supports calling functions with non-dicts, so do we
code.putln('if (likely(PyDict_CheckExact(%s))) {' %
item.py_result())
if item.is_dict_literal:
item.make_owned_reference(code)
code.putln("%s = %s;" % (self.result(), item.py_result()))
item.generate_post_assignment_code(code)
else:
code.putln("%s = PyDict_Copy(%s); %s" % (
self.result(),
item.py_result(),
code.error_goto_if_null(self.result(), item.pos)))
code.put_gotref(self.result())
item.generate_disposal_code(code)
if item.type is not dict_type:
code.putln('} else {')
code.putln("%s = PyObject_CallFunctionObjArgs((PyObject*)&PyDict_Type, %s, NULL); %s" % (
self.result(),
item.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
item.generate_disposal_code(code)
code.putln('}')
item.free_temps(code)
helpers = set()
for item in args:
if item.is_dict_literal:
# inline update instead of creating an intermediate dict
for arg in item.key_value_pairs:
arg.generate_evaluation_code(code)
if self.reject_duplicates:
code.putln("if (unlikely(PyDict_Contains(%s, %s))) {" % (
self.result(),
arg.key.py_result()))
helpers.add("RaiseDoubleKeywords")
# FIXME: find out function name at runtime!
code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
arg.key.py_result(),
code.error_goto(self.pos)))
code.putln("}")
code.put_error_if_neg(arg.key.pos, "PyDict_SetItem(%s, %s, %s)" % (
self.result(),
arg.key.py_result(),
arg.value.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
else:
item.generate_evaluation_code(code)
if self.reject_duplicates:
# merge mapping into kwdict one by one as we need to check for duplicates
helpers.add("MergeKeywords")
code.put_error_if_neg(item.pos, "__Pyx_MergeKeywords(%s, %s)" % (
self.result(), item.py_result()))
else:
# simple case, just add all entries
helpers.add("RaiseMappingExpected")
code.putln("if (unlikely(PyDict_Update(%s, %s) < 0)) {" % (
self.result(), item.py_result()))
code.putln("if (PyErr_ExceptionMatches(PyExc_AttributeError)) "
"__Pyx_RaiseMappingExpectedError(%s);" % item.py_result())
code.putln(code.error_goto(item.pos))
code.putln("}")
item.generate_disposal_code(code)
item.free_temps(code)
for helper in sorted(helpers):
code.globalstate.use_utility_code(UtilityCode.load_cached(helper, "FunctionArguments.c"))
def annotate(self, code):
for item in self.keyword_args:
item.annotate(code)
class AttributeNode(ExprNode):
# obj.attribute
#
    #  obj               ExprNode
    #  attribute         string
    #  needs_none_check  boolean   Used if obj is an extension type.
    #                              If set to False, it is known that the
    #                              type is not None.
    #
    #  Used internally:
    #
    #  is_py_attr        boolean   Is a Python getattr operation
    #  member            string    C name of struct member
    #  is_called         boolean   Function call is being done on result
    #  entry             Entry     Symbol table entry of attribute
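    #
    #  Informal examples of the lowerings chosen in analyse_attribute():
    #  for a struct pointer "p", "p.x" becomes "p->x" in C; for a typed
    #  extension type attribute, a C struct member access; otherwise a
    #  Python-level attribute lookup.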
is_attribute = 1
subexprs = ['obj']
type = PyrexTypes.error_type
entry = None
is_called = 0
needs_none_check = True
is_memslice_transpose = False
is_special_lookup = False
is_py_attr = 0
def as_cython_attribute(self):
        if (isinstance(self.obj, NameNode) and
                self.obj.is_cython_module and
                self.attribute != u"parallel"):
return self.attribute
cy = self.obj.as_cython_attribute()
if cy:
return "%s.%s" % (cy, self.attribute)
return None
def coerce_to(self, dst_type, env):
# If coercing to a generic pyobject and this is a cpdef function
# we can create the corresponding attribute
if dst_type is py_object_type:
entry = self.entry
if entry and entry.is_cfunction and entry.as_variable:
# must be a cpdef function
self.is_temp = 1
self.entry = entry.as_variable
self.analyse_as_python_attribute(env)
return self
return ExprNode.coerce_to(self, dst_type, env)
def calculate_constant_result(self):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
return
self.constant_result = getattr(self.obj.constant_result, attr)
def compile_time_value(self, denv):
attr = self.attribute
if attr.startswith("__") and attr.endswith("__"):
error(self.pos,
"Invalid attribute name '%s' in compile-time expression" % attr)
return None
obj = self.obj.compile_time_value(denv)
try:
return getattr(obj, attr)
except Exception as e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return self.obj.type_dependencies(env)
def infer_type(self, env):
# FIXME: this is way too redundant with analyse_types()
node = self.analyse_as_cimported_attribute_node(env, target=False)
if node is not None:
if node.entry.type and node.entry.type.is_cfunction:
# special-case - function converted to pointer
return PyrexTypes.CPtrType(node.entry.type)
else:
return node.entry.type
node = self.analyse_as_type_attribute(env)
if node is not None:
return node.entry.type
obj_type = self.obj.infer_type(env)
self.analyse_attribute(env, obj_type=obj_type)
if obj_type.is_builtin_type and self.type.is_cfunction:
# special case: C-API replacements for C methods of
# builtin types cannot be inferred as C functions as
# that would prevent their use as bound methods
return py_object_type
elif self.entry and self.entry.is_cmethod:
# special case: bound methods should not be inferred
# as their unbound method types
return py_object_type
return self.type
def analyse_target_declaration(self, env):
pass
def analyse_target_types(self, env):
node = self.analyse_types(env, target = 1)
if node.type.is_const:
error(self.pos, "Assignment to const attribute '%s'" % self.attribute)
if not node.is_lvalue():
error(self.pos, "Assignment to non-lvalue of type '%s'" % self.type)
return node
def analyse_types(self, env, target = 0):
self.initialized_check = env.directives['initializedcheck']
node = self.analyse_as_cimported_attribute_node(env, target)
if node is None and not target:
node = self.analyse_as_type_attribute(env)
if node is None:
node = self.analyse_as_ordinary_attribute_node(env, target)
assert node is not None
if node.entry:
node.entry.used = True
if node.is_attribute:
node.wrap_obj_in_nonecheck(env)
return node
def analyse_as_cimported_attribute_node(self, env, target):
        # Try to interpret this as a reference to an imported
        # C const, type, var or function. If successful, returns
        # a NameNode for the imported entry (or this node, after
        # reporting an error); otherwise returns None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and (
entry.is_cglobal or entry.is_cfunction
or entry.is_type or entry.is_const):
return self.as_name_node(env, entry, target)
if self.is_cimported_module_without_shadow(env):
error(self.pos, "cimported module has no attribute '%s'" % self.attribute)
return self
return None
def analyse_as_type_attribute(self, env):
# Try to interpret this as a reference to an unbound
# C method of an extension type or builtin type. If successful,
# creates a corresponding NameNode and returns it, otherwise
# returns None.
if self.obj.is_string_literal:
return
type = self.obj.analyse_as_type(env)
if type:
if type.is_extension_type or type.is_builtin_type or type.is_cpp_class:
entry = type.scope.lookup_here(self.attribute)
if entry and (entry.is_cmethod or type.is_cpp_class and entry.type.is_cfunction):
if type.is_builtin_type:
if not self.is_called:
# must handle this as Python object
return None
ubcm_entry = entry
else:
# Create a temporary entry describing the C method
# as an ordinary function.
if entry.func_cname and not hasattr(entry.type, 'op_arg_struct'):
cname = entry.func_cname
if entry.type.is_static_method or (
env.parent_scope and env.parent_scope.is_cpp_class_scope):
ctype = entry.type
elif type.is_cpp_class:
error(self.pos, "%s not a static member of %s" % (entry.name, type))
ctype = PyrexTypes.error_type
else:
# Fix self type.
ctype = copy.copy(entry.type)
ctype.args = ctype.args[:]
ctype.args[0] = PyrexTypes.CFuncTypeArg('self', type, 'self', None)
else:
cname = "%s->%s" % (type.vtabptr_cname, entry.cname)
ctype = entry.type
ubcm_entry = Symtab.Entry(entry.name, cname, ctype)
ubcm_entry.is_cfunction = 1
ubcm_entry.func_cname = entry.func_cname
ubcm_entry.is_unbound_cmethod = 1
ubcm_entry.scope = entry.scope
return self.as_name_node(env, ubcm_entry, target=False)
elif type.is_enum:
if self.attribute in type.values:
for entry in type.entry.enum_values:
if entry.name == self.attribute:
return self.as_name_node(env, entry, target=False)
else:
error(self.pos, "%s not a known value of %s" % (self.attribute, type))
else:
error(self.pos, "%s not a known value of %s" % (self.attribute, type))
return None
def analyse_as_type(self, env):
module_scope = self.obj.analyse_as_module(env)
if module_scope:
return module_scope.lookup_type(self.attribute)
if not self.obj.is_string_literal:
base_type = self.obj.analyse_as_type(env)
if base_type and hasattr(base_type, 'scope') and base_type.scope is not None:
return base_type.scope.lookup_type(self.attribute)
return None
def analyse_as_extension_type(self, env):
# Try to interpret this as a reference to an extension type
# in a cimported module. Returns the extension type, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.is_type:
if entry.type.is_extension_type or entry.type.is_builtin_type:
return entry.type
return None
def analyse_as_module(self, env):
# Try to interpret this as a reference to a cimported module
# in another cimported module. Returns the module scope, or None.
module_scope = self.obj.analyse_as_module(env)
if module_scope:
entry = module_scope.lookup_here(self.attribute)
if entry and entry.as_module:
return entry.as_module
return None
def as_name_node(self, env, entry, target):
# Create a corresponding NameNode from this node and complete the
# analyse_types phase.
node = NameNode.from_node(self, name=self.attribute, entry=entry)
if target:
node = node.analyse_target_types(env)
else:
node = node.analyse_rvalue_entry(env)
node.entry.used = 1
return node
def analyse_as_ordinary_attribute_node(self, env, target):
self.obj = self.obj.analyse_types(env)
self.analyse_attribute(env)
if self.entry and self.entry.is_cmethod and not self.is_called:
# error(self.pos, "C method can only be called")
pass
## Reference to C array turns into pointer to first element.
#while self.type.is_array:
# self.type = self.type.element_ptr_type()
if self.is_py_attr:
if not target:
self.is_temp = 1
self.result_ctype = py_object_type
elif target and self.obj.type.is_builtin_type:
error(self.pos, "Assignment to an immutable object field")
#elif self.type.is_memoryviewslice and not target:
# self.is_temp = True
return self
def analyse_attribute(self, env, obj_type = None):
# Look up attribute and set self.type and self.member.
immutable_obj = obj_type is not None # used during type inference
self.is_py_attr = 0
self.member = self.attribute
if obj_type is None:
if self.obj.type.is_string or self.obj.type.is_pyunicode_ptr:
self.obj = self.obj.coerce_to_pyobject(env)
obj_type = self.obj.type
else:
if obj_type.is_string or obj_type.is_pyunicode_ptr:
obj_type = py_object_type
if obj_type.is_ptr or obj_type.is_array:
obj_type = obj_type.base_type
self.op = "->"
elif obj_type.is_extension_type or obj_type.is_builtin_type:
self.op = "->"
elif obj_type.is_reference and obj_type.is_fake_reference:
self.op = "->"
else:
self.op = "."
if obj_type.has_attributes:
if obj_type.attributes_known():
entry = obj_type.scope.lookup_here(self.attribute)
if obj_type.is_memoryviewslice and not entry:
if self.attribute == 'T':
self.is_memslice_transpose = True
self.is_temp = True
self.use_managed_ref = True
self.type = self.obj.type.transpose(self.pos)
return
else:
obj_type.declare_attribute(self.attribute, env, self.pos)
entry = obj_type.scope.lookup_here(self.attribute)
if entry and entry.is_member:
entry = None
else:
error(self.pos,
"Cannot select attribute of incomplete type '%s'"
% obj_type)
self.type = PyrexTypes.error_type
return
self.entry = entry
if entry:
if obj_type.is_extension_type and entry.name == "__weakref__":
error(self.pos, "Illegal use of special attribute __weakref__")
# def methods need the normal attribute lookup
# because they do not have struct entries
# fused function go through assignment synthesis
# (foo = pycfunction(foo_func_obj)) and need to go through
# regular Python lookup as well
if (entry.is_variable and not entry.fused_cfunction) or entry.is_cmethod:
self.type = entry.type
self.member = entry.cname
return
else:
# If it's not a variable or C method, it must be a Python
# method of an extension type, so we treat it like a Python
# attribute.
pass
# If we get here, the base object is not a struct/union/extension
# type, or it is an extension type and the attribute is either not
# declared or is declared as a Python method. Treat it as a Python
# attribute reference.
self.analyse_as_python_attribute(env, obj_type, immutable_obj)
def analyse_as_python_attribute(self, env, obj_type=None, immutable_obj=False):
if obj_type is None:
obj_type = self.obj.type
# mangle private '__*' Python attributes used inside of a class
self.attribute = env.mangle_class_private_name(self.attribute)
self.member = self.attribute
self.type = py_object_type
self.is_py_attr = 1
if not obj_type.is_pyobject and not obj_type.is_error:
# Expose python methods for immutable objects.
if (obj_type.is_string or obj_type.is_cpp_string
or obj_type.is_buffer or obj_type.is_memoryviewslice
or obj_type.is_numeric
or (obj_type.is_ctuple and obj_type.can_coerce_to_pyobject(env))
or (obj_type.is_struct and obj_type.can_coerce_to_pyobject(env))):
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
elif (obj_type.is_cfunction and (self.obj.is_name or self.obj.is_attribute)
and self.obj.entry.as_variable
and self.obj.entry.as_variable.type.is_pyobject):
# might be an optimised builtin function => unpack it
if not immutable_obj:
self.obj = self.obj.coerce_to_pyobject(env)
else:
error(self.pos,
"Object of type '%s' has no attribute '%s'" %
(obj_type, self.attribute))
def wrap_obj_in_nonecheck(self, env):
if not env.directives['nonecheck']:
return
msg = None
format_args = ()
if (self.obj.type.is_extension_type and self.needs_none_check and not
self.is_py_attr):
msg = "'NoneType' object has no attribute '%{0}s'".format('.30' if len(self.attribute) <= 30 else '')
format_args = (self.attribute,)
elif self.obj.type.is_memoryviewslice:
if self.is_memslice_transpose:
msg = "Cannot transpose None memoryview slice"
else:
entry = self.obj.type.scope.lookup_here(self.attribute)
if entry:
# copy/is_c_contig/shape/strides etc
msg = "Cannot access '%s' attribute of None memoryview slice"
format_args = (entry.name,)
if msg:
self.obj = self.obj.as_none_safe_node(msg, 'PyExc_AttributeError',
format_args=format_args)
def nogil_check(self, env):
if self.is_py_attr:
self.gil_error()
gil_message = "Accessing Python attribute"
def is_cimported_module_without_shadow(self, env):
return self.obj.is_cimported_module_without_shadow(env)
def is_simple(self):
if self.obj:
return self.result_in_temp() or self.obj.is_simple()
else:
return NameNode.is_simple(self)
def is_lvalue(self):
if self.obj:
return True
else:
return NameNode.is_lvalue(self)
def is_ephemeral(self):
if self.obj:
return self.obj.is_ephemeral()
else:
return NameNode.is_ephemeral(self)
def calculate_result_code(self):
#print "AttributeNode.calculate_result_code:", self.member ###
#print "...obj node =", self.obj, "code", self.obj.result() ###
#print "...obj type", self.obj.type, "ctype", self.obj.ctype() ###
obj = self.obj
obj_code = obj.result_as(obj.type)
#print "...obj_code =", obj_code ###
if self.entry and self.entry.is_cmethod:
if obj.type.is_extension_type and not self.entry.is_builtin_cmethod:
if self.entry.final_func_cname:
return self.entry.final_func_cname
if self.type.from_fused:
# If the attribute was specialized through indexing, make
# sure to get the right fused name, as our entry was
# replaced by our parent index node
# (AnalyseExpressionsTransform)
self.member = self.entry.cname
return "((struct %s *)%s%s%s)->%s" % (
obj.type.vtabstruct_cname, obj_code, self.op,
obj.type.vtabslot_cname, self.member)
elif self.result_is_used:
return self.member
# Generating no code at all for unused access to optimised builtin
# methods fixes the problem that some optimisations only exist as
# macros, i.e. there is no function pointer to them, so we would
# generate invalid C code here.
return
elif obj.type.is_complex:
return "__Pyx_C%s(%s)" % (self.member.upper(), obj_code)
else:
if obj.type.is_builtin_type and self.entry and self.entry.is_variable:
# accessing a field of a builtin type, need to cast better than result_as() does
obj_code = obj.type.cast_code(obj.result(), to_object_struct = True)
return "%s%s%s" % (obj_code, self.op, self.member)
def generate_result_code(self, code):
if self.is_py_attr:
if self.is_special_lookup:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectLookupSpecial", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_LookupSpecial'
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectGetAttrStr", "ObjectHandling.c"))
lookup_func_name = '__Pyx_PyObject_GetAttrStr'
code.putln(
'%s = %s(%s, %s); %s' % (
self.result(),
lookup_func_name,
self.obj.py_result(),
code.intern_identifier(self.attribute),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.type.is_memoryviewslice:
if self.is_memslice_transpose:
# transpose the slice
for access, packing in self.type.axes:
if access == 'ptr':
error(self.pos, "Transposing not supported for slices "
"with indirect dimensions")
return
code.putln("%s = %s;" % (self.result(), self.obj.result()))
code.put_incref_memoryviewslice(self.result(), have_gil=True)
T = "__pyx_memslice_transpose(&%s) == 0"
code.putln(code.error_goto_if(T % self.result(), self.pos))
elif self.initialized_check:
code.putln(
'if (unlikely(!%s.memview)) {'
'PyErr_SetString(PyExc_AttributeError,'
'"Memoryview is not initialized");'
'%s'
'}' % (self.result(), code.error_goto(self.pos)))
else:
# result_code contains what is needed, but we may need to insert
# a check and raise an exception
if self.obj.type and self.obj.type.is_extension_type:
pass
elif self.entry and self.entry.is_cmethod:
# C method implemented as function call with utility code
code.globalstate.use_entry_utility_code(self.entry)
def generate_disposal_code(self, code):
if self.is_temp and self.type.is_memoryviewslice and self.is_memslice_transpose:
# mirror condition for putting the memview incref here:
code.put_xdecref_memoryviewslice(
self.result(), have_gil=True)
code.putln("%s.memview = NULL;" % self.result())
code.putln("%s.data = NULL;" % self.result())
else:
ExprNode.generate_disposal_code(self, code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
self.obj.generate_evaluation_code(code)
if self.is_py_attr:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_SetAttrStr(%s, %s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute),
rhs.py_result()))
rhs.generate_disposal_code(code)
rhs.free_temps(code)
elif self.obj.type.is_complex:
code.putln("__Pyx_SET_C%s(%s, %s);" % (
self.member.upper(),
self.obj.result_as(self.obj.type),
rhs.result_as(self.ctype())))
rhs.generate_disposal_code(code)
rhs.free_temps(code)
else:
select_code = self.result()
if self.type.is_pyobject and self.use_managed_ref:
rhs.make_owned_reference(code)
code.put_giveref(rhs.py_result())
code.put_gotref(select_code)
code.put_decref(select_code, self.ctype())
elif self.type.is_memoryviewslice:
from . import MemoryView
MemoryView.put_assign_to_memviewslice(
select_code, rhs, rhs.result(), self.type, code)
if not self.type.is_memoryviewslice:
code.putln(
"%s = %s;" % (
select_code,
rhs.result_as(self.ctype())))
#rhs.result()))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def generate_deletion_code(self, code, ignore_nonexisting=False):
self.obj.generate_evaluation_code(code)
if self.is_py_attr or (self.entry.scope.is_property_scope
and u'__del__' in self.entry.scope.entries):
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyObjectSetAttrStr", "ObjectHandling.c"))
code.put_error_if_neg(self.pos,
'__Pyx_PyObject_DelAttrStr(%s, %s)' % (
self.obj.py_result(),
code.intern_identifier(self.attribute)))
else:
error(self.pos, "Cannot delete C attribute of extension type")
self.obj.generate_disposal_code(code)
self.obj.free_temps(code)
def annotate(self, code):
if self.is_py_attr:
style, text = 'py_attr', 'python attribute (%s)'
else:
style, text = 'c_attr', 'c attribute (%s)'
code.annotate(self.pos, AnnotationItem(style, text % self.type, size=len(self.attribute)))
#-------------------------------------------------------------------
#
# Constructor nodes
#
#-------------------------------------------------------------------
class StarredUnpackingNode(ExprNode):
# A starred expression like "*a"
#
# This is only allowed in sequence assignment or construction such as
#
# a, *b = (1,2,3,4) => a = 1 ; b = [2,3,4]
#
    #  and is special-cased during type analysis (or generates an error
    #  if it is found in an unexpected place).
    #
    #  target    ExprNode
subexprs = ['target']
is_starred = 1
type = py_object_type
is_temp = 1
starred_expr_allowed_here = False
def __init__(self, pos, target):
ExprNode.__init__(self, pos, target=target)
def analyse_declarations(self, env):
if not self.starred_expr_allowed_here:
error(self.pos, "starred expression is not allowed here")
self.target.analyse_declarations(env)
def infer_type(self, env):
return self.target.infer_type(env)
def analyse_types(self, env):
if not self.starred_expr_allowed_here:
error(self.pos, "starred expression is not allowed here")
self.target = self.target.analyse_types(env)
self.type = self.target.type
return self
def analyse_target_declaration(self, env):
self.target.analyse_target_declaration(env)
def analyse_target_types(self, env):
self.target = self.target.analyse_target_types(env)
self.type = self.target.type
return self
def calculate_result_code(self):
return ""
def generate_result_code(self, code):
pass
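# Illustration (a hedged sketch, not part of the compiler): for an
# assignment such as
#
#     a, *b, c = some_iterable
#
# the parser wraps 'b' in a StarredUnpackingNode; SequenceNode's
# replace_starred_target_node() below later strips the wrapper and
# marks the contained target itself as starred before the unpacking
# code is generated.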
class SequenceNode(ExprNode):
# Base class for list and tuple constructor nodes.
# Contains common code for performing sequence unpacking.
#
# args [ExprNode]
# unpacked_items [ExprNode] or None
# coerced_unpacked_items [ExprNode] or None
# mult_factor ExprNode the integer number of content repetitions ([1,2]*3)
subexprs = ['args', 'mult_factor']
is_sequence_constructor = 1
unpacked_items = None
mult_factor = None
slow = False # trade speed for code size (e.g. use PyTuple_Pack())
def compile_time_value_list(self, denv):
return [arg.compile_time_value(denv) for arg in self.args]
def replace_starred_target_node(self):
# replace a starred node among the targets with the expression it contains
self.starred_assignment = False
args = []
for arg in self.args:
if arg.is_starred:
if self.starred_assignment:
error(arg.pos, "more than 1 starred expression in assignment")
self.starred_assignment = True
arg = arg.target
arg.is_starred = True
args.append(arg)
self.args = args
def analyse_target_declaration(self, env):
self.replace_starred_target_node()
for arg in self.args:
arg.analyse_target_declaration(env)
def analyse_types(self, env, skip_children=False):
for i, arg in enumerate(self.args):
if not skip_children:
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
if self.mult_factor:
self.mult_factor = self.mult_factor.analyse_types(env)
if not self.mult_factor.type.is_int:
self.mult_factor = self.mult_factor.coerce_to_pyobject(env)
self.is_temp = 1
# not setting self.type here, subtypes do this
return self
def coerce_to_ctuple(self, dst_type, env):
if self.type == dst_type:
return self
assert not self.mult_factor
if len(self.args) != dst_type.size:
error(self.pos, "trying to coerce sequence to ctuple of wrong length, expected %d, got %d" % (
dst_type.size, len(self.args)))
coerced_args = [arg.coerce_to(type, env) for arg, type in zip(self.args, dst_type.components)]
return TupleNode(self.pos, args=coerced_args, type=dst_type, is_temp=True)
def _create_merge_node_if_necessary(self, env):
self._flatten_starred_args()
if not any(arg.is_starred for arg in self.args):
return self
# convert into MergedSequenceNode by building partial sequences
args = []
values = []
for arg in self.args:
if arg.is_starred:
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
values = []
args.append(arg.target)
else:
values.append(arg)
if values:
args.append(TupleNode(values[0].pos, args=values).analyse_types(env, skip_children=True))
node = MergedSequenceNode(self.pos, args, self.type)
if self.mult_factor:
node = binop_node(
self.pos, '*', node, self.mult_factor.coerce_to_pyobject(env),
inplace=True, type=self.type, is_temp=True)
return node
def _flatten_starred_args(self):
args = []
for arg in self.args:
if arg.is_starred and arg.target.is_sequence_constructor and not arg.target.mult_factor:
args.extend(arg.target.args)
else:
args.append(arg)
self.args[:] = args
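# Example (hedged): _flatten_starred_args() turns a literal like
#
#     [*(1, 2), x]    # args: Starred((1, 2)), x
#
# into the plain args [1, 2, x], since the starred target is itself an
# unmultiplied sequence constructor whose items can be inlined directly.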
def may_be_none(self):
return False
def analyse_target_types(self, env):
if self.mult_factor:
error(self.pos, "can't assign to multiplied sequence")
self.unpacked_items = []
self.coerced_unpacked_items = []
self.any_coerced_items = False
for i, arg in enumerate(self.args):
arg = self.args[i] = arg.analyse_target_types(env)
if arg.is_starred:
if not arg.type.assignable_from(list_type):
error(arg.pos,
"starred target must have Python object (list) type")
if arg.type is py_object_type:
arg.type = list_type
unpacked_item = PyTempNode(self.pos, env)
coerced_unpacked_item = unpacked_item.coerce_to(arg.type, env)
if unpacked_item is not coerced_unpacked_item:
self.any_coerced_items = True
self.unpacked_items.append(unpacked_item)
self.coerced_unpacked_items.append(coerced_unpacked_item)
self.type = py_object_type
return self
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_sequence_packing_code(self, code, target=None, plain=False):
if target is None:
target = self.result()
size_factor = c_mult = ''
mult_factor = None
if self.mult_factor and not plain:
mult_factor = self.mult_factor
if mult_factor.type.is_int:
c_mult = mult_factor.result()
if (isinstance(mult_factor.constant_result, _py_int_types) and
mult_factor.constant_result > 0):
size_factor = ' * %s' % mult_factor.constant_result
elif mult_factor.type.signed:
size_factor = ' * ((%s<0) ? 0:%s)' % (c_mult, c_mult)
else:
size_factor = ' * (%s)' % (c_mult,)
if self.type is tuple_type and (self.is_literal or self.slow) and not c_mult:
# use PyTuple_Pack() to avoid generating huge amounts of one-time code
code.putln('%s = PyTuple_Pack(%d, %s); %s' % (
target,
len(self.args),
', '.join(arg.py_result() for arg in self.args),
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
elif self.type.is_ctuple:
for i, arg in enumerate(self.args):
code.putln("%s.f%s = %s;" % (
target, i, arg.result()))
else:
# build the tuple/list step by step, potentially multiplying it as we go
if self.type is list_type:
create_func, set_item_func = 'PyList_New', 'PyList_SET_ITEM'
elif self.type is tuple_type:
create_func, set_item_func = 'PyTuple_New', 'PyTuple_SET_ITEM'
else:
raise InternalError("sequence packing for unexpected type %s" % self.type)
arg_count = len(self.args)
code.putln("%s = %s(%s%s); %s" % (
target, create_func, arg_count, size_factor,
code.error_goto_if_null(target, self.pos)))
code.put_gotref(target)
if c_mult:
# FIXME: can't use a temp variable here as the code may
# end up in the constant building function. Temps
# currently don't work there.
#counter = code.funcstate.allocate_temp(mult_factor.type, manage_ref=False)
counter = Naming.quick_temp_cname
code.putln('{ Py_ssize_t %s;' % counter)
if arg_count == 1:
offset = counter
else:
offset = '%s * %s' % (counter, arg_count)
code.putln('for (%s=0; %s < %s; %s++) {' % (
counter, counter, c_mult, counter
))
else:
offset = ''
for i in range(arg_count):
arg = self.args[i]
if c_mult or not arg.result_in_temp():
code.put_incref(arg.result(), arg.ctype())
code.put_giveref(arg.py_result())
code.putln("%s(%s, %s, %s);" % (
set_item_func,
target,
(offset and i) and ('%s + %s' % (offset, i)) or (offset or i),
arg.py_result()))
if c_mult:
code.putln('}')
#code.funcstate.release_temp(counter)
code.putln('}')
if mult_factor is not None and mult_factor.type.is_pyobject:
code.putln('{ PyObject* %s = PyNumber_InPlaceMultiply(%s, %s); %s' % (
Naming.quick_temp_cname, target, mult_factor.py_result(),
code.error_goto_if_null(Naming.quick_temp_cname, self.pos)
))
code.put_gotref(Naming.quick_temp_cname)
code.put_decref(target, py_object_type)
code.putln('%s = %s;' % (target, Naming.quick_temp_cname))
code.putln('}')
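# Rough shape of the C code emitted by generate_sequence_packing_code()
# for a multiplied list such as "[x, y] * n" with a signed C integer n
# (a hedged sketch with simplified names; the real output uses Cython's
# temp naming):
#
#     result = PyList_New(2 * ((n<0) ? 0:n));
#     { Py_ssize_t i;
#       for (i=0; i < n; i++) {
#         Py_INCREF(x); PyList_SET_ITEM(result, i * 2, x);
#         Py_INCREF(y); PyList_SET_ITEM(result, i * 2 + 1, y);
#       }
#     }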
def generate_subexpr_disposal_code(self, code):
if self.mult_factor and self.mult_factor.type.is_int:
super(SequenceNode, self).generate_subexpr_disposal_code(code)
elif self.type is tuple_type and (self.is_literal or self.slow):
super(SequenceNode, self).generate_subexpr_disposal_code(code)
else:
# We call generate_post_assignment_code here instead
# of generate_disposal_code, because values were stored
# in the tuple using a reference-stealing operation.
for arg in self.args:
arg.generate_post_assignment_code(code)
# Should NOT call free_temps -- this is invoked by the default
# generate_evaluation_code which will do that.
if self.mult_factor:
self.mult_factor.generate_disposal_code(code)
def generate_assignment_code(self, rhs, code, overloaded_assignment=False,
exception_check=None, exception_value=None):
if self.starred_assignment:
self.generate_starred_assignment_code(rhs, code)
else:
self.generate_parallel_assignment_code(rhs, code)
for item in self.unpacked_items:
item.release(code)
rhs.free_temps(code)
_func_iternext_type = PyrexTypes.CPtrType(PyrexTypes.CFuncType(
PyrexTypes.py_object_type, [
PyrexTypes.CFuncTypeArg("it", PyrexTypes.py_object_type, None),
]))
def generate_parallel_assignment_code(self, rhs, code):
# Need to work around the fact that generate_evaluation_code
# allocates the temps in a rather hacky way -- the assignment
# is evaluated twice, within each if-block.
for item in self.unpacked_items:
item.allocate(code)
special_unpack = (rhs.type is py_object_type
or rhs.type in (tuple_type, list_type)
or not rhs.type.is_builtin_type)
long_enough_for_a_loop = len(self.unpacked_items) > 3
if special_unpack:
self.generate_special_parallel_unpacking_code(
code, rhs, use_loop=long_enough_for_a_loop)
else:
code.putln("{")
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=long_enough_for_a_loop)
code.putln("}")
for value_node in self.coerced_unpacked_items:
value_node.generate_evaluation_code(code)
for i in range(len(self.args)):
self.args[i].generate_assignment_code(
self.coerced_unpacked_items[i], code)
def generate_special_parallel_unpacking_code(self, code, rhs, use_loop):
sequence_type_test = '1'
none_check = "likely(%s != Py_None)" % rhs.py_result()
if rhs.type is list_type:
sequence_types = ['List']
if rhs.may_be_none():
sequence_type_test = none_check
elif rhs.type is tuple_type:
sequence_types = ['Tuple']
if rhs.may_be_none():
sequence_type_test = none_check
else:
sequence_types = ['Tuple', 'List']
tuple_check = 'likely(PyTuple_CheckExact(%s))' % rhs.py_result()
list_check = 'PyList_CheckExact(%s)' % rhs.py_result()
sequence_type_test = "(%s) || (%s)" % (tuple_check, list_check)
code.putln("if (%s) {" % sequence_type_test)
code.putln("PyObject* sequence = %s;" % rhs.py_result())
# list/tuple => check size
code.putln("Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);")
code.putln("if (unlikely(size != %d)) {" % len(self.args))
code.globalstate.use_utility_code(raise_too_many_values_to_unpack)
code.putln("if (size > %d) __Pyx_RaiseTooManyValuesError(%d);" % (
len(self.args), len(self.args)))
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.putln("else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);")
# < 0 => exception
code.putln(code.error_goto(self.pos))
code.putln("}")
code.putln("#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS")
# unpack items from list/tuple in unrolled loop (can't fail)
if len(sequence_types) == 2:
code.putln("if (likely(Py%s_CheckExact(sequence))) {" % sequence_types[0])
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[0], i))
if len(sequence_types) == 2:
code.putln("} else {")
for i, item in enumerate(self.unpacked_items):
code.putln("%s = Py%s_GET_ITEM(sequence, %d); " % (
item.result(), sequence_types[1], i))
code.putln("}")
for item in self.unpacked_items:
code.put_incref(item.result(), item.ctype())
code.putln("#else")
# in non-CPython, use the PySequence protocol (which can fail)
if not use_loop:
for i, item in enumerate(self.unpacked_items):
code.putln("%s = PySequence_ITEM(sequence, %d); %s" % (
item.result(), i,
code.error_goto_if_null(item.result(), self.pos)))
code.put_gotref(item.result())
else:
code.putln("{")
code.putln("Py_ssize_t i;")
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in self.unpacked_items])))
code.putln("for (i=0; i < %s; i++) {" % len(self.unpacked_items))
code.putln("PyObject* item = PySequence_ITEM(sequence, i); %s" % (
code.error_goto_if_null('item', self.pos)))
code.put_gotref('item')
code.putln("*(temps[i]) = item;")
code.putln("}")
code.putln("}")
code.putln("#endif")
rhs.generate_disposal_code(code)
if sequence_type_test == '1':
code.putln("}") # all done
elif sequence_type_test == none_check:
# either tuple/list or None => save some code by generating the error directly
code.putln("} else {")
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseNoneIterError", "ObjectHandling.c"))
code.putln("__Pyx_RaiseNoneNotIterableError(); %s" % code.error_goto(self.pos))
code.putln("}") # all done
else:
code.putln("} else {") # needs iteration fallback code
self.generate_generic_parallel_unpacking_code(
code, rhs, self.unpacked_items, use_loop=use_loop)
code.putln("}")
def generate_generic_parallel_unpacking_code(self, code, rhs, unpacked_items, use_loop, terminate=True):
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
code.globalstate.use_utility_code(UtilityCode.load_cached("IterFinish", "ObjectHandling.c"))
code.putln("Py_ssize_t index = -1;") # must be at the start of a C block!
if use_loop:
code.putln("PyObject** temps[%s] = {%s};" % (
len(self.unpacked_items),
','.join(['&%s' % item.result() for item in unpacked_items])))
iterator_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln(
"%s = PyObject_GetIter(%s); %s" % (
iterator_temp,
rhs.py_result(),
code.error_goto_if_null(iterator_temp, self.pos)))
code.put_gotref(iterator_temp)
rhs.generate_disposal_code(code)
iternext_func = code.funcstate.allocate_temp(self._func_iternext_type, manage_ref=False)
code.putln("%s = Py_TYPE(%s)->tp_iternext;" % (
iternext_func, iterator_temp))
unpacking_error_label = code.new_label('unpacking_failed')
unpack_code = "%s(%s)" % (iternext_func, iterator_temp)
if use_loop:
code.putln("for (index=0; index < %s; index++) {" % len(unpacked_items))
code.put("PyObject* item = %s; if (unlikely(!item)) " % unpack_code)
code.put_goto(unpacking_error_label)
code.put_gotref("item")
code.putln("*(temps[index]) = item;")
code.putln("}")
else:
for i, item in enumerate(unpacked_items):
code.put(
"index = %d; %s = %s; if (unlikely(!%s)) " % (
i,
item.result(),
unpack_code,
item.result()))
code.put_goto(unpacking_error_label)
code.put_gotref(item.py_result())
if terminate:
code.globalstate.use_utility_code(
UtilityCode.load_cached("UnpackItemEndCheck", "ObjectHandling.c"))
code.put_error_if_neg(self.pos, "__Pyx_IternextUnpackEndCheck(%s, %d)" % (
unpack_code,
len(unpacked_items)))
code.putln("%s = NULL;" % iternext_func)
code.put_decref_clear(iterator_temp, py_object_type)
unpacking_done_label = code.new_label('unpacking_done')
code.put_goto(unpacking_done_label)
code.put_label(unpacking_error_label)
code.put_decref_clear(iterator_temp, py_object_type)
code.putln("%s = NULL;" % iternext_func)
code.putln("if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);")
code.putln(code.error_goto(self.pos))
code.put_label(unpacking_done_label)
code.funcstate.release_temp(iternext_func)
if terminate:
code.funcstate.release_temp(iterator_temp)
iterator_temp = None
return iterator_temp
def generate_starred_assignment_code(self, rhs, code):
for i, arg in enumerate(self.args):
if arg.is_starred:
starred_target = self.unpacked_items[i]
unpacked_fixed_items_left = self.unpacked_items[:i]
unpacked_fixed_items_right = self.unpacked_items[i+1:]
break
else:
assert False
iterator_temp = None
if unpacked_fixed_items_left:
for item in unpacked_fixed_items_left:
item.allocate(code)
code.putln('{')
iterator_temp = self.generate_generic_parallel_unpacking_code(
code, rhs, unpacked_fixed_items_left,
use_loop=True, terminate=False)
for i, item in enumerate(unpacked_fixed_items_left):
value_node = self.coerced_unpacked_items[i]
value_node.generate_evaluation_code(code)
code.putln('}')
starred_target.allocate(code)
target_list = starred_target.result()
code.putln("%s = PySequence_List(%s); %s" % (
target_list,
iterator_temp or rhs.py_result(),
code.error_goto_if_null(target_list, self.pos)))
code.put_gotref(target_list)
if iterator_temp:
code.put_decref_clear(iterator_temp, py_object_type)
code.funcstate.release_temp(iterator_temp)
else:
rhs.generate_disposal_code(code)
if unpacked_fixed_items_right:
code.globalstate.use_utility_code(raise_need_more_values_to_unpack)
length_temp = code.funcstate.allocate_temp(PyrexTypes.c_py_ssize_t_type, manage_ref=False)
code.putln('%s = PyList_GET_SIZE(%s);' % (length_temp, target_list))
code.putln("if (unlikely(%s < %d)) {" % (length_temp, len(unpacked_fixed_items_right)))
code.putln("__Pyx_RaiseNeedMoreValuesError(%d+%s); %s" % (
len(unpacked_fixed_items_left), length_temp,
code.error_goto(self.pos)))
code.putln('}')
for item in unpacked_fixed_items_right[::-1]:
item.allocate(code)
for i, (item, coerced_arg) in enumerate(zip(unpacked_fixed_items_right[::-1],
self.coerced_unpacked_items[::-1])):
code.putln('#if CYTHON_COMPILING_IN_CPYTHON')
code.putln("%s = PyList_GET_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
# resize the list the hard way
code.putln("((PyVarObject*)%s)->ob_size--;" % target_list)
code.putln('#else')
code.putln("%s = PySequence_ITEM(%s, %s-%d); " % (
item.py_result(), target_list, length_temp, i+1))
code.putln('#endif')
code.put_gotref(item.py_result())
coerced_arg.generate_evaluation_code(code)
code.putln('#if !CYTHON_COMPILING_IN_CPYTHON')
sublist_temp = code.funcstate.allocate_temp(py_object_type, manage_ref=True)
code.putln('%s = PySequence_GetSlice(%s, 0, %s-%d); %s' % (
sublist_temp, target_list, length_temp, len(unpacked_fixed_items_right),
code.error_goto_if_null(sublist_temp, self.pos)))
code.put_gotref(sublist_temp)
code.funcstate.release_temp(length_temp)
code.put_decref(target_list, py_object_type)
code.putln('%s = %s; %s = NULL;' % (target_list, sublist_temp, sublist_temp))
code.putln('#else')
code.putln('(void)%s;' % sublist_temp) # avoid warning about unused variable
code.funcstate.release_temp(sublist_temp)
code.putln('#endif')
for i, arg in enumerate(self.args):
arg.generate_assignment_code(self.coerced_unpacked_items[i], code)
def annotate(self, code):
for arg in self.args:
arg.annotate(code)
if self.unpacked_items:
for arg in self.unpacked_items:
arg.annotate(code)
for arg in self.coerced_unpacked_items:
arg.annotate(code)
class TupleNode(SequenceNode):
# Tuple constructor.
type = tuple_type
is_partly_literal = False
gil_message = "Constructing Python tuple"
def infer_type(self, env):
if self.mult_factor or not self.args:
return tuple_type
arg_types = [arg.infer_type(env) for arg in self.args]
if any(type.is_pyobject or type.is_memoryviewslice or type.is_unspecified or type.is_fused
for type in arg_types):
return tuple_type
return env.declare_tuple_type(self.pos, arg_types).type
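# Example (hedged): for "t = (1, 2.0)" where both items have plain C
# types, infer_type() above returns a ctuple type such as
# "(long, double)" instead of the generic Python tuple type; any
# Python object, memoryview slice, unspecified or fused-type item
# forces tuple_type instead.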
def analyse_types(self, env, skip_children=False):
if len(self.args) == 0:
self.is_temp = False
self.is_literal = True
return self
if not skip_children:
for i, arg in enumerate(self.args):
if arg.is_starred:
arg.starred_expr_allowed_here = True
self.args[i] = arg.analyse_types(env)
if (not self.mult_factor and
not any((arg.is_starred or arg.type.is_pyobject or arg.type.is_memoryviewslice or arg.type.is_fused)
for arg in self.args)):
self.type = env.declare_tuple_type(self.pos, (arg.type for arg in self.args)).type
self.is_temp = 1
return self
node = SequenceNode.analyse_types(self, env, skip_children=True)
node = node._create_merge_node_if_necessary(env)
if not node.is_sequence_constructor:
return node
if not all(child.is_literal for child in node.args):
return node
if not node.mult_factor or (
node.mult_factor.is_literal and
isinstance(node.mult_factor.constant_result, _py_int_types)):
node.is_temp = False
node.is_literal = True
else:
if not node.mult_factor.type.is_pyobject:
node.mult_factor = node.mult_factor.coerce_to_pyobject(env)
node.is_temp = True
node.is_partly_literal = True
return node
def analyse_as_type(self, env):
# ctuple type
if not self.args:
return None
item_types = [arg.analyse_as_type(env) for arg in self.args]
if any(t is None for t in item_types):
return None
entry = env.declare_tuple_type(self.pos, item_types)
return entry.type
def coerce_to(self, dst_type, env):
if self.type.is_ctuple:
if dst_type.is_ctuple and self.type.size == dst_type.size:
return self.coerce_to_ctuple(dst_type, env)
elif dst_type is tuple_type or dst_type is py_object_type:
coerced_args = [arg.coerce_to_pyobject(env) for arg in self.args]
return TupleNode(self.pos, args=coerced_args, type=tuple_type, is_temp=1).analyse_types(env, skip_children=True)
else:
return self.coerce_to_pyobject(env).coerce_to(dst_type, env)
elif dst_type.is_ctuple and not self.mult_factor:
return self.coerce_to_ctuple(dst_type, env)
else:
return SequenceNode.coerce_to(self, dst_type, env)
def as_list(self):
t = ListNode(self.pos, args=self.args, mult_factor=self.mult_factor)
if isinstance(self.constant_result, tuple):
t.constant_result = list(self.constant_result)
return t
def is_simple(self):
# either temp or constant => always simple
return True
def nonlocally_immutable(self):
# either temp or constant => always safe
return True
def calculate_result_code(self):
if len(self.args) > 0:
return self.result_code
else:
return Naming.empty_tuple
def calculate_constant_result(self):
self.constant_result = tuple([
arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = self.compile_time_value_list(denv)
try:
return tuple(values)
except Exception as e:
self.compile_time_value_error(e)
def generate_operation_code(self, code):
if len(self.args) == 0:
# result_code is Naming.empty_tuple
return
if self.is_literal or self.is_partly_literal:
# The "mult_factor" is part of the deduplication if it is also constant, i.e. when
# we deduplicate the multiplied result. Otherwise, only deduplicate the constant part.
dedup_key = make_dedup_key(self.type, [self.mult_factor if self.is_literal else None] + self.args)
tuple_target = code.get_py_const(py_object_type, 'tuple', cleanup_level=2, dedup_key=dedup_key)
const_code = code.get_cached_constants_writer(tuple_target)
if const_code is not None:
# constant is not yet initialised
const_code.mark_pos(self.pos)
self.generate_sequence_packing_code(const_code, tuple_target, plain=not self.is_literal)
const_code.put_giveref(tuple_target)
if self.is_literal:
self.result_code = tuple_target
else:
code.putln('%s = PyNumber_Multiply(%s, %s); %s' % (
self.result(), tuple_target, self.mult_factor.py_result(),
code.error_goto_if_null(self.result(), self.pos)
))
code.put_gotref(self.py_result())
else:
self.type.entry.used = True
self.generate_sequence_packing_code(code)
class ListNode(SequenceNode):
# List constructor.
# obj_conversion_errors [PyrexError] used internally
# original_args [ExprNode] used internally
obj_conversion_errors = []
type = list_type
in_module_scope = False
gil_message = "Constructing Python list"
def type_dependencies(self, env):
return ()
def infer_type(self, env):
# TODO: Infer non-object list arrays.
return list_type
def analyse_expressions(self, env):
for arg in self.args:
if arg.is_starred:
arg.starred_expr_allowed_here = True
node = SequenceNode.analyse_expressions(self, env)
return node.coerce_to_pyobject(env)
def analyse_types(self, env):
with local_errors(ignore=True) as errors:
self.original_args = list(self.args)
node = SequenceNode.analyse_types(self, env)
node.obj_conversion_errors = errors
if env.is_module_scope:
self.in_module_scope = True
node = node._create_merge_node_if_necessary(env)
return node
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
elif (dst_type.is_array or dst_type.is_ptr) and dst_type.base_type is not PyrexTypes.c_void_type:
array_length = len(self.args)
if self.mult_factor:
if isinstance(self.mult_factor.constant_result, _py_int_types):
if self.mult_factor.constant_result <= 0:
error(self.pos, "Cannot coerce non-positively multiplied list to '%s'" % dst_type)
else:
array_length *= self.mult_factor.constant_result
else:
error(self.pos, "Cannot coerce dynamically multiplied list to '%s'" % dst_type)
base_type = dst_type.base_type
self.type = PyrexTypes.CArrayType(base_type, array_length)
for i in range(len(self.original_args)):
arg = self.args[i]
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(base_type, env)
elif dst_type.is_cpp_class:
# TODO(robertwb): Avoid object conversion for vector/list/set.
return TypecastNode(self.pos, operand=self, type=PyrexTypes.py_object_type).coerce_to(dst_type, env)
elif self.mult_factor:
error(self.pos, "Cannot coerce multiplied list to '%s'" % dst_type)
elif dst_type.is_struct:
if len(self.args) > len(dst_type.scope.var_entries):
error(self.pos, "Too many members for '%s'" % dst_type)
else:
if len(self.args) < len(dst_type.scope.var_entries):
warning(self.pos, "Too few members for '%s'" % dst_type, 1)
for i, (arg, member) in enumerate(zip(self.original_args, dst_type.scope.var_entries)):
if isinstance(arg, CoerceToPyTypeNode):
arg = arg.arg
self.args[i] = arg.coerce_to(member.type, env)
self.type = dst_type
elif dst_type.is_ctuple:
return self.coerce_to_ctuple(dst_type, env)
else:
self.type = error_type
error(self.pos, "Cannot coerce list to type '%s'" % dst_type)
return self
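# Example (hedged): assigning a literal list to a C array, as in
#
#     cdef int x[6]
#     x = [1, 2, 3] * 2
#
# takes the array/pointer branch of coerce_to() above: the constant
# mult_factor (2) scales the inferred array length, and each element
# is coerced to the array's base type.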
def as_list(self): # dummy for compatibility with TupleNode
return self
def as_tuple(self):
t = TupleNode(self.pos, args=self.args, mult_factor=self.mult_factor)
if isinstance(self.constant_result, list):
t.constant_result = tuple(self.constant_result)
return t
def allocate_temp_result(self, code):
if self.type.is_array:
if self.in_module_scope:
self.temp_code = code.funcstate.allocate_temp(
self.type, manage_ref=False, static=True, reusable=False)
else:
# To be valid C++, we must allocate the memory on the stack
# manually and be sure not to reuse it for something else.
# Yes, this means that we leak a temp array variable.
self.temp_code = code.funcstate.allocate_temp(
self.type, manage_ref=False, reusable=False)
else:
SequenceNode.allocate_temp_result(self, code)
def calculate_constant_result(self):
if self.mult_factor:
raise ValueError() # may exceed the compile time memory
self.constant_result = [
arg.constant_result for arg in self.args]
def compile_time_value(self, denv):
l = self.compile_time_value_list(denv)
if self.mult_factor:
l *= self.mult_factor.compile_time_value(denv)
return l
def generate_operation_code(self, code):
if self.type.is_pyobject:
for err in self.obj_conversion_errors:
report_error(err)
self.generate_sequence_packing_code(code)
elif self.type.is_array:
if self.mult_factor:
code.putln("{")
code.putln("Py_ssize_t %s;" % Naming.quick_temp_cname)
code.putln("for ({i} = 0; {i} < {count}; {i}++) {{".format(
i=Naming.quick_temp_cname, count=self.mult_factor.result()))
offset = '+ (%d * %s)' % (len(self.args), Naming.quick_temp_cname)
else:
offset = ''
for i, arg in enumerate(self.args):
if arg.type.is_array:
code.globalstate.use_utility_code(UtilityCode.load_cached("IncludeStringH", "StringTools.c"))
code.putln("memcpy(&(%s[%s%s]), %s, sizeof(%s[0]));" % (
self.result(), i, offset,
arg.result(), self.result()
))
else:
code.putln("%s[%s%s] = %s;" % (
self.result(),
i,
offset,
arg.result()))
if self.mult_factor:
code.putln("}")
code.putln("}")
elif self.type.is_struct:
for arg, member in zip(self.args, self.type.scope.var_entries):
code.putln("%s.%s = %s;" % (
self.result(),
member.cname,
arg.result()))
else:
raise InternalError("List type never specified")
class ScopedExprNode(ExprNode):
# Abstract base class for ExprNodes that have their own local
# scope, such as generator expressions.
#
# expr_scope Scope the inner scope of the expression
subexprs = []
expr_scope = None
# Does this node actually have its own local scope, i.e. does it
# prevent loop variables from leaking? The non-leaking Py3 behaviour
# is the default, except for list comprehensions, where the behaviour
# differs between Py2 and Py3 (set in Parsing.py based on the parser
# context).
has_local_scope = True
def init_scope(self, outer_scope, expr_scope=None):
if expr_scope is not None:
self.expr_scope = expr_scope
elif self.has_local_scope:
self.expr_scope = Symtab.GeneratorExpressionScope(outer_scope)
else:
self.expr_scope = None
def analyse_declarations(self, env):
self.init_scope(env)
def analyse_scoped_declarations(self, env):
# this is called with the expr_scope as env
pass
def analyse_types(self, env):
# no recursion here, the children will be analysed separately below
return self
def analyse_scoped_expressions(self, env):
# this is called with the expr_scope as env
return self
def generate_evaluation_code(self, code):
# set up local variables and free their references on exit
generate_inner_evaluation_code = super(ScopedExprNode, self).generate_evaluation_code
if not self.has_local_scope or not self.expr_scope.var_entries:
# no local variables => delegate, done
generate_inner_evaluation_code(code)
return
code.putln('{ /* enter inner scope */')
py_entries = []
for _, entry in sorted(item for item in self.expr_scope.entries.items() if item[0]):
if not entry.in_closure:
if entry.type.is_pyobject and entry.used:
py_entries.append(entry)
if not py_entries:
# no local Python references => no cleanup required
generate_inner_evaluation_code(code)
code.putln('} /* exit inner scope */')
return
# must free all local Python references at each exit point
old_loop_labels = code.new_loop_labels()
old_error_label = code.new_error_label()
generate_inner_evaluation_code(code)
# normal (non-error) exit
self._generate_vars_cleanup(code, py_entries)
# error/loop body exit points
exit_scope = code.new_label('exit_scope')
code.put_goto(exit_scope)
for label, old_label in ([(code.error_label, old_error_label)] +
list(zip(code.get_loop_labels(), old_loop_labels))):
if code.label_used(label):
code.put_label(label)
self._generate_vars_cleanup(code, py_entries)
code.put_goto(old_label)
code.put_label(exit_scope)
code.putln('} /* exit inner scope */')
code.set_loop_labels(old_loop_labels)
code.error_label = old_error_label
def _generate_vars_cleanup(self, code, py_entries):
for entry in py_entries:
if entry.is_cglobal:
code.put_var_gotref(entry)
code.put_decref_set(entry.cname, "Py_None")
else:
code.put_var_xdecref_clear(entry)
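# Rough shape of the scope wrapper emitted by generate_evaluation_code()
# above when the expression holds local Python references (hedged
# sketch, simplified):
#
#     { /* enter inner scope */
#         ... evaluate the expression ...
#         /* cleanup on the normal exit and on each used error/loop
#            exit label: */
#         Py_XDECREF(local); local = NULL;
#     } /* exit inner scope */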
class ComprehensionNode(ScopedExprNode):
# A list/set/dict comprehension
child_attrs = ["loop"]
is_temp = True
constant_result = not_a_constant
def infer_type(self, env):
return self.type
def analyse_declarations(self, env):
self.append.target = self # this is used in the PyList_Append of the inner loop
self.init_scope(env)
def analyse_scoped_declarations(self, env):
self.loop.analyse_declarations(env)
def analyse_types(self, env):
if not self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def analyse_scoped_expressions(self, env):
if self.has_local_scope:
self.loop = self.loop.analyse_expressions(env)
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
self.generate_operation_code(code)
def generate_operation_code(self, code):
if self.type is Builtin.list_type:
create_code = 'PyList_New(0)'
elif self.type is Builtin.set_type:
create_code = 'PySet_New(NULL)'
elif self.type is Builtin.dict_type:
create_code = 'PyDict_New()'
else:
raise InternalError("illegal type for comprehension: %s" % self.type)
code.putln('%s = %s; %s' % (
self.result(), create_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
self.loop.generate_execution_code(code)
def annotate(self, code):
self.loop.annotate(code)
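# Example (hedged): a list comprehension like "[f(x) for x in seq]"
# compiles to a ComprehensionNode whose 'loop' is the for-statement;
# the loop body contains a ComprehensionAppendNode (below) that
# appends each f(x) to the freshly created list.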
class ComprehensionAppendNode(Node):
# Need to be careful to avoid infinite recursion:
# target must not be in child_attrs/subexprs
child_attrs = ['expr']
target = None
type = PyrexTypes.c_int_type
def analyse_expressions(self, env):
self.expr = self.expr.analyse_expressions(env)
if not self.expr.type.is_pyobject:
self.expr = self.expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
if self.target.type is list_type:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ListCompAppend", "Optimize.c"))
function = "__Pyx_ListComp_Append"
elif self.target.type is set_type:
function = "PySet_Add"
else:
raise InternalError(
"Invalid type for comprehension node: %s" % self.target.type)
self.expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("%s(%s, (PyObject*)%s)" % (
function,
self.target.result(),
self.expr.result()
), self.pos))
self.expr.generate_disposal_code(code)
self.expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.expr.generate_function_definitions(env, code)
def annotate(self, code):
self.expr.annotate(code)
class DictComprehensionAppendNode(ComprehensionAppendNode):
child_attrs = ['key_expr', 'value_expr']
def analyse_expressions(self, env):
self.key_expr = self.key_expr.analyse_expressions(env)
if not self.key_expr.type.is_pyobject:
self.key_expr = self.key_expr.coerce_to_pyobject(env)
self.value_expr = self.value_expr.analyse_expressions(env)
if not self.value_expr.type.is_pyobject:
self.value_expr = self.value_expr.coerce_to_pyobject(env)
return self
def generate_execution_code(self, code):
self.key_expr.generate_evaluation_code(code)
self.value_expr.generate_evaluation_code(code)
code.putln(code.error_goto_if("PyDict_SetItem(%s, (PyObject*)%s, (PyObject*)%s)" % (
self.target.result(),
self.key_expr.result(),
self.value_expr.result()
), self.pos))
self.key_expr.generate_disposal_code(code)
self.key_expr.free_temps(code)
self.value_expr.generate_disposal_code(code)
self.value_expr.free_temps(code)
def generate_function_definitions(self, env, code):
self.key_expr.generate_function_definitions(env, code)
self.value_expr.generate_function_definitions(env, code)
def annotate(self, code):
self.key_expr.annotate(code)
self.value_expr.annotate(code)
class InlinedGeneratorExpressionNode(ExprNode):
# An inlined generator expression whose result is calculated inside
# the loop and returned as the generator's single (first and only)
# return value.
# This will only be created by transforms when replacing safe builtin
# calls on generator expressions.
#
# gen GeneratorExpressionNode the generator, not containing any YieldExprNodes
# orig_func String the name of the builtin function this node replaces
# target ExprNode or None a 'target' for a ComprehensionAppend node
subexprs = ["gen"]
orig_func = None
target = None
is_temp = True
type = py_object_type
def __init__(self, pos, gen, comprehension_type=None, **kwargs):
gbody = gen.def_node.gbody
gbody.is_inlined = True
if comprehension_type is not None:
assert comprehension_type in (list_type, set_type, dict_type), comprehension_type
gbody.inlined_comprehension_type = comprehension_type
kwargs.update(
target=RawCNameExprNode(pos, comprehension_type, Naming.retval_cname),
type=comprehension_type,
)
super(InlinedGeneratorExpressionNode, self).__init__(pos, gen=gen, **kwargs)
def may_be_none(self):
return self.orig_func not in ('any', 'all', 'sorted')
def infer_type(self, env):
return self.type
def analyse_types(self, env):
self.gen = self.gen.analyse_expressions(env)
return self
def generate_result_code(self, code):
code.putln("%s = __Pyx_Generator_Next(%s); %s" % (
self.result(), self.gen.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
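# Example (hedged): a call like "sum(x*x for x in seq)" can be
# replaced by a transform with an InlinedGeneratorExpressionNode; the
# summing then happens inside the generator body, and the single
# result is fetched with one __Pyx_Generator_Next() call as above.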
class MergedSequenceNode(ExprNode):
"""
Merge a sequence of iterables into a set/list/tuple.
The target collection is determined by self.type, which must be set externally.
args [ExprNode]
"""
subexprs = ['args']
is_temp = True
gil_message = "Constructing Python collection"
def __init__(self, pos, args, type):
if type in (list_type, tuple_type) and args and args[0].is_sequence_constructor:
# construct a list directly from the first argument that we can then extend
if args[0].type is not list_type:
args[0] = ListNode(args[0].pos, args=args[0].args, is_temp=True)
ExprNode.__init__(self, pos, args=args, type=type)
def calculate_constant_result(self):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.constant_result <= 0:
continue
# otherwise, adding each item once should be enough
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.constant_result for arg in item.args)
else:
items = item.constant_result
result.extend(items)
if self.type is set_type:
result = set(result)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
self.constant_result = result
def compile_time_value(self, denv):
result = []
for item in self.args:
if item.is_sequence_constructor and item.mult_factor:
if item.mult_factor.compile_time_value(denv) <= 0:
continue
if item.is_set_literal or item.is_sequence_constructor:
# process items in order
items = (arg.compile_time_value(denv) for arg in item.args)
else:
items = item.compile_time_value(denv)
result.extend(items)
if self.type is set_type:
try:
result = set(result)
except Exception as e:
self.compile_time_value_error(e)
elif self.type is tuple_type:
result = tuple(result)
else:
assert self.type is list_type
return result
def type_dependencies(self, env):
return ()
def infer_type(self, env):
return self.type
def analyse_types(self, env):
args = [
arg.analyse_types(env).coerce_to_pyobject(env).as_none_safe_node(
# FIXME: CPython's error message starts with the runtime function name
'argument after * must be an iterable, not NoneType')
for arg in self.args
]
if len(args) == 1 and args[0].type is self.type:
# strip this intermediate node and use the bare collection
return args[0]
assert self.type in (set_type, list_type, tuple_type)
self.args = args
return self
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
code.mark_pos(self.pos)
self.allocate_temp_result(code)
is_set = self.type is set_type
args = iter(self.args)
item = next(args)
item.generate_evaluation_code(code)
if (is_set and item.is_set_literal or
not is_set and item.is_sequence_constructor and item.type is list_type):
code.putln("%s = %s;" % (self.result(), item.py_result()))
item.generate_post_assignment_code(code)
else:
code.putln("%s = %s(%s); %s" % (
self.result(),
'PySet_New' if is_set else 'PySequence_List',
item.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
item.generate_disposal_code(code)
item.free_temps(code)
helpers = set()
if is_set:
add_func = "PySet_Add"
extend_func = "__Pyx_PySet_Update"
else:
add_func = "__Pyx_ListComp_Append"
extend_func = "__Pyx_PyList_Extend"
for item in args:
if (is_set and (item.is_set_literal or item.is_sequence_constructor) or
(item.is_sequence_constructor and not item.mult_factor)):
if not is_set and item.args:
helpers.add(("ListCompAppend", "Optimize.c"))
for arg in item.args:
arg.generate_evaluation_code(code)
code.put_error_if_neg(arg.pos, "%s(%s, %s)" % (
add_func,
self.result(),
arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
continue
if is_set:
helpers.add(("PySet_Update", "Builtins.c"))
else:
helpers.add(("ListExtend", "Optimize.c"))
item.generate_evaluation_code(code)
code.put_error_if_neg(item.pos, "%s(%s, %s)" % (
extend_func,
self.result(),
item.py_result()))
item.generate_disposal_code(code)
item.free_temps(code)
if self.type is tuple_type:
code.putln("{")
code.putln("PyObject *%s = PyList_AsTuple(%s);" % (
Naming.quick_temp_cname,
self.result()))
code.put_decref(self.result(), py_object_type)
code.putln("%s = %s; %s" % (
self.result(),
Naming.quick_temp_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
code.putln("}")
for helper in sorted(helpers):
code.globalstate.use_utility_code(UtilityCode.load_cached(*helper))
def annotate(self, code):
for item in self.args:
item.annotate(code)
class SetNode(ExprNode):
"""
Set constructor.
"""
subexprs = ['args']
type = set_type
is_set_literal = True
gil_message = "Constructing Python set"
def analyse_types(self, env):
for i in range(len(self.args)):
arg = self.args[i]
arg = arg.analyse_types(env)
self.args[i] = arg.coerce_to_pyobject(env)
self.type = set_type
self.is_temp = 1
return self
def may_be_none(self):
return False
def calculate_constant_result(self):
self.constant_result = set([arg.constant_result for arg in self.args])
def compile_time_value(self, denv):
values = [arg.compile_time_value(denv) for arg in self.args]
try:
return set(values)
except Exception as e:
self.compile_time_value_error(e)
def generate_evaluation_code(self, code):
for arg in self.args:
arg.generate_evaluation_code(code)
self.allocate_temp_result(code)
code.putln(
"%s = PySet_New(0); %s" % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
for arg in self.args:
code.put_error_if_neg(
self.pos,
"PySet_Add(%s, %s)" % (self.result(), arg.py_result()))
arg.generate_disposal_code(code)
arg.free_temps(code)
class DictNode(ExprNode):
# Dictionary constructor.
#
# key_value_pairs [DictItemNode]
# exclude_null_values [boolean] Do not add NULL values to dict
#
# obj_conversion_errors [PyrexError] used internally
subexprs = ['key_value_pairs']
is_temp = 1
exclude_null_values = False
type = dict_type
is_dict_literal = True
reject_duplicates = False
obj_conversion_errors = []
@classmethod
def from_pairs(cls, pos, pairs):
return cls(pos, key_value_pairs=[
DictItemNode(pos, key=k, value=v) for k, v in pairs])
def calculate_constant_result(self):
self.constant_result = dict([
item.constant_result for item in self.key_value_pairs])
def compile_time_value(self, denv):
pairs = [(item.key.compile_time_value(denv), item.value.compile_time_value(denv))
for item in self.key_value_pairs]
try:
return dict(pairs)
except Exception as e:
self.compile_time_value_error(e)
def type_dependencies(self, env):
return ()
def infer_type(self, env):
# TODO: Infer struct constructors.
return dict_type
def analyse_types(self, env):
with local_errors(ignore=True) as errors:
self.key_value_pairs = [
item.analyse_types(env)
for item in self.key_value_pairs
]
self.obj_conversion_errors = errors
return self
def may_be_none(self):
return False
def coerce_to(self, dst_type, env):
if dst_type.is_pyobject:
self.release_errors()
if self.type.is_struct_or_union:
if not dict_type.subtype_of(dst_type):
error(self.pos, "Cannot interpret struct as non-dict type '%s'" % dst_type)
return DictNode(self.pos, key_value_pairs=[
DictItemNode(item.pos, key=item.key.coerce_to_pyobject(env),
value=item.value.coerce_to_pyobject(env))
for item in self.key_value_pairs])
if not self.type.subtype_of(dst_type):
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
elif dst_type.is_struct_or_union:
self.type = dst_type
if not dst_type.is_struct and len(self.key_value_pairs) != 1:
error(self.pos, "Exactly one field must be specified to convert to union '%s'" % dst_type)
elif dst_type.is_struct and len(self.key_value_pairs) < len(dst_type.scope.var_entries):
warning(self.pos, "Not all members given for struct '%s'" % dst_type, 1)
for item in self.key_value_pairs:
if isinstance(item.key, CoerceToPyTypeNode):
item.key = item.key.arg
if not item.key.is_string_literal:
error(item.key.pos, "Invalid struct field identifier")
item.key = StringNode(item.key.pos, value="")
else:
key = str(item.key.value) # converts string literals to unicode in Py3
member = dst_type.scope.lookup_here(key)
if not member:
error(item.key.pos, "struct '%s' has no field '%s'" % (dst_type, key))
else:
value = item.value
if isinstance(value, CoerceToPyTypeNode):
value = value.arg
item.value = value.coerce_to(member.type, env)
else:
self.type = error_type
error(self.pos, "Cannot interpret dict as type '%s'" % dst_type)
return self
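# Example (hedged; 'Point' is a hypothetical struct): a dict literal
# assigned to a C struct, as in
#
#     cdef Point p = {'x': 1.0, 'y': 2.0}
#
# takes the struct/union branch of coerce_to() above: every key must
# be a string literal naming a field, and each value is coerced to
# that field's C type.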
def release_errors(self):
for err in self.obj_conversion_errors:
report_error(err)
self.obj_conversion_errors = []
gil_message = "Constructing Python dict"
def generate_evaluation_code(self, code):
# Custom method used here because key-value
# pairs are evaluated and used one at a time.
code.mark_pos(self.pos)
self.allocate_temp_result(code)
is_dict = self.type.is_pyobject
if is_dict:
self.release_errors()
code.putln(
"%s = __Pyx_PyDict_NewPresized(%d); %s" % (
self.result(),
len(self.key_value_pairs),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
keys_seen = set()
key_type = None
needs_error_helper = False
for item in self.key_value_pairs:
item.generate_evaluation_code(code)
if is_dict:
if self.exclude_null_values:
code.putln('if (%s) {' % item.value.py_result())
key = item.key
if self.reject_duplicates:
if keys_seen is not None:
# avoid runtime 'in' checks for literals that we can do at compile time
if not key.is_string_literal:
keys_seen = None
elif key.value in keys_seen:
# FIXME: this could be a compile time error, at least in Cython code
keys_seen = None
elif key_type is not type(key.value):
if key_type is None:
key_type = type(key.value)
keys_seen.add(key.value)
else:
# different types => may not be able to compare at compile time
keys_seen = None
else:
keys_seen.add(key.value)
if keys_seen is None:
code.putln('if (unlikely(PyDict_Contains(%s, %s))) {' % (
self.result(), key.py_result()))
# currently only used in function calls
needs_error_helper = True
code.putln('__Pyx_RaiseDoubleKeywordsError("function", %s); %s' % (
key.py_result(),
code.error_goto(item.pos)))
code.putln("} else {")
code.put_error_if_neg(self.pos, "PyDict_SetItem(%s, %s, %s)" % (
self.result(),
item.key.py_result(),
item.value.py_result()))
if self.reject_duplicates and keys_seen is None:
code.putln('}')
if self.exclude_null_values:
code.putln('}')
else:
code.putln("%s.%s = %s;" % (
self.result(),
item.key.value,
item.value.result()))
item.generate_disposal_code(code)
item.free_temps(code)
if needs_error_helper:
code.globalstate.use_utility_code(
UtilityCode.load_cached("RaiseDoubleKeywords", "FunctionArguments.c"))
def annotate(self, code):
for item in self.key_value_pairs:
item.annotate(code)
class DictItemNode(ExprNode):
# Represents a single item in a DictNode
#
# key ExprNode
# value ExprNode
subexprs = ['key', 'value']
nogil_check = None # Parent DictNode takes care of it
def calculate_constant_result(self):
self.constant_result = (
self.key.constant_result, self.value.constant_result)
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
self.value = self.value.coerce_to_pyobject(env)
return self
def generate_evaluation_code(self, code):
self.key.generate_evaluation_code(code)
self.value.generate_evaluation_code(code)
def generate_disposal_code(self, code):
self.key.generate_disposal_code(code)
self.value.generate_disposal_code(code)
def free_temps(self, code):
self.key.free_temps(code)
self.value.free_temps(code)
def __iter__(self):
return iter([self.key, self.value])
class SortedDictKeysNode(ExprNode):
# build sorted list of dict keys, e.g. for dir()
subexprs = ['arg']
is_temp = True
def __init__(self, arg):
ExprNode.__init__(self, arg.pos, arg=arg)
self.type = Builtin.list_type
def analyse_types(self, env):
arg = self.arg.analyse_types(env)
if arg.type is Builtin.dict_type:
arg = arg.as_none_safe_node(
"'NoneType' object is not iterable")
self.arg = arg
return self
def may_be_none(self):
return False
def generate_result_code(self, code):
dict_result = self.arg.py_result()
if self.arg.type is Builtin.dict_type:
code.putln('%s = PyDict_Keys(%s); %s' % (
self.result(), dict_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
else:
# originally used PyMapping_Keys() here, but that may return a tuple
code.globalstate.use_utility_code(UtilityCode.load_cached(
'PyObjectCallMethod0', 'ObjectHandling.c'))
keys_cname = code.intern_identifier(StringEncoding.EncodedString("keys"))
code.putln('%s = __Pyx_PyObject_CallMethod0(%s, %s); %s' % (
self.result(), dict_result, keys_cname,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
code.putln("if (unlikely(!PyList_Check(%s))) {" % self.result())
code.put_decref_set(self.result(), "PySequence_List(%s)" % self.result())
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.py_result())
code.putln("}")
code.put_error_if_neg(
self.pos, 'PyList_Sort(%s)' % self.py_result())
class ModuleNameMixin(object):
def get_py_mod_name(self, code):
return code.get_py_string_const(
self.module_name, identifier=True)
def get_py_qualified_name(self, code):
return code.get_py_string_const(
self.qualname, identifier=True)
class ClassNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# class_def_node PyClassDefNode PyClassDefNode defining this class
# doc ExprNode or None Doc string
# module_name EncodedString Name of defining module
subexprs = ['doc']
type = py_object_type
is_temp = True
def infer_type(self, env):
# TODO: could return 'type' in some cases
return py_object_type
def analyse_types(self, env):
if self.doc:
self.doc = self.doc.analyse_types(env)
self.doc = self.doc.coerce_to_pyobject(env)
env.use_utility_code(UtilityCode.load_cached("CreateClass", "ObjectHandling.c"))
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
class_def_node = self.class_def_node
cname = code.intern_identifier(self.name)
if self.doc:
code.put_error_if_neg(self.pos,
'PyDict_SetItem(%s, %s, %s)' % (
class_def_node.dict.py_result(),
code.intern_identifier(
StringEncoding.EncodedString("__doc__")),
self.doc.py_result()))
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
code.putln(
'%s = __Pyx_CreateClass(%s, %s, %s, %s, %s); %s' % (
self.result(),
class_def_node.bases.py_result(),
class_def_node.dict.py_result(),
cname,
qualname,
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class Py3ClassNode(ExprNode):
# Helper class used in the implementation of Python3+
# class definitions. Constructs a class object given
# a name, tuple of bases and class dictionary.
#
# name EncodedString Name of the class
# module_name EncodedString Name of defining module
# class_def_node PyClassDefNode PyClassDefNode defining this class
# calculate_metaclass bool should call CalculateMetaclass()
# allow_py2_metaclass bool should look for Py2 metaclass
subexprs = []
type = py_object_type
is_temp = True
def infer_type(self, env):
# TODO: could return 'type' in some cases
return py_object_type
def analyse_types(self, env):
return self
def may_be_none(self):
return True
gil_message = "Constructing Python class"
def generate_result_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("Py3ClassCreate", "ObjectHandling.c"))
cname = code.intern_identifier(self.name)
class_def_node = self.class_def_node
mkw = class_def_node.mkw.py_result() if class_def_node.mkw else 'NULL'
if class_def_node.metaclass:
metaclass = class_def_node.metaclass.py_result()
else:
metaclass = "((PyObject*)&__Pyx_DefaultClassType)"
code.putln(
'%s = __Pyx_Py3ClassCreate(%s, %s, %s, %s, %s, %d, %d); %s' % (
self.result(),
metaclass,
cname,
class_def_node.bases.py_result(),
class_def_node.dict.py_result(),
mkw,
self.calculate_metaclass,
self.allow_py2_metaclass,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyClassMetaclassNode(ExprNode):
# Helper class holds Python3 metaclass object
#
# class_def_node PyClassDefNode PyClassDefNode defining this class
subexprs = []
def analyse_types(self, env):
self.type = py_object_type
self.is_temp = True
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
bases = self.class_def_node.bases
mkw = self.class_def_node.mkw
if mkw:
code.globalstate.use_utility_code(
UtilityCode.load_cached("Py3MetaclassGet", "ObjectHandling.c"))
call = "__Pyx_Py3MetaclassGet(%s, %s)" % (
bases.result(),
mkw.result())
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CalculateMetaclass", "ObjectHandling.c"))
call = "__Pyx_CalculateMetaclass(NULL, %s)" % (
bases.result())
code.putln(
"%s = %s; %s" % (
self.result(), call,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class PyClassNamespaceNode(ExprNode, ModuleNameMixin):
# Helper class holds Python3 namespace object
#
# These are not owned by this node, except where noted:
# class_def_node PyClassDefNode PyClassDefNode defining this class
# doc ExprNode or None Doc string (owned)
subexprs = ['doc']
def analyse_types(self, env):
if self.doc:
self.doc = self.doc.analyse_types(env).coerce_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
return self
def may_be_none(self):
return True
def generate_result_code(self, code):
cname = code.intern_identifier(self.name)
py_mod_name = self.get_py_mod_name(code)
qualname = self.get_py_qualified_name(code)
class_def_node = self.class_def_node
null = "(PyObject *) NULL"
doc_code = self.doc.result() if self.doc else null
mkw = class_def_node.mkw.py_result() if class_def_node.mkw else null
metaclass = class_def_node.metaclass.py_result() if class_def_node.metaclass else null
code.putln(
"%s = __Pyx_Py3MetaclassPrepare(%s, %s, %s, %s, %s, %s, %s); %s" % (
self.result(),
metaclass,
class_def_node.bases.result(),
cname,
qualname,
mkw,
py_mod_name,
doc_code,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class ClassCellInjectorNode(ExprNode):
# Initialize CyFunction.func_classobj
is_temp = True
type = py_object_type
subexprs = []
is_active = False
def analyse_expressions(self, env):
return self
def generate_result_code(self, code):
assert self.is_active
code.putln(
'%s = PyList_New(0); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
def generate_injection_code(self, code, classobj_cname):
assert self.is_active
code.globalstate.use_utility_code(
UtilityCode.load_cached("CyFunctionClassCell", "CythonFunction.c"))
code.put_error_if_neg(self.pos, '__Pyx_CyFunction_InitClassCell(%s, %s)' % (
self.result(), classobj_cname))
class ClassCellNode(ExprNode):
# Class Cell for noargs super()
subexprs = []
is_temp = True
is_generator = False
type = py_object_type
def analyse_types(self, env):
return self
def generate_result_code(self, code):
if not self.is_generator:
code.putln('%s = __Pyx_CyFunction_GetClassObj(%s);' % (
self.result(),
Naming.self_cname))
else:
code.putln('%s = %s->classobj;' % (
self.result(), Naming.generator_cname))
code.putln(
'if (!%s) { PyErr_SetString(PyExc_SystemError, '
'"super(): empty __class__ cell"); %s }' % (
self.result(),
code.error_goto(self.pos)))
code.put_incref(self.result(), py_object_type)
class PyCFunctionNode(ExprNode, ModuleNameMixin):
# Helper class used in the implementation of Python
# functions. Constructs a PyCFunction object
# from a PyMethodDef struct.
#
# pymethdef_cname string PyMethodDef structure
# self_object ExprNode or None
# binding bool
# def_node DefNode the Python function node
# module_name EncodedString Name of defining module
# code_object CodeObjectNode the PyCodeObject creator node
subexprs = ['code_object', 'defaults_tuple', 'defaults_kwdict',
'annotations_dict']
self_object = None
code_object = None
binding = False
def_node = None
defaults = None
defaults_struct = None
defaults_pyobjects = 0
defaults_tuple = None
defaults_kwdict = None
annotations_dict = None
type = py_object_type
is_temp = 1
specialized_cpdefs = None
is_specialization = False
@classmethod
def from_defnode(cls, node, binding):
return cls(node.pos,
def_node=node,
pymethdef_cname=node.entry.pymethdef_cname,
binding=binding or node.specialized_cpdefs,
specialized_cpdefs=node.specialized_cpdefs,
code_object=CodeObjectNode(node))
def analyse_types(self, env):
if self.binding:
self.analyse_default_args(env)
return self
def analyse_default_args(self, env):
"""
Handle non-literal function's default arguments.
"""
nonliteral_objects = []
nonliteral_other = []
default_args = []
default_kwargs = []
annotations = []
# For global cpdef functions and def/cpdef methods in cdef classes, we must use global constants
# for default arguments to avoid the dependency on the CyFunction object as 'self' argument
# in the underlying C function. Basically, cpdef functions/methods are static C functions,
# so their optional arguments must be static, too.
# TODO: change CyFunction implementation to pass both function object and owning object for method calls
must_use_constants = env.is_c_class_scope or (self.def_node.is_wrapper and env.is_module_scope)
for arg in self.def_node.args:
if arg.default and not must_use_constants:
if not arg.default.is_literal:
arg.is_dynamic = True
if arg.type.is_pyobject:
nonliteral_objects.append(arg)
else:
nonliteral_other.append(arg)
else:
arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
if arg.kw_only:
default_kwargs.append(arg)
else:
default_args.append(arg)
if arg.annotation:
arg.annotation = self.analyse_annotation(env, arg.annotation)
annotations.append((arg.pos, arg.name, arg.annotation))
for arg in (self.def_node.star_arg, self.def_node.starstar_arg):
if arg and arg.annotation:
arg.annotation = self.analyse_annotation(env, arg.annotation)
annotations.append((arg.pos, arg.name, arg.annotation))
annotation = self.def_node.return_type_annotation
if annotation:
annotation = self.analyse_annotation(env, annotation)
self.def_node.return_type_annotation = annotation
annotations.append((annotation.pos, StringEncoding.EncodedString("return"), annotation))
if nonliteral_objects or nonliteral_other:
module_scope = env.global_scope()
cname = module_scope.next_id(Naming.defaults_struct_prefix)
scope = Symtab.StructOrUnionScope(cname)
self.defaults = []
for arg in nonliteral_objects:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=True)
self.defaults.append((arg, entry))
for arg in nonliteral_other:
entry = scope.declare_var(arg.name, arg.type, None,
Naming.arg_prefix + arg.name,
allow_pyobject=False, allow_memoryview=True)
self.defaults.append((arg, entry))
entry = module_scope.declare_struct_or_union(
None, 'struct', scope, 1, None, cname=cname)
self.defaults_struct = scope
self.defaults_pyobjects = len(nonliteral_objects)
for arg, entry in self.defaults:
arg.default_value = '%s->%s' % (
Naming.dynamic_args_cname, entry.cname)
self.def_node.defaults_struct = self.defaults_struct.name
if default_args or default_kwargs:
if self.defaults_struct is None:
if default_args:
defaults_tuple = TupleNode(self.pos, args=[
arg.default for arg in default_args])
self.defaults_tuple = defaults_tuple.analyse_types(env).coerce_to_pyobject(env)
if default_kwargs:
defaults_kwdict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
arg.pos,
key=IdentifierStringNode(arg.pos, value=arg.name),
value=arg.default)
for arg in default_kwargs])
self.defaults_kwdict = defaults_kwdict.analyse_types(env)
else:
if default_args:
defaults_tuple = DefaultsTupleNode(
self.pos, default_args, self.defaults_struct)
else:
defaults_tuple = NoneNode(self.pos)
if default_kwargs:
defaults_kwdict = DefaultsKwDictNode(
self.pos, default_kwargs, self.defaults_struct)
else:
defaults_kwdict = NoneNode(self.pos)
defaults_getter = Nodes.DefNode(
self.pos, args=[], star_arg=None, starstar_arg=None,
body=Nodes.ReturnStatNode(
self.pos, return_type=py_object_type,
value=TupleNode(
self.pos, args=[defaults_tuple, defaults_kwdict])),
decorators=None,
name=StringEncoding.EncodedString("__defaults__"))
# The defaults getter must never live in a class scope; it is always a module-level function.
module_scope = env.global_scope()
defaults_getter.analyse_declarations(module_scope)
defaults_getter = defaults_getter.analyse_expressions(module_scope)
defaults_getter.body = defaults_getter.body.analyse_expressions(
defaults_getter.local_scope)
defaults_getter.py_wrapper_required = False
defaults_getter.pymethdef_required = False
self.def_node.defaults_getter = defaults_getter
if annotations:
annotations_dict = DictNode(self.pos, key_value_pairs=[
DictItemNode(
pos, key=IdentifierStringNode(pos, value=name),
value=value)
for pos, name, value in annotations])
self.annotations_dict = annotations_dict.analyse_types(env)
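# A hedged illustration of the cases analyse_default_args() distinguishes:
#
#     def f(a, b=1, c=[], *, d=2):
#         pass
#
# 'b=1' and 'd=2' are literal defaults and go into the defaults tuple /
# kw-dict directly; 'c=[]' is non-literal, so it is marked is_dynamic,
# collected in nonliteral_objects and stored in the generated defaults struct.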
def analyse_annotation(self, env, annotation):
if annotation is None:
return None
atype = annotation.analyse_as_type(env)
if atype is not None:
# Keep parsed types as strings as they might not be Python representable.
annotation = UnicodeNode(
annotation.pos,
value=StringEncoding.EncodedString(atype.declaration_code('', for_display=True)))
annotation = annotation.analyse_types(env)
if not annotation.type.is_pyobject:
annotation = annotation.coerce_to_pyobject(env)
return annotation
def may_be_none(self):
return False
gil_message = "Constructing Python function"
def self_result_code(self):
if self.self_object is None:
self_result = "NULL"
else:
self_result = self.self_object.py_result()
return self_result
def generate_result_code(self, code):
if self.binding:
self.generate_cyfunction_code(code)
else:
self.generate_pycfunction_code(code)
def generate_pycfunction_code(self, code):
py_mod_name = self.get_py_mod_name(code)
code.putln(
'%s = PyCFunction_NewEx(&%s, %s, %s); %s' % (
self.result(),
self.pymethdef_cname,
self.self_result_code(),
py_mod_name,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def generate_cyfunction_code(self, code):
if self.specialized_cpdefs:
def_node = self.specialized_cpdefs[0]
else:
def_node = self.def_node
if self.specialized_cpdefs or self.is_specialization:
code.globalstate.use_utility_code(
UtilityCode.load_cached("FusedFunction", "CythonFunction.c"))
constructor = "__pyx_FusedFunction_New"
else:
code.globalstate.use_utility_code(
UtilityCode.load_cached("CythonFunction", "CythonFunction.c"))
constructor = "__Pyx_CyFunction_New"
if self.code_object:
code_object_result = self.code_object.py_result()
else:
code_object_result = 'NULL'
flags = []
if def_node.is_staticmethod:
flags.append('__Pyx_CYFUNCTION_STATICMETHOD')
elif def_node.is_classmethod:
flags.append('__Pyx_CYFUNCTION_CLASSMETHOD')
if def_node.local_scope.parent_scope.is_c_class_scope and not def_node.entry.is_anonymous:
flags.append('__Pyx_CYFUNCTION_CCLASS')
if flags:
flags = ' | '.join(flags)
else:
flags = '0'
code.putln(
'%s = %s(&%s, %s, %s, %s, %s, %s, %s); %s' % (
self.result(),
constructor,
self.pymethdef_cname,
flags,
self.get_py_qualified_name(code),
self.self_result_code(),
self.get_py_mod_name(code),
Naming.moddict_cname,
code_object_result,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
if def_node.requires_classobj:
assert code.pyclass_stack, "pyclass_stack is empty"
class_node = code.pyclass_stack[-1]
code.put_incref(self.py_result(), py_object_type)
code.putln(
'PyList_Append(%s, %s);' % (
class_node.class_cell.result(),
self.result()))
code.put_giveref(self.py_result())
if self.defaults:
code.putln(
'if (!__Pyx_CyFunction_InitDefaults(%s, sizeof(%s), %d)) %s' % (
self.result(), self.defaults_struct.name,
self.defaults_pyobjects, code.error_goto(self.pos)))
defaults = '__Pyx_CyFunction_Defaults(%s, %s)' % (
self.defaults_struct.name, self.result())
for arg, entry in self.defaults:
arg.generate_assignment_code(code, target='%s->%s' % (
defaults, entry.cname))
if self.defaults_tuple:
code.putln('__Pyx_CyFunction_SetDefaultsTuple(%s, %s);' % (
self.result(), self.defaults_tuple.py_result()))
if self.defaults_kwdict:
code.putln('__Pyx_CyFunction_SetDefaultsKwDict(%s, %s);' % (
self.result(), self.defaults_kwdict.py_result()))
if def_node.defaults_getter and not self.specialized_cpdefs:
# Fused functions do not support dynamic defaults; only their specialisations can have them for now.
code.putln('__Pyx_CyFunction_SetDefaultsGetter(%s, %s);' % (
self.result(), def_node.defaults_getter.entry.pyfunc_cname))
if self.annotations_dict:
code.putln('__Pyx_CyFunction_SetAnnotationsDict(%s, %s);' % (
self.result(), self.annotations_dict.py_result()))
class InnerFunctionNode(PyCFunctionNode):
# Special PyCFunctionNode that depends on a closure class
#
binding = True
needs_self_code = True
def self_result_code(self):
if self.needs_self_code:
return "((PyObject*)%s)" % Naming.cur_scope_cname
return "NULL"
class CodeObjectNode(ExprNode):
# Create a PyCodeObject for a CyFunction instance.
#
# def_node DefNode the Python function node
# varnames TupleNode a tuple with all local variable names
subexprs = ['varnames']
is_temp = False
result_code = None
def __init__(self, def_node):
ExprNode.__init__(self, def_node.pos, def_node=def_node)
args = list(def_node.args)
# if we have args/kwargs, then the first two in var_entries are those
local_vars = [arg for arg in def_node.local_scope.var_entries if arg.name]
self.varnames = TupleNode(
def_node.pos,
args=[IdentifierStringNode(arg.pos, value=arg.name)
for arg in args + local_vars],
is_temp=0,
is_literal=1)
def may_be_none(self):
return False
def calculate_result_code(self, code=None):
if self.result_code is None:
self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
return self.result_code
def generate_result_code(self, code):
if self.result_code is None:
self.result_code = code.get_py_const(py_object_type, 'codeobj', cleanup_level=2)
code = code.get_cached_constants_writer(self.result_code)
if code is None:
return # already initialised
code.mark_pos(self.pos)
func = self.def_node
func_name = code.get_py_string_const(
func.name, identifier=True, is_str=False, unicode_value=func.name)
# FIXME: better way to get the module file path at module init time? Encoding to use?
file_path = StringEncoding.bytes_literal(func.pos[0].get_filenametable_entry().encode('utf8'), 'utf8')
file_path_const = code.get_py_string_const(file_path, identifier=False, is_str=True)
# This combination makes CPython create a new dict for "frame.f_locals" (see GH #1836).
flags = ['CO_OPTIMIZED', 'CO_NEWLOCALS']
if self.def_node.star_arg:
flags.append('CO_VARARGS')
if self.def_node.starstar_arg:
flags.append('CO_VARKEYWORDS')
code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
self.result_code,
len(func.args) - func.num_kwonly_args, # argcount
func.num_kwonly_args, # kwonlyargcount (Py3 only)
len(self.varnames.args), # nlocals
'|'.join(flags) or '0', # flags
Naming.empty_bytes, # code
Naming.empty_tuple, # consts
Naming.empty_tuple, # names (FIXME)
self.varnames.result(), # varnames
Naming.empty_tuple, # freevars (FIXME)
Naming.empty_tuple, # cellvars (FIXME)
file_path_const, # filename
func_name, # name
self.pos[1], # firstlineno
Naming.empty_bytes, # lnotab
code.error_goto_if_null(self.result_code, self.pos),
))
class DefaultLiteralArgNode(ExprNode):
# CyFunction's literal argument default value
#
# Evaluate literal only once.
subexprs = []
is_literal = True
is_temp = False
def __init__(self, pos, arg):
super(DefaultLiteralArgNode, self).__init__(pos)
self.arg = arg
self.type = self.arg.type
self.evaluated = False
def analyse_types(self, env):
return self
def generate_result_code(self, code):
pass
def generate_evaluation_code(self, code):
if not self.evaluated:
self.arg.generate_evaluation_code(code)
self.evaluated = True
def result(self):
return self.type.cast_code(self.arg.result())
class DefaultNonLiteralArgNode(ExprNode):
# CyFunction's non-literal argument default value
subexprs = []
def __init__(self, pos, arg, defaults_struct):
super(DefaultNonLiteralArgNode, self).__init__(pos)
self.arg = arg
self.defaults_struct = defaults_struct
def analyse_types(self, env):
self.type = self.arg.type
self.is_temp = False
return self
def generate_result_code(self, code):
pass
def result(self):
return '__Pyx_CyFunction_Defaults(%s, %s)->%s' % (
self.defaults_struct.name, Naming.self_cname,
self.defaults_struct.lookup(self.arg.name).cname)
class DefaultsTupleNode(TupleNode):
# CyFunction's __defaults__ tuple
def __init__(self, pos, defaults, defaults_struct):
args = []
for arg in defaults:
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
args.append(arg)
super(DefaultsTupleNode, self).__init__(pos, args=args)
def analyse_types(self, env, skip_children=False):
return super(DefaultsTupleNode, self).analyse_types(env, skip_children).coerce_to_pyobject(env)
class DefaultsKwDictNode(DictNode):
# CyFunction's __kwdefaults__ dict
def __init__(self, pos, defaults, defaults_struct):
items = []
for arg in defaults:
name = IdentifierStringNode(arg.pos, value=arg.name)
if not arg.default.is_literal:
arg = DefaultNonLiteralArgNode(pos, arg, defaults_struct)
else:
arg = arg.default
items.append(DictItemNode(arg.pos, key=name, value=arg))
super(DefaultsKwDictNode, self).__init__(pos, key_value_pairs=items)
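# Sketch (illustrative, assuming the non-literal-defaults path) of what the
# Defaults* nodes above expose at the Python level; make_default() is a
# hypothetical helper:
#
#     def f(a=make_default(), *, b=make_default()):
#         pass
#
#     f.__defaults__    # (<stored value>,)      built by DefaultsTupleNode
#     f.__kwdefaults__  # {'b': <stored value>}  built by DefaultsKwDictNode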
class LambdaNode(InnerFunctionNode):
# Lambda expression node (only used as a function reference)
#
# args [CArgDeclNode] formal arguments
# star_arg PyArgDeclNode or None * argument
# starstar_arg PyArgDeclNode or None ** argument
# lambda_name string a module-globally unique lambda name
# result_expr ExprNode
# def_node DefNode the underlying function 'def' node
child_attrs = ['def_node']
name = StringEncoding.EncodedString('<lambda>')
def analyse_declarations(self, env):
self.lambda_name = self.def_node.lambda_name = env.next_id('lambda')
self.def_node.no_assignment_synthesis = True
self.def_node.pymethdef_required = True
self.def_node.analyse_declarations(env)
self.def_node.is_cyfunction = True
self.pymethdef_cname = self.def_node.entry.pymethdef_cname
env.add_lambda_def(self.def_node)
def analyse_types(self, env):
self.def_node = self.def_node.analyse_expressions(env)
return super(LambdaNode, self).analyse_types(env)
def generate_result_code(self, code):
self.def_node.generate_execution_code(code)
super(LambdaNode, self).generate_result_code(code)
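# Example (illustrative): a lambda such as
#
#     double = lambda x: x * 2
#
# is wrapped in a LambdaNode around a synthesised DefNode; the DefNode gets
# a module-globally unique id from env.next_id('lambda'), while the visible
# function name stays '<lambda>'.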
class GeneratorExpressionNode(LambdaNode):
# A generator expression, e.g. (i for i in range(10))
#
# Result is a generator.
#
# loop ForStatNode the for-loop, containing a YieldExprNode
# def_node DefNode the underlying generator 'def' node
name = StringEncoding.EncodedString('genexpr')
binding = False
def analyse_declarations(self, env):
self.genexpr_name = env.next_id('genexpr')
super(GeneratorExpressionNode, self).analyse_declarations(env)
# No pymethdef required
self.def_node.pymethdef_required = False
self.def_node.py_wrapper_required = False
self.def_node.is_cyfunction = False
# Force genexpr signature
self.def_node.entry.signature = TypeSlots.pyfunction_noargs
def generate_result_code(self, code):
code.putln(
'%s = %s(%s); %s' % (
self.result(),
self.def_node.entry.pyfunc_cname,
self.self_result_code(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
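# Example (illustrative): the generator expression
#
#     gen = (i * i for i in range(10))
#
# compiles the loop into a no-argument generator DefNode whose C function is
# called immediately (see generate_result_code above); the returned generator
# object is the value of the expression.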
class YieldExprNode(ExprNode):
# Yield expression node
#
# arg ExprNode the value to return from the generator
# label_num integer yield label number
# is_yield_from boolean is a YieldFromExprNode to delegate to another generator
subexprs = ['arg']
type = py_object_type
label_num = 0
is_yield_from = False
is_await = False
in_async_gen = False
expr_keyword = 'yield'
def analyse_types(self, env):
if not self.label_num or (self.is_yield_from and self.in_async_gen):
error(self.pos, "'%s' not supported here" % self.expr_keyword)
self.is_temp = 1
if self.arg is not None:
self.arg = self.arg.analyse_types(env)
if not self.arg.type.is_pyobject:
self.coerce_yield_argument(env)
return self
def coerce_yield_argument(self, env):
self.arg = self.arg.coerce_to_pyobject(env)
def generate_evaluation_code(self, code):
if self.arg:
self.arg.generate_evaluation_code(code)
self.arg.make_owned_reference(code)
code.putln(
"%s = %s;" % (
Naming.retval_cname,
self.arg.result_as(py_object_type)))
self.arg.generate_post_assignment_code(code)
self.arg.free_temps(code)
else:
code.put_init_to_py_none(Naming.retval_cname, py_object_type)
self.generate_yield_code(code)
def generate_yield_code(self, code):
"""
Generate the code to return the argument in 'Naming.retval_cname'
and to continue at the yield label.
"""
label_num, label_name = code.new_yield_label(
self.expr_keyword.replace(' ', '_'))
code.use_label(label_name)
saved = []
code.funcstate.closure_temps.reset()
for cname, type, manage_ref in code.funcstate.temps_in_use():
save_cname = code.funcstate.closure_temps.allocate_temp(type)
saved.append((cname, save_cname, type))
if type.is_pyobject:
code.put_xgiveref(cname)
code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname))
code.put_xgiveref(Naming.retval_cname)
profile = code.globalstate.directives['profile']
linetrace = code.globalstate.directives['linetrace']
if profile or linetrace:
code.put_trace_return(Naming.retval_cname,
nogil=not code.funcstate.gil_owned)
code.put_finish_refcount_context()
if code.funcstate.current_except is not None:
# inside of an except block => save away currently handled exception
code.putln("__Pyx_Coroutine_SwapException(%s);" % Naming.generator_cname)
else:
# no exceptions being handled => restore exception state of caller
code.putln("__Pyx_Coroutine_ResetAndClearException(%s);" % Naming.generator_cname)
code.putln("/* return from %sgenerator, %sing value */" % (
'async ' if self.in_async_gen else '',
'await' if self.is_await else 'yield'))
code.putln("%s->resume_label = %d;" % (
Naming.generator_cname, label_num))
if self.in_async_gen and not self.is_await:
# __Pyx__PyAsyncGenValueWrapperNew() steals a reference to the return value
code.putln("return __Pyx__PyAsyncGenValueWrapperNew(%s);" % Naming.retval_cname)
else:
code.putln("return %s;" % Naming.retval_cname)
code.put_label(label_name)
for cname, save_cname, type in saved:
code.putln('%s = %s->%s;' % (cname, Naming.cur_scope_cname, save_cname))
if type.is_pyobject:
code.putln('%s->%s = 0;' % (Naming.cur_scope_cname, save_cname))
code.put_xgotref(cname)
self.generate_sent_value_handling_code(code, Naming.sent_value_cname)
if self.result_is_used:
self.allocate_temp_result(code)
code.put('%s = %s; ' % (self.result(), Naming.sent_value_cname))
code.put_incref(self.result(), py_object_type)
def generate_sent_value_handling_code(self, code, value_cname):
code.putln(code.error_goto_if_null(value_cname, self.pos))
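# Sketch (illustrative, not generated verbatim) of the temp-saving dance in
# generate_yield_code(): in source such as
#
#     def gen(items):
#         for x in items:
#             sent = yield x
#
# the iterator temporary for 'items' stays alive across the 'yield', so it is
# copied into a closure field before returning and restored (with its
# reference re-taken) at the resume label once send()/next() continues.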
class _YieldDelegationExprNode(YieldExprNode):
def yield_from_func(self, code):
raise NotImplementedError()
def generate_evaluation_code(self, code, source_cname=None, decref_source=False):
if source_cname is None:
self.arg.generate_evaluation_code(code)
code.putln("%s = %s(%s, %s);" % (
Naming.retval_cname,
self.yield_from_func(code),
Naming.generator_cname,
self.arg.py_result() if source_cname is None else source_cname))
if source_cname is None:
self.arg.generate_disposal_code(code)
self.arg.free_temps(code)
elif decref_source:
code.put_decref_clear(source_cname, py_object_type)
code.put_xgotref(Naming.retval_cname)
code.putln("if (likely(%s)) {" % Naming.retval_cname)
self.generate_yield_code(code)
code.putln("} else {")
# either error or sub-generator has normally terminated: return value => node result
if self.result_is_used:
self.fetch_iteration_result(code)
else:
self.handle_iteration_exception(code)
code.putln("}")
def fetch_iteration_result(self, code):
# YieldExprNode has allocated the result temp for us
code.putln("%s = NULL;" % self.result())
code.put_error_if_neg(self.pos, "__Pyx_PyGen_FetchStopIterationValue(&%s)" % self.result())
code.put_gotref(self.result())
def handle_iteration_exception(self, code):
code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
code.putln("if (exc_type) {")
code.putln("if (likely(exc_type == PyExc_StopIteration || (exc_type != PyExc_GeneratorExit &&"
" __Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))) PyErr_Clear();")
code.putln("else %s" % code.error_goto(self.pos))
code.putln("}")
class YieldFromExprNode(_YieldDelegationExprNode):
# "yield from GEN" expression
is_yield_from = True
expr_keyword = 'yield from'
def coerce_yield_argument(self, env):
if not self.arg.type.is_string:
# FIXME: support C arrays and C++ iterators?
error(self.pos, "yielding from non-Python object not supported")
self.arg = self.arg.coerce_to_pyobject(env)
def yield_from_func(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("GeneratorYieldFrom", "Coroutine.c"))
return "__Pyx_Generator_Yield_From"
class AwaitExprNode(_YieldDelegationExprNode):
# 'await' expression node
#
# arg ExprNode the Awaitable value to await
# label_num integer yield label number
is_await = True
expr_keyword = 'await'
def coerce_yield_argument(self, env):
if self.arg is not None:
# FIXME: use same check as in YieldFromExprNode.coerce_yield_argument() ?
self.arg = self.arg.coerce_to_pyobject(env)
def yield_from_func(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("CoroutineYieldFrom", "Coroutine.c"))
return "__Pyx_Coroutine_Yield_From"
class AwaitIterNextExprNode(AwaitExprNode):
# 'await' expression node as part of 'async for' iteration
#
# Breaks out of loop on StopAsyncIteration exception.
def _generate_break(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("StopAsyncIteration", "Coroutine.c"))
code.putln("PyObject* exc_type = __Pyx_PyErr_Occurred();")
code.putln("if (unlikely(exc_type && (exc_type == __Pyx_PyExc_StopAsyncIteration || ("
" exc_type != PyExc_StopIteration && exc_type != PyExc_GeneratorExit &&"
" __Pyx_PyErr_GivenExceptionMatches(exc_type, __Pyx_PyExc_StopAsyncIteration))))) {")
code.putln("PyErr_Clear();")
code.putln("break;")
code.putln("}")
def fetch_iteration_result(self, code):
assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
self._generate_break(code)
super(AwaitIterNextExprNode, self).fetch_iteration_result(code)
def generate_sent_value_handling_code(self, code, value_cname):
assert code.break_label, "AwaitIterNextExprNode outside of 'async for' loop"
code.putln("if (unlikely(!%s)) {" % value_cname)
self._generate_break(code)
# all non-break exceptions are errors, as in parent class
code.putln(code.error_goto(self.pos))
code.putln("}")
class GlobalsExprNode(AtomicExprNode):
type = dict_type
is_temp = 1
def analyse_types(self, env):
env.use_utility_code(Builtin.globals_utility_code)
return self
gil_message = "Constructing globals dict"
def may_be_none(self):
return False
def generate_result_code(self, code):
code.putln('%s = __Pyx_Globals(); %s' % (
self.result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
class LocalsDictItemNode(DictItemNode):
def analyse_types(self, env):
self.key = self.key.analyse_types(env)
self.value = self.value.analyse_types(env)
self.key = self.key.coerce_to_pyobject(env)
if self.value.type.can_coerce_to_pyobject(env):
self.value = self.value.coerce_to_pyobject(env)
else:
self.value = None
return self
class FuncLocalsExprNode(DictNode):
def __init__(self, pos, env):
local_vars = sorted([
entry.name for entry in env.entries.values() if entry.name])
items = [LocalsDictItemNode(
pos, key=IdentifierStringNode(pos, value=var),
value=NameNode(pos, name=var, allow_null=True))
for var in local_vars]
DictNode.__init__(self, pos, key_value_pairs=items,
exclude_null_values=True)
def analyse_types(self, env):
node = super(FuncLocalsExprNode, self).analyse_types(env)
node.key_value_pairs = [ i for i in node.key_value_pairs
if i.value is not None ]
return node
class PyClassLocalsExprNode(AtomicExprNode):
def __init__(self, pos, pyclass_dict):
AtomicExprNode.__init__(self, pos)
self.pyclass_dict = pyclass_dict
def analyse_types(self, env):
self.type = self.pyclass_dict.type
self.is_temp = False
return self
def may_be_none(self):
return False
def result(self):
return self.pyclass_dict.result()
def generate_result_code(self, code):
pass
def LocalsExprNode(pos, scope_node, env):
if env.is_module_scope:
return GlobalsExprNode(pos)
if env.is_py_class_scope:
return PyClassLocalsExprNode(pos, scope_node.dict)
return FuncLocalsExprNode(pos, env)
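# Dispatch example (illustrative): a bare locals() call compiles differently
# depending on the enclosing scope:
#
#     locals()              # module level      -> GlobalsExprNode
#     class C: locals()     # Python class body -> PyClassLocalsExprNode
#     def f(): locals()     # function body     -> FuncLocalsExprNode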
#-------------------------------------------------------------------
#
# Unary operator nodes
#
#-------------------------------------------------------------------
compile_time_unary_operators = {
'not': operator.not_,
'~': operator.inv,
'-': operator.neg,
'+': operator.pos,
}
class UnopNode(ExprNode):
# operator string
# operand ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when the operand is not a pyobject.
# - Check operand type and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
subexprs = ['operand']
infix = True
def calculate_constant_result(self):
func = compile_time_unary_operators[self.operator]
self.constant_result = func(self.operand.constant_result)
def compile_time_value(self, denv):
func = compile_time_unary_operators.get(self.operator)
if not func:
error(self.pos,
"Unary '%s' not supported in compile-time expression"
% self.operator)
operand = self.operand.compile_time_value(denv)
try:
return func(operand)
except Exception as e:
self.compile_time_value_error(e)
def infer_type(self, env):
operand_type = self.operand.infer_type(env)
if operand_type.is_cpp_class or operand_type.is_ptr:
cpp_type = operand_type.find_cpp_operation_type(self.operator)
if cpp_type is not None:
return cpp_type
return self.infer_unop_type(env, operand_type)
def infer_unop_type(self, env, operand_type):
if operand_type.is_pyobject:
return py_object_type
else:
return operand_type
def may_be_none(self):
if self.operand.type and self.operand.type.is_builtin_type:
if self.operand.type is not type_type:
return False
return ExprNode.may_be_none(self)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
if self.is_pythran_operation(env):
self.type = PythranExpr(pythran_unaryop_type(self.operator, self.operand.type))
self.is_temp = 1
elif self.is_py_operation():
self.coerce_operand_to_pyobject(env)
self.type = py_object_type
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
return self
def check_const(self):
return self.operand.check_const()
def is_py_operation(self):
return self.operand.type.is_pyobject or self.operand.type.is_ctuple
def is_pythran_operation(self, env):
np_pythran = has_np_pythran(env)
op_type = self.operand.type
return np_pythran and (op_type.is_buffer or op_type.is_pythran_expr)
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def is_cpp_operation(self):
type = self.operand.type
return type.is_cpp_class
def coerce_operand_to_pyobject(self, env):
self.operand = self.operand.coerce_to_pyobject(env)
def generate_result_code(self, code):
if self.type.is_pythran_expr:
code.putln("// Pythran unaryop")
code.putln("__Pyx_call_destructor(%s);" % self.result())
code.putln("new (&%s) decltype(%s){%s%s};" % (
self.result(),
self.result(),
self.operator,
self.operand.pythran_result()))
elif self.operand.type.is_pyobject:
self.generate_py_operation_code(code)
elif self.is_temp:
if self.is_cpp_operation() and self.exception_check == '+':
translate_cpp_exception(code, self.pos,
"%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
self.result() if self.type.is_pyobject else None,
self.exception_value, self.in_nogil_context)
else:
code.putln("%s = %s %s;" % (self.result(), self.operator, self.operand.result()))
def generate_py_operation_code(self, code):
function = self.py_operation_function(code)
code.putln(
"%s = %s(%s); %s" % (
self.result(),
function,
self.operand.py_result(),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
def type_error(self):
if not self.operand.type.is_error:
error(self.pos, "Invalid operand type for '%s' (%s)" %
(self.operator, self.operand.type))
self.type = PyrexTypes.error_type
def analyse_cpp_operation(self, env, overload_check=True):
entry = env.lookup_operator(self.operator, [self.operand])
if overload_check and not entry:
self.type_error()
return
if entry:
self.exception_check = entry.type.exception_check
self.exception_value = entry.type.exception_value
if self.exception_check == '+':
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
else:
self.exception_check = ''
self.exception_value = ''
cpp_type = self.operand.type.find_cpp_operation_type(self.operator)
if overload_check and cpp_type is None:
error(self.pos, "'%s' operator not defined for %s" % (
self.operator, self.operand.type))
self.type_error()
return
self.type = cpp_type
class NotNode(UnopNode):
# 'not' operator
#
# operand ExprNode
operator = '!'
type = PyrexTypes.c_bint_type
def calculate_constant_result(self):
self.constant_result = not self.operand.constant_result
def compile_time_value(self, denv):
operand = self.operand.compile_time_value(denv)
try:
return not operand
except Exception as e:
self.compile_time_value_error(e)
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_bint_type
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
operand_type = self.operand.type
if operand_type.is_cpp_class:
self.analyse_cpp_operation(env)
else:
self.operand = self.operand.coerce_to_boolean(env)
return self
def calculate_result_code(self):
return "(!%s)" % self.operand.result()
class UnaryPlusNode(UnopNode):
# unary '+' operator
operator = '+'
def analyse_c_operation(self, env):
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
def py_operation_function(self, code):
return "PyNumber_Positive"
def calculate_result_code(self):
if self.is_cpp_operation():
return "(+%s)" % self.operand.result()
else:
return self.operand.result()
class UnaryMinusNode(UnopNode):
# unary '-' operator
operator = '-'
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
if self.type.is_complex:
self.infix = False
def py_operation_function(self, code):
return "PyNumber_Negative"
def calculate_result_code(self):
if self.infix:
return "(-%s)" % self.operand.result()
else:
return "%s(%s)" % (self.operand.type.unary_op('-'), self.operand.result())
def get_constant_c_result_code(self):
value = self.operand.get_constant_c_result_code()
if value:
return "(-%s)" % value
class TildeNode(UnopNode):
# unary '~' operator
def analyse_c_operation(self, env):
if self.operand.type.is_int:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_enum:
self.type = PyrexTypes.c_int_type
else:
self.type_error()
def py_operation_function(self, code):
return "PyNumber_Invert"
def calculate_result_code(self):
return "(~%s)" % self.operand.result()
class CUnopNode(UnopNode):
def is_py_operation(self):
return False
class DereferenceNode(CUnopNode):
# unary * operator
operator = '*'
def infer_unop_type(self, env, operand_type):
if operand_type.is_ptr:
return operand_type.base_type
else:
return PyrexTypes.error_type
def analyse_c_operation(self, env):
if self.operand.type.is_ptr:
self.type = self.operand.type.base_type
else:
self.type_error()
def calculate_result_code(self):
return "(*%s)" % self.operand.result()
class DecrementIncrementNode(CUnopNode):
# unary ++/-- operator
def analyse_c_operation(self, env):
if self.operand.type.is_numeric:
self.type = PyrexTypes.widest_numeric_type(
self.operand.type, PyrexTypes.c_int_type)
elif self.operand.type.is_ptr:
self.type = self.operand.type
else:
self.type_error()
def calculate_result_code(self):
if self.is_prefix:
return "(%s%s)" % (self.operator, self.operand.result())
else:
return "(%s%s)" % (self.operand.result(), self.operator)
def inc_dec_constructor(is_prefix, operator):
return lambda pos, **kwds: DecrementIncrementNode(pos, is_prefix=is_prefix, operator=operator, **kwds)
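# Example (illustrative): in Cython source, the C-only operators above are
# spelled via the cython.operator module:
#
#     from cython.operator cimport dereference, preincrement
#
#     cdef int x = 0
#     preincrement(x)          # DecrementIncrementNode, prefix '++x'
#     cdef int* p = &x         # AmpersandNode below, '&x'
#     value = dereference(p)   # DereferenceNode, '*p'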
class AmpersandNode(CUnopNode):
# The C address-of operator.
#
# operand ExprNode
operator = '&'
def infer_unop_type(self, env, operand_type):
return PyrexTypes.c_ptr_type(operand_type)
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
argtype = self.operand.type
if argtype.is_cpp_class:
self.analyse_cpp_operation(env, overload_check=False)
if not (argtype.is_cfunction or argtype.is_reference or self.operand.is_addressable()):
if argtype.is_memoryviewslice:
self.error("Cannot take address of memoryview slice")
else:
self.error("Taking address of non-lvalue (type %s)" % argtype)
return self
if argtype.is_pyobject:
self.error("Cannot take address of Python %s" % (
"variable '%s'" % self.operand.name if self.operand.is_name else
"object attribute '%s'" % self.operand.attribute if self.operand.is_attribute else
"object"))
return self
if not argtype.is_cpp_class or not self.type:
self.type = PyrexTypes.c_ptr_type(argtype)
return self
def check_const(self):
return self.operand.check_const_addr()
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = ""
def calculate_result_code(self):
return "(&%s)" % self.operand.result()
def generate_result_code(self, code):
if (self.operand.type.is_cpp_class and self.exception_check == '+'):
translate_cpp_exception(code, self.pos,
"%s = %s %s;" % (self.result(), self.operator, self.operand.result()),
self.result() if self.type.is_pyobject else None,
self.exception_value, self.in_nogil_context)
unop_node_classes = {
"+": UnaryPlusNode,
"-": UnaryMinusNode,
"~": TildeNode,
}
def unop_node(pos, operator, operand):
# Construct a unop node of the appropriate class for
# the given operator.
if isinstance(operand, IntNode) and operator == '-':
return IntNode(pos = operand.pos, value = str(-Utils.str_to_number(operand.value)),
longness=operand.longness, unsigned=operand.unsigned)
elif isinstance(operand, UnopNode) and operand.operator == operator and operator in '+-':
warning(pos, "Python has no increment/decrement operator: %s%sx == %s(%sx) == x" % ((operator,)*4), 5)
return unop_node_classes[operator](pos,
operator = operator,
operand = operand)
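# Example (illustrative): unop_node() folds a negated integer literal, so
# '-3' in source never builds a UnaryMinusNode but becomes IntNode('-3')
# directly; doubled signs like '++x' or '--x' still nest two unops and
# trigger the warning above, since Python has no increment/decrement.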
class TypecastNode(ExprNode):
# C type cast
#
# operand ExprNode
# base_type CBaseTypeNode
# declarator CDeclaratorNode
# typecheck boolean
#
# When used from a transform, one can specify the attribute "type"
# directly and leave base_type and declarator as None.
subexprs = ['operand']
base_type = declarator = type = None
def type_dependencies(self, env):
return ()
def infer_type(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
return self.type
def analyse_types(self, env):
if self.type is None:
base_type = self.base_type.analyse(env)
_, self.type = self.declarator.analyse(base_type, env)
if self.operand.has_constant_result():
# Must be done after self.type is resolved.
self.calculate_constant_result()
if self.type.is_cfunction:
error(self.pos,
"Cannot cast to a function type")
self.type = PyrexTypes.error_type
self.operand = self.operand.analyse_types(env)
if self.type is PyrexTypes.c_bint_type:
# short circuit this to a coercion
return self.operand.coerce_to_boolean(env)
to_py = self.type.is_pyobject
from_py = self.operand.type.is_pyobject
if from_py and not to_py and self.operand.is_ephemeral():
if not self.type.is_numeric and not self.type.is_cpp_class:
error(self.pos, "Casting temporary Python object to non-numeric non-Python type")
if to_py and not from_py:
if self.type is bytes_type and self.operand.type.is_int:
return CoerceIntToBytesNode(self.operand, env)
elif self.operand.type.can_coerce_to_pyobject(env):
self.result_ctype = py_object_type
self.operand = self.operand.coerce_to(self.type, env)
else:
if self.operand.type.is_ptr:
if not (self.operand.type.base_type.is_void or self.operand.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast from pointers of primitive types")
else:
# Should this be an error?
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (
self.operand.type, self.type))
self.operand = self.operand.coerce_to_simple(env)
elif from_py and not to_py:
if self.type.create_from_py_utility_code(env):
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_ptr:
if not (self.type.base_type.is_void or self.type.base_type.is_struct):
error(self.pos, "Python objects cannot be cast to pointers of primitive types")
else:
warning(self.pos, "No conversion from %s to %s, python object pointer used." % (
self.type, self.operand.type))
elif from_py and to_py:
if self.typecheck:
self.operand = PyTypeTestNode(self.operand, self.type, env, notnone=True)
elif isinstance(self.operand, SliceIndexNode):
# This cast can influence the created type of string slices.
self.operand = self.operand.coerce_to(self.type, env)
elif self.type.is_complex and self.operand.type.is_complex:
self.operand = self.operand.coerce_to_simple(env)
elif self.operand.type.is_fused:
self.operand = self.operand.coerce_to(self.type, env)
#self.type = self.operand.type
if self.type.is_ptr and self.type.base_type.is_cfunction and self.type.base_type.nogil:
op_type = self.operand.type
if op_type.is_ptr:
op_type = op_type.base_type
if op_type.is_cfunction and not op_type.nogil:
warning(self.pos,
"Casting a GIL-requiring function into a nogil function circumvents GIL validation", 1)
return self
def is_simple(self):
# either temp or a C cast => no side effects other than the operand's
return self.operand.is_simple()
def is_ephemeral(self):
# either temp or a C cast => no side effects other than the operand's
return self.operand.is_ephemeral()
def nonlocally_immutable(self):
return self.is_temp or self.operand.nonlocally_immutable()
def nogil_check(self, env):
if self.type and self.type.is_pyobject and self.is_temp:
self.gil_error()
def check_const(self):
return self.operand.check_const()
def calculate_constant_result(self):
self.constant_result = self.calculate_result_code(self.operand.constant_result)
def calculate_result_code(self, operand_result = None):
if operand_result is None:
operand_result = self.operand.result()
if self.type.is_complex:
operand_result = self.operand.result()
if self.operand.type.is_complex:
real_part = self.type.real_type.cast_code("__Pyx_CREAL(%s)" % operand_result)
imag_part = self.type.real_type.cast_code("__Pyx_CIMAG(%s)" % operand_result)
else:
real_part = self.type.real_type.cast_code(operand_result)
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
else:
return self.type.cast_code(operand_result)
def get_constant_c_result_code(self):
operand_result = self.operand.get_constant_c_result_code()
if operand_result:
return self.type.cast_code(operand_result)
def result_as(self, type):
if self.type.is_pyobject and not self.is_temp:
# Optimise away some unnecessary casting
return self.operand.result_as(type)
else:
return ExprNode.result_as(self, type)
def generate_result_code(self, code):
if self.is_temp:
code.putln(
"%s = (PyObject *)%s;" % (
self.result(),
self.operand.result()))
code.put_incref(self.result(), self.ctype())
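# Examples (illustrative) of casts routed through TypecastNode; MyExtType is
# a hypothetical extension type:
#
#     cdef int i = 42
#     cdef double d = <double> i     # plain C cast
#     cdef bint flag = <bint> i      # short-circuited into coerce_to_boolean()
#     obj = <object> i               # C -> Python conversion via coerce_to()
#     typed = <MyExtType?> obj       # checked cast: inserts a PyTypeTestNode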
ERR_START = "Start may not be given"
ERR_NOT_STOP = "Stop must be provided to indicate shape"
ERR_STEPS = ("Strides may only be given to indicate contiguity. "
"Consider slicing it after conversion")
ERR_NOT_POINTER = "Can only create cython.array from pointer or array"
ERR_BASE_TYPE = "Pointer base type does not match cython.array base type"
class CythonArrayNode(ExprNode):
"""
Used when a pointer of base_type is cast to a memoryviewslice with that
base type, e.g.
<int[:M:1, :N]> p
which creates a fortran-contiguous cython.array.
We leave the type set to object so coercions to object are more efficient
and less work. Acquiring a memoryviewslice from this will be just as
efficient. ExprNode.coerce_to() will do the additional typecheck on
self.compile_time_type
This also handles <int[:, :]> my_c_array
operand ExprNode the thing we're casting
base_type_node MemoryViewSliceTypeNode the cast expression node
"""
subexprs = ['operand', 'shapes']
shapes = None
is_temp = True
mode = "c"
array_dtype = None
shape_type = PyrexTypes.c_py_ssize_t_type
def analyse_types(self, env):
from . import MemoryView
self.operand = self.operand.analyse_types(env)
if self.array_dtype:
array_dtype = self.array_dtype
else:
array_dtype = self.base_type_node.base_type_node.analyse(env)
axes = self.base_type_node.axes
self.type = error_type
self.shapes = []
ndim = len(axes)
# Base type of the pointer or C array we are converting
base_type = self.operand.type
if not self.operand.type.is_ptr and not self.operand.type.is_array:
error(self.operand.pos, ERR_NOT_POINTER)
return self
# Dimension sizes of C array
array_dimension_sizes = []
if base_type.is_array:
while base_type.is_array:
array_dimension_sizes.append(base_type.size)
base_type = base_type.base_type
elif base_type.is_ptr:
base_type = base_type.base_type
else:
error(self.pos, "unexpected base type %s found" % base_type)
return self
if not (base_type.same_as(array_dtype) or base_type.is_void):
error(self.operand.pos, ERR_BASE_TYPE)
return self
elif self.operand.type.is_array and len(array_dimension_sizes) != ndim:
error(self.operand.pos,
"Expected %d dimensions, array has %d dimensions" %
(ndim, len(array_dimension_sizes)))
return self
# Verify the start, stop and step values
# In case of a C array, use the size of C array in each dimension to
# get an automatic cast
for axis_no, axis in enumerate(axes):
if not axis.start.is_none:
error(axis.start.pos, ERR_START)
return self
if axis.stop.is_none:
if array_dimension_sizes:
dimsize = array_dimension_sizes[axis_no]
axis.stop = IntNode(self.pos, value=str(dimsize),
constant_result=dimsize,
type=PyrexTypes.c_int_type)
else:
error(axis.pos, ERR_NOT_STOP)
return self
axis.stop = axis.stop.analyse_types(env)
shape = axis.stop.coerce_to(self.shape_type, env)
if not shape.is_literal:
shape.coerce_to_temp(env)
self.shapes.append(shape)
first_or_last = axis_no in (0, ndim - 1)
if not axis.step.is_none and first_or_last:
# '1' in the first or last dimension denotes F or C contiguity
axis.step = axis.step.analyse_types(env)
if (not axis.step.type.is_int and axis.step.is_literal and not
axis.step.type.is_error):
error(axis.step.pos, "Expected an integer literal")
return self
if axis.step.compile_time_value(env) != 1:
error(axis.step.pos, ERR_STEPS)
return self
if axis_no == 0:
self.mode = "fortran"
elif not axis.step.is_none and not first_or_last:
# step provided in some other dimension
error(axis.step.pos, ERR_STEPS)
return self
if not self.operand.is_name:
self.operand = self.operand.coerce_to_temp(env)
axes = [('direct', 'follow')] * len(axes)
if self.mode == "fortran":
axes[0] = ('direct', 'contig')
else:
axes[-1] = ('direct', 'contig')
self.coercion_type = PyrexTypes.MemoryViewSliceType(array_dtype, axes)
self.coercion_type.validate_memslice_dtype(self.pos)
self.type = self.get_cython_array_type(env)
MemoryView.use_cython_array_utility_code(env)
env.use_utility_code(MemoryView.typeinfo_to_format_code)
return self
def allocate_temp_result(self, code):
if self.temp_code:
raise RuntimeError("temp allocated multiple times")
self.temp_code = code.funcstate.allocate_temp(self.type, True)
def infer_type(self, env):
return self.get_cython_array_type(env)
def get_cython_array_type(self, env):
cython_scope = env.global_scope().context.cython_scope
cython_scope.load_cythonscope()
return cython_scope.viewscope.lookup("array").type
def generate_result_code(self, code):
from . import Buffer
shapes = [self.shape_type.cast_code(shape.result())
for shape in self.shapes]
dtype = self.coercion_type.dtype
shapes_temp = code.funcstate.allocate_temp(py_object_type, True)
format_temp = code.funcstate.allocate_temp(py_object_type, True)
itemsize = "sizeof(%s)" % dtype.empty_declaration_code()
type_info = Buffer.get_type_information_cname(code, dtype)
if self.operand.type.is_ptr:
code.putln("if (!%s) {" % self.operand.result())
code.putln( 'PyErr_SetString(PyExc_ValueError,'
'"Cannot create cython.array from NULL pointer");')
code.putln(code.error_goto(self.operand.pos))
code.putln("}")
code.putln("%s = __pyx_format_from_typeinfo(&%s); %s" % (
format_temp,
type_info,
code.error_goto_if_null(format_temp, self.pos),
))
code.put_gotref(format_temp)
buildvalue_fmt = " __PYX_BUILD_PY_SSIZE_T " * len(shapes)
code.putln('%s = Py_BuildValue((char*) "(" %s ")", %s); %s' % (
shapes_temp,
buildvalue_fmt,
", ".join(shapes),
code.error_goto_if_null(shapes_temp, self.pos),
))
code.put_gotref(shapes_temp)
tup = (self.result(), shapes_temp, itemsize, format_temp,
self.mode, self.operand.result())
code.putln('%s = __pyx_array_new('
'%s, %s, PyBytes_AS_STRING(%s), '
'(char *) "%s", (char *) %s);' % tup)
code.putln(code.error_goto_if_null(self.result(), self.pos))
code.put_gotref(self.result())
def dispose(temp):
code.put_decref_clear(temp, py_object_type)
code.funcstate.release_temp(temp)
dispose(shapes_temp)
dispose(format_temp)
@classmethod
def from_carray(cls, src_node, env):
"""
Given a C array type, return a CythonArrayNode
"""
pos = src_node.pos
base_type = src_node.type
none_node = NoneNode(pos)
axes = []
while base_type.is_array:
axes.append(SliceNode(pos, start=none_node, stop=none_node,
step=none_node))
base_type = base_type.base_type
axes[-1].step = IntNode(pos, value="1", is_c_literal=True)
memslicenode = Nodes.MemoryViewSliceTypeNode(pos, axes=axes,
base_type_node=base_type)
result = CythonArrayNode(pos, base_type_node=memslicenode,
operand=src_node, array_dtype=base_type)
result = result.analyse_types(env)
return result
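# Usage sketch (illustrative) of the pointer cast handled above; get_buffer,
# M and N are hypothetical:
#
#     cdef int* p = get_buffer()
#     arr = <int[:M:1, :N]> p               # fortran-contiguous cython.array
#     cdef int[:, :] mv = <int[:M, :N]> p   # C-contiguous by default
#
# Each axis needs an explicit stop value, since a bare pointer carries no
# shape information (ERR_NOT_STOP above).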
class SizeofNode(ExprNode):
# Abstract base class for sizeof(x) expression nodes.
type = PyrexTypes.c_size_t_type
def check_const(self):
return True
def generate_result_code(self, code):
pass
class SizeofTypeNode(SizeofNode):
# C sizeof function applied to a type
#
# base_type CBaseTypeNode
# declarator CDeclaratorNode
subexprs = []
arg_type = None
def analyse_types(self, env):
# we may have incorrectly interpreted a dotted name as a type rather than an attribute
# this could be better handled by more uniformly treating types as runtime-available objects
if 0 and self.base_type.module_path:
path = self.base_type.module_path
obj = env.lookup(path[0])
if obj.as_module is None:
operand = NameNode(pos=self.pos, name=path[0])
for attr in path[1:]:
operand = AttributeNode(pos=self.pos, obj=operand, attribute=attr)
operand = AttributeNode(pos=self.pos, obj=operand, attribute=self.base_type.name)
node = SizeofVarNode(self.pos, operand=operand).analyse_types(env)
return node
if self.arg_type is None:
base_type = self.base_type.analyse(env)
_, arg_type = self.declarator.analyse(base_type, env)
self.arg_type = arg_type
self.check_type()
return self
def check_type(self):
arg_type = self.arg_type
if not arg_type:
return
if arg_type.is_pyobject and not arg_type.is_extension_type:
error(self.pos, "Cannot take sizeof Python object")
elif arg_type.is_void:
error(self.pos, "Cannot take sizeof void")
elif not arg_type.is_complete():
error(self.pos, "Cannot take sizeof incomplete type '%s'" % arg_type)
def calculate_result_code(self):
if self.arg_type.is_extension_type:
# the size of the pointer is boring
# we want the size of the actual struct
arg_code = self.arg_type.declaration_code("", deref=1)
else:
arg_code = self.arg_type.empty_declaration_code()
return "(sizeof(%s))" % arg_code
class SizeofVarNode(SizeofNode):
# C sizeof function applied to a variable
#
# operand ExprNode
subexprs = ['operand']
def analyse_types(self, env):
# We may actually be looking at a type rather than a variable...
# If we are, traditional analysis would fail...
operand_as_type = self.operand.analyse_as_type(env)
if operand_as_type:
self.arg_type = operand_as_type
if self.arg_type.is_fused:
self.arg_type = self.arg_type.specialize(env.fused_to_specific)
self.__class__ = SizeofTypeNode
self.check_type()
else:
self.operand = self.operand.analyse_types(env)
return self
def calculate_result_code(self):
return "(sizeof(%s))" % self.operand.result()
def generate_result_code(self, code):
pass
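# Examples (illustrative) of the two sizeof flavours:
#
#     cdef size_t a = sizeof(double)   # SizeofTypeNode: sizeof a type
#     cdef double x
#     cdef size_t b = sizeof(x)        # SizeofVarNode: sizeof a variable
#
# Note that SizeofVarNode re-dispatches to SizeofTypeNode when its operand
# turns out to name a type (see analyse_types above).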
class TypeidNode(ExprNode):
# C++ typeid operator applied to a type or variable
#
# operand ExprNode
# arg_type ExprNode
# is_variable boolean
type = PyrexTypes.error_type
subexprs = ['operand']
arg_type = None
is_variable = None
is_temp = 1
def get_type_info_type(self, env):
env_module = env
while not env_module.is_module_scope:
env_module = env_module.outer_scope
typeinfo_module = env_module.find_module('libcpp.typeinfo', self.pos)
typeinfo_entry = typeinfo_module.lookup('type_info')
return PyrexTypes.CFakeReferenceType(PyrexTypes.c_const_type(typeinfo_entry.type))
cpp_message = 'typeid operator'
def analyse_types(self, env):
self.cpp_check(env)
type_info = self.get_type_info_type(env)
if not type_info:
self.error("The 'libcpp.typeinfo' module must be cimported to use the typeid() operator")
return self
self.type = type_info
as_type = self.operand.analyse_as_type(env)
if as_type:
self.arg_type = as_type
self.is_type = True
else:
self.arg_type = self.operand.analyse_types(env)
self.is_type = False
if self.arg_type.type.is_pyobject:
self.error("Cannot use typeid on a Python object")
return self
elif self.arg_type.type.is_void:
self.error("Cannot use typeid on void")
return self
elif not self.arg_type.type.is_complete():
self.error("Cannot use typeid on incomplete type '%s'" % self.arg_type.type)
return self
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
return self
def error(self, mess):
error(self.pos, mess)
self.type = PyrexTypes.error_type
self.result_code = ""
def check_const(self):
return True
def calculate_result_code(self):
return self.temp_code
def generate_result_code(self, code):
if self.is_type:
arg_code = self.arg_type.empty_declaration_code()
else:
arg_code = self.arg_type.result()
translate_cpp_exception(code, self.pos,
"%s = typeid(%s);" % (self.temp_code, arg_code),
None, None, self.in_nogil_context)
class TypeofNode(ExprNode):
# Compile-time type of an expression, as a string.
#
# operand ExprNode
# literal StringNode # internal
literal = None
type = py_object_type
subexprs = ['literal'] # 'operand' will be ignored after type analysis!
def analyse_types(self, env):
self.operand = self.operand.analyse_types(env)
value = StringEncoding.EncodedString(str(self.operand.type)) #self.operand.type.typeof_name())
literal = StringNode(self.pos, value=value)
literal = literal.analyse_types(env)
self.literal = literal.coerce_to_pyobject(env)
return self
def analyse_as_type(self, env):
self.operand = self.operand.analyse_types(env)
return self.operand.type
def may_be_none(self):
return False
def generate_evaluation_code(self, code):
self.literal.generate_evaluation_code(code)
def calculate_result_code(self):
return self.literal.calculate_result_code()
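# Example (illustrative): cython.typeof() is resolved entirely at compile
# time by TypeofNode:
#
#     cimport cython
#
#     cdef int i = 0
#     print(cython.typeof(i))    # prints "int"; only the string literal is emitted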
#-------------------------------------------------------------------
#
# Binary operator nodes
#
#-------------------------------------------------------------------
try:
matmul_operator = operator.matmul
except AttributeError:
def matmul_operator(a, b):
try:
func = a.__matmul__
except AttributeError:
return b.__rmatmul__(a)
# bound methods take a single argument
return func(b)
compile_time_binary_operators = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
'is': operator.is_,
'is_not': operator.is_not,
'+': operator.add,
'&': operator.and_,
'/': operator.truediv,
'//': operator.floordiv,
'<<': operator.lshift,
'%': operator.mod,
'*': operator.mul,
'|': operator.or_,
'**': operator.pow,
'>>': operator.rshift,
'-': operator.sub,
'^': operator.xor,
'@': matmul_operator,
'in': lambda x, seq: x in seq,
'not_in': lambda x, seq: x not in seq,
}
def get_compile_time_binop(node):
func = compile_time_binary_operators.get(node.operator)
if not func:
error(node.pos,
"Binary '%s' not supported in compile-time expression"
% node.operator)
return func
class BinopNode(ExprNode):
# operator string
# operand1 ExprNode
# operand2 ExprNode
#
# Processing during analyse_expressions phase:
#
# analyse_c_operation
# Called when neither operand is a pyobject.
# - Check operand types and coerce if needed.
# - Determine result type and result code fragment.
# - Allocate temporary for result if needed.
subexprs = ['operand1', 'operand2']
inplace = False
def calculate_constant_result(self):
func = compile_time_binary_operators[self.operator]
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
func = get_compile_time_binop(self)
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
return func(operand1, operand2)
except Exception as e:
self.compile_time_value_error(e)
def infer_type(self, env):
return self.result_type(self.operand1.infer_type(env),
self.operand2.infer_type(env), env)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
self.analyse_operation(env)
return self
def analyse_operation(self, env):
if self.is_pythran_operation(env):
self.type = self.result_type(self.operand1.type,
self.operand2.type, env)
assert self.type.is_pythran_expr
self.is_temp = 1
elif self.is_py_operation():
self.coerce_operands_to_pyobjects(env)
self.type = self.result_type(self.operand1.type,
self.operand2.type, env)
assert self.type.is_pyobject
self.is_temp = 1
elif self.is_cpp_operation():
self.analyse_cpp_operation(env)
else:
self.analyse_c_operation(env)
def is_py_operation(self):
return self.is_py_operation_types(self.operand1.type, self.operand2.type)
def is_py_operation_types(self, type1, type2):
return type1.is_pyobject or type2.is_pyobject or type1.is_ctuple or type2.is_ctuple
def is_pythran_operation(self, env):
return self.is_pythran_operation_types(self.operand1.type, self.operand2.type, env)
def is_pythran_operation_types(self, type1, type2, env):
# Support only expr op supported_type, or supported_type op expr
return has_np_pythran(env) and \
(is_pythran_supported_operation_type(type1) and is_pythran_supported_operation_type(type2)) and \
(is_pythran_expr(type1) or is_pythran_expr(type2))
def is_cpp_operation(self):
return (self.operand1.type.is_cpp_class
or self.operand2.type.is_cpp_class)
def analyse_cpp_operation(self, env):
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if not entry:
self.type_error()
return
func_type = entry.type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check == '+':
# Used by NumBinopNodes to break up expressions involving multiple
# operators so that exceptions can be handled properly.
self.is_temp = 1
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
if func_type.is_ptr:
func_type = func_type.base_type
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
def result_type(self, type1, type2, env):
if self.is_pythran_operation_types(type1, type2, env):
return PythranExpr(pythran_binop_type(self.operator, type1, type2))
if self.is_py_operation_types(type1, type2):
if type2.is_string:
type2 = Builtin.bytes_type
elif type2.is_pyunicode_ptr:
type2 = Builtin.unicode_type
if type1.is_string:
type1 = Builtin.bytes_type
elif type1.is_pyunicode_ptr:
type1 = Builtin.unicode_type
if type1.is_builtin_type or type2.is_builtin_type:
if type1 is type2 and self.operator in '**%+|&^':
# FIXME: at least these operators should be safe - others?
return type1
result_type = self.infer_builtin_types_operation(type1, type2)
if result_type is not None:
return result_type
return py_object_type
elif type1.is_error or type2.is_error:
return PyrexTypes.error_type
else:
return self.compute_c_result_type(type1, type2)
def infer_builtin_types_operation(self, type1, type2):
return None
def nogil_check(self, env):
if self.is_py_operation():
self.gil_error()
def coerce_operands_to_pyobjects(self, env):
self.operand1 = self.operand1.coerce_to_pyobject(env)
self.operand2 = self.operand2.coerce_to_pyobject(env)
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def is_ephemeral(self):
return (super(BinopNode, self).is_ephemeral() or
self.operand1.is_ephemeral() or self.operand2.is_ephemeral())
def generate_result_code(self, code):
if self.type.is_pythran_expr:
code.putln("// Pythran binop")
code.putln("__Pyx_call_destructor(%s);" % self.result())
if self.operator == '**':
code.putln("new (&%s) decltype(%s){pythonic::numpy::functor::power{}(%s, %s)};" % (
self.result(),
self.result(),
self.operand1.pythran_result(),
self.operand2.pythran_result()))
else:
code.putln("new (&%s) decltype(%s){%s %s %s};" % (
self.result(),
self.result(),
self.operand1.pythran_result(),
self.operator,
self.operand2.pythran_result()))
elif self.operand1.type.is_pyobject:
function = self.py_operation_function(code)
if self.operator == '**':
extra_args = ", Py_None"
else:
extra_args = ""
code.putln(
"%s = %s(%s, %s%s); %s" % (
self.result(),
function,
self.operand1.py_result(),
self.operand2.py_result(),
extra_args,
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
elif self.is_temp:
# C++ overloaded operators with exception values are currently all
# handled through temporaries.
if self.is_cpp_operation() and self.exception_check == '+':
translate_cpp_exception(code, self.pos,
"%s = %s;" % (self.result(), self.calculate_result_code()),
self.result() if self.type.is_pyobject else None,
self.exception_value, self.in_nogil_context)
else:
code.putln("%s = %s;" % (self.result(), self.calculate_result_code()))
def type_error(self):
if not (self.operand1.type.is_error
or self.operand2.type.is_error):
error(self.pos, "Invalid operand types for '%s' (%s; %s)" %
(self.operator, self.operand1.type,
self.operand2.type))
self.type = PyrexTypes.error_type
class CBinopNode(BinopNode):
def analyse_types(self, env):
node = BinopNode.analyse_types(self, env)
if node.is_py_operation():
node.type = PyrexTypes.error_type
return node
def py_operation_function(self, code):
return ""
def calculate_result_code(self):
return "(%s %s %s)" % (
self.operand1.result(),
self.operator,
self.operand2.result())
def compute_c_result_type(self, type1, type2):
cpp_type = None
if type1.is_cpp_class or type1.is_ptr:
cpp_type = type1.find_cpp_operation_type(self.operator, type2)
if cpp_type is None and (type2.is_cpp_class or type2.is_ptr):
cpp_type = type2.find_cpp_operation_type(self.operator, type1)
# FIXME: do we need to handle other cases here?
return cpp_type
def c_binop_constructor(operator):
def make_binop_node(pos, **operands):
return CBinopNode(pos, operator=operator, **operands)
return make_binop_node
class NumBinopNode(BinopNode):
# Binary operation taking numeric arguments.
infix = True
overflow_check = False
overflow_bit_node = None
def analyse_c_operation(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.type = self.compute_c_result_type(type1, type2)
if not self.type:
self.type_error()
return
if self.type.is_complex:
self.infix = False
if (self.type.is_int
and env.directives['overflowcheck']
and self.operator in self.overflow_op_names):
if (self.operator in ('+', '*')
and self.operand1.has_constant_result()
and not self.operand2.has_constant_result()):
self.operand1, self.operand2 = self.operand2, self.operand1
self.overflow_check = True
self.overflow_fold = env.directives['overflowcheck.fold']
self.func = self.type.overflow_check_binop(
self.overflow_op_names[self.operator],
env,
const_rhs = self.operand2.has_constant_result())
self.is_temp = True
if not self.infix or (type1.is_numeric and type2.is_numeric):
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
def compute_c_result_type(self, type1, type2):
if self.c_types_okay(type1, type2):
widest_type = PyrexTypes.widest_numeric_type(type1, type2)
if widest_type is PyrexTypes.c_bint_type:
if self.operator not in '|^&':
# False + False == 0 # not False!
widest_type = PyrexTypes.c_int_type
else:
widest_type = PyrexTypes.widest_numeric_type(
widest_type, PyrexTypes.c_int_type)
return widest_type
else:
return None
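    # Example of the bint special case above (comment only): for
    # "cdef bint a, b", the arithmetic expression "a + b" is widened to a
    # C int, so True + True == 2 instead of being truncated back into a
    # bint, while bitwise operators like "a | b" keep the bint type since
    # their results stay within {0, 1}.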
def may_be_none(self):
if self.type and self.type.is_builtin_type:
# if we know the result type, we know the operation, so it can't be None
return False
type1 = self.operand1.type
type2 = self.operand2.type
if type1 and type1.is_builtin_type and type2 and type2.is_builtin_type:
# XXX: I can't think of any case where a binary operation
# on builtin types evaluates to None - add a special case
# here if there is one.
return False
return super(NumBinopNode, self).may_be_none()
def get_constant_c_result_code(self):
value1 = self.operand1.get_constant_c_result_code()
value2 = self.operand2.get_constant_c_result_code()
if value1 and value2:
return "(%s %s %s)" % (value1, self.operator, value2)
else:
return None
def c_types_okay(self, type1, type2):
#print "NumBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_numeric or type1.is_enum) \
and (type2.is_numeric or type2.is_enum)
def generate_evaluation_code(self, code):
if self.overflow_check:
self.overflow_bit_node = self
self.overflow_bit = code.funcstate.allocate_temp(PyrexTypes.c_int_type, manage_ref=False)
code.putln("%s = 0;" % self.overflow_bit)
super(NumBinopNode, self).generate_evaluation_code(code)
if self.overflow_check:
code.putln("if (unlikely(%s)) {" % self.overflow_bit)
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large");')
code.putln(code.error_goto(self.pos))
code.putln("}")
code.funcstate.release_temp(self.overflow_bit)
def calculate_result_code(self):
if self.overflow_bit_node is not None:
return "%s(%s, %s, &%s)" % (
self.func,
self.operand1.result(),
self.operand2.result(),
self.overflow_bit_node.overflow_bit)
elif self.type.is_cpp_class or self.infix:
if is_pythran_expr(self.type):
result1, result2 = self.operand1.pythran_result(), self.operand2.pythran_result()
else:
result1, result2 = self.operand1.result(), self.operand2.result()
return "(%s %s %s)" % (result1, self.operator, result2)
else:
func = self.type.binary_op(self.operator)
if func is None:
error(self.pos, "binary operator %s not supported for %s" % (self.operator, self.type))
return "%s(%s, %s)" % (
func,
self.operand1.result(),
self.operand2.result())
def is_py_operation_types(self, type1, type2):
return (type1.is_unicode_char or
type2.is_unicode_char or
BinopNode.is_py_operation_types(self, type1, type2))
def py_operation_function(self, code):
function_name = self.py_functions[self.operator]
if self.inplace:
function_name = function_name.replace('PyNumber_', 'PyNumber_InPlace')
return function_name
py_functions = {
"|": "PyNumber_Or",
"^": "PyNumber_Xor",
"&": "PyNumber_And",
"<<": "PyNumber_Lshift",
">>": "PyNumber_Rshift",
"+": "PyNumber_Add",
"-": "PyNumber_Subtract",
"*": "PyNumber_Multiply",
"@": "__Pyx_PyNumber_MatrixMultiply",
"/": "__Pyx_PyNumber_Divide",
"//": "PyNumber_FloorDivide",
"%": "PyNumber_Remainder",
"**": "PyNumber_Power",
}
overflow_op_names = {
"+": "add",
"-": "sub",
"*": "mul",
"<<": "lshift",
}
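    # Illustrative sketch (comment only): with "# cython: overflowcheck=True",
    # an int expression like "a * b" is no longer emitted as plain "(a * b)";
    # calculate_result_code() instead calls a per-type checking helper built
    # by overflow_check_binop(), passing (a, b, &overflow_bit), and
    # generate_evaluation_code() raises OverflowError if overflow_bit is set.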
class IntBinopNode(NumBinopNode):
# Binary operation taking integer arguments.
def c_types_okay(self, type1, type2):
#print "IntBinopNode.c_types_okay:", type1, type2 ###
return (type1.is_int or type1.is_enum) \
and (type2.is_int or type2.is_enum)
class AddNode(NumBinopNode):
# '+' operator.
def is_py_operation_types(self, type1, type2):
        if (type1.is_string and type2.is_string) or (type1.is_pyunicode_ptr and type2.is_pyunicode_ptr):
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# b'abc' + 'abc' raises an exception in Py3,
# so we can safely infer the Py2 type for bytes here
string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2 in string_types:
return string_types[max(string_types.index(type1),
string_types.index(type2))]
return None
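    # Example of the inference above (comment only): string_types is ordered,
    # so "bytes + bytearray" infers bytearray and "str + unicode" infers
    # unicode - the result is whichever operand type appears later in the
    # tuple.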
def compute_c_result_type(self, type1, type2):
#print "AddNode.compute_c_result_type:", type1, self.operator, type2 ###
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type2.is_ptr or type2.is_array) and (type1.is_int or type1.is_enum):
return type2
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
def py_operation_function(self, code):
type1, type2 = self.operand1.type, self.operand2.type
if type1 is unicode_type or type2 is unicode_type:
if type1 in (unicode_type, str_type) and type2 in (unicode_type, str_type):
is_unicode_concat = True
elif isinstance(self.operand1, FormattedValueNode) or isinstance(self.operand2, FormattedValueNode):
# Assume that even if we don't know the second type, it's going to be a string.
is_unicode_concat = True
else:
# Operation depends on the second type.
is_unicode_concat = False
if is_unicode_concat:
if self.operand1.may_be_none() or self.operand2.may_be_none():
return '__Pyx_PyUnicode_ConcatSafe'
else:
return '__Pyx_PyUnicode_Concat'
return super(AddNode, self).py_operation_function(code)
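    # Example of the specialisation above (comment only): a concatenation
    # like "u + v" with known unicode/str operands (or an f-string operand)
    # is emitted as __Pyx_PyUnicode_Concat(u, v) instead of the generic
    # PyNumber_Add; the ConcatSafe variant is chosen when an operand may be
    # None and falls back to the generic add in that case.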
class SubNode(NumBinopNode):
# '-' operator.
def compute_c_result_type(self, type1, type2):
if (type1.is_ptr or type1.is_array) and (type2.is_int or type2.is_enum):
return type1
elif (type1.is_ptr or type1.is_array) and (type2.is_ptr or type2.is_array):
return PyrexTypes.c_ptrdiff_t_type
else:
return NumBinopNode.compute_c_result_type(
self, type1, type2)
class MulNode(NumBinopNode):
# '*' operator.
def is_py_operation_types(self, type1, type2):
if ((type1.is_string and type2.is_int) or
(type2.is_string and type1.is_int)):
return 1
else:
return NumBinopNode.is_py_operation_types(self, type1, type2)
def infer_builtin_types_operation(self, type1, type2):
# let's assume that whatever builtin type you multiply a string with
# will either return a string of the same type or fail with an exception
string_types = (bytes_type, bytearray_type, str_type, basestring_type, unicode_type)
if type1 in string_types and type2.is_builtin_type:
return type1
if type2 in string_types and type1.is_builtin_type:
return type2
# multiplication of containers/numbers with an integer value
# always (?) returns the same type
if type1.is_int:
return type2
if type2.is_int:
return type1
return None
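    # Examples of the inference above (comment only): "b'ab' * 3" stays
    # bytes, "[0] * n" with an integer n keeps the list type, and
    # "3 * (1, 2)" keeps the tuple type; anything else returns None and
    # falls back to generic object inference.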
class MatMultNode(NumBinopNode):
# '@' operator.
def is_py_operation_types(self, type1, type2):
return True
def generate_evaluation_code(self, code):
code.globalstate.use_utility_code(UtilityCode.load_cached("MatrixMultiply", "ObjectHandling.c"))
super(MatMultNode, self).generate_evaluation_code(code)
class DivNode(NumBinopNode):
# '/' or '//' operator.
cdivision = None
truedivision = None # == "unknown" if operator == '/'
ctruedivision = False
cdivision_warnings = False
zerodivision_check = None
def find_compile_time_binary_operator(self, op1, op2):
func = compile_time_binary_operators[self.operator]
if self.operator == '/' and self.truedivision is None:
# => true div for floats, floor div for integers
if isinstance(op1, _py_int_types) and isinstance(op2, _py_int_types):
func = compile_time_binary_operators['//']
return func
def calculate_constant_result(self):
op1 = self.operand1.constant_result
op2 = self.operand2.constant_result
func = self.find_compile_time_binary_operator(op1, op2)
self.constant_result = func(
self.operand1.constant_result,
self.operand2.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
try:
func = self.find_compile_time_binary_operator(
operand1, operand2)
return func(operand1, operand2)
except Exception as e:
self.compile_time_value_error(e)
def _check_truedivision(self, env):
if self.cdivision or env.directives['cdivision']:
self.ctruedivision = False
else:
self.ctruedivision = self.truedivision
def infer_type(self, env):
self._check_truedivision(env)
return self.result_type(
self.operand1.infer_type(env),
self.operand2.infer_type(env), env)
def analyse_operation(self, env):
self._check_truedivision(env)
NumBinopNode.analyse_operation(self, env)
if self.is_cpp_operation():
self.cdivision = True
if not self.type.is_pyobject:
self.zerodivision_check = (
self.cdivision is None and not env.directives['cdivision']
and (not self.operand2.has_constant_result() or
self.operand2.constant_result == 0))
if self.zerodivision_check or env.directives['cdivision_warnings']:
# Need to check ahead of time to warn or raise zero division error
self.operand1 = self.operand1.coerce_to_simple(env)
self.operand2 = self.operand2.coerce_to_simple(env)
def compute_c_result_type(self, type1, type2):
if self.operator == '/' and self.ctruedivision and not type1.is_cpp_class and not type2.is_cpp_class:
if not type1.is_float and not type2.is_float:
widest_type = PyrexTypes.widest_numeric_type(type1, PyrexTypes.c_double_type)
widest_type = PyrexTypes.widest_numeric_type(type2, widest_type)
return widest_type
return NumBinopNode.compute_c_result_type(self, type1, type2)
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float division"
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.type.is_complex:
if self.cdivision is None:
self.cdivision = (
code.globalstate.directives['cdivision']
or self.type.is_float
or ((self.type.is_numeric or self.type.is_enum) and not self.type.signed)
)
if not self.cdivision:
code.globalstate.use_utility_code(
UtilityCode.load_cached("DivInt", "CMath.c").specialize(self.type))
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def generate_div_warning_code(self, code):
in_nogil = self.in_nogil_context
if not self.type.is_pyobject:
if self.zerodivision_check:
if not self.infix:
zero_test = "%s(%s)" % (self.type.unary_op('zero'), self.operand2.result())
else:
zero_test = "%s == 0" % self.operand2.result()
code.putln("if (unlikely(%s)) {" % zero_test)
if in_nogil:
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_ZeroDivisionError, "%s");' % self.zero_division_message())
if in_nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if self.type.is_int and self.type.signed and self.operator != '%':
code.globalstate.use_utility_code(UtilityCode.load_cached("UnaryNegOverflows", "Overflow.c"))
if self.operand2.type.signed == 2:
# explicitly signed, no runtime check needed
minus1_check = 'unlikely(%s == -1)' % self.operand2.result()
else:
type_of_op2 = self.operand2.type.empty_declaration_code()
minus1_check = '(!(((%s)-1) > 0)) && unlikely(%s == (%s)-1)' % (
type_of_op2, self.operand2.result(), type_of_op2)
code.putln("else if (sizeof(%s) == sizeof(long) && %s "
" && unlikely(UNARY_NEG_WOULD_OVERFLOW(%s))) {" % (
self.type.empty_declaration_code(),
minus1_check,
self.operand1.result()))
if in_nogil:
code.put_ensure_gil()
code.putln('PyErr_SetString(PyExc_OverflowError, "value too large to perform division");')
if in_nogil:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
if code.globalstate.directives['cdivision_warnings'] and self.operator != '/':
code.globalstate.use_utility_code(
UtilityCode.load_cached("CDivisionWarning", "CMath.c"))
code.putln("if (unlikely((%s < 0) ^ (%s < 0))) {" % (
self.operand1.result(),
self.operand2.result()))
warning_code = "__Pyx_cdivision_warning(%(FILENAME)s, %(LINENO)s)" % {
'FILENAME': Naming.filename_cname,
'LINENO': Naming.lineno_cname,
}
if in_nogil:
result_code = 'result'
code.putln("int %s;" % result_code)
code.put_ensure_gil()
code.putln(code.set_error_info(self.pos, used=True))
code.putln("%s = %s;" % (result_code, warning_code))
code.put_release_ensured_gil()
else:
result_code = warning_code
code.putln(code.set_error_info(self.pos, used=True))
code.put("if (unlikely(%s)) " % result_code)
code.put_goto(code.error_label)
code.putln("}")
def calculate_result_code(self):
if self.type.is_complex or self.is_cpp_operation():
return NumBinopNode.calculate_result_code(self)
elif self.type.is_float and self.operator == '//':
return "floor(%s / %s)" % (
self.operand1.result(),
self.operand2.result())
elif self.truedivision or self.cdivision:
op1 = self.operand1.result()
op2 = self.operand2.result()
if self.truedivision:
if self.type != self.operand1.type:
op1 = self.type.cast_code(op1)
if self.type != self.operand2.type:
op2 = self.type.cast_code(op2)
return "(%s / %s)" % (op1, op2)
else:
return "__Pyx_div_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
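    # Generated-code sketch for the branches above (illustrative):
    #   cdivision or C true division:  (a / b)          plain C division
    #   float '//':                    floor(a / b)
    #   Python-semantics int division: __Pyx_div_int(a, b) (DivInt from
    #                                  CMath.c), which floors towards
    #                                  negative infinity like Python.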
_find_formatting_types = re.compile(
br"%"
br"(?:%|" # %%
br"(?:\([^)]+\))?" # %(name)
br"[-+#,0-9 ]*([a-z])" # %.2f etc.
br")").findall
# These format conversion types can never trigger a Unicode string conversion in Py2.
_safe_bytes_formats = set([
# Excludes 's' and 'r', which can generate non-bytes strings.
b'd', b'i', b'o', b'u', b'x', b'X', b'e', b'E', b'f', b'F', b'g', b'G', b'c', b'b', b'a',
])
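# Worked example for the helpers above (illustrative): for the literal
# b"%d items: %s", _find_formatting_types() yields [b'd', b's']; b'd' is in
# _safe_bytes_formats but b's' is not, so ModNode cannot safely infer a
# bytes result for the '%' operation and falls back to basestring_type.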
class ModNode(DivNode):
# '%' operator.
def is_py_operation_types(self, type1, type2):
return (type1.is_string
or type2.is_string
or NumBinopNode.is_py_operation_types(self, type1, type2))
def infer_builtin_types_operation(self, type1, type2):
        # b'%s' % xyz raises an exception in Py3 before 3.5, so the type inferred here is safe for Py2 and for Py3.5 and later.
if type1 is unicode_type:
            # None % xyz may be handled by the RHS (e.g. through __rmod__)
if type2.is_builtin_type or not self.operand1.may_be_none():
return type1
elif type1 in (bytes_type, str_type, basestring_type):
if type2 is unicode_type:
return type2
elif type2.is_numeric:
return type1
elif self.operand1.is_string_literal:
if type1 is str_type or type1 is bytes_type:
if set(_find_formatting_types(self.operand1.value)) <= _safe_bytes_formats:
return type1
return basestring_type
elif type1 is bytes_type and not type2.is_builtin_type:
                return None  # RHS might implement '%' differently in Py3
else:
return basestring_type # either str or unicode, can't tell
return None
def zero_division_message(self):
if self.type.is_int:
return "integer division or modulo by zero"
else:
return "float divmod()"
def analyse_operation(self, env):
DivNode.analyse_operation(self, env)
if not self.type.is_pyobject:
if self.cdivision is None:
self.cdivision = env.directives['cdivision'] or not self.type.signed
if not self.cdivision and not self.type.is_int and not self.type.is_float:
error(self.pos, "mod operator not supported for type '%s'" % self.type)
def generate_evaluation_code(self, code):
if not self.type.is_pyobject and not self.cdivision:
if self.type.is_int:
code.globalstate.use_utility_code(
UtilityCode.load_cached("ModInt", "CMath.c").specialize(self.type))
else: # float
code.globalstate.use_utility_code(
UtilityCode.load_cached("ModFloat", "CMath.c").specialize(
self.type, math_h_modifier=self.type.math_h_modifier))
# NOTE: skipping over DivNode here
NumBinopNode.generate_evaluation_code(self, code)
self.generate_div_warning_code(code)
def calculate_result_code(self):
if self.cdivision:
if self.type.is_float:
return "fmod%s(%s, %s)" % (
self.type.math_h_modifier,
self.operand1.result(),
self.operand2.result())
else:
return "(%s %% %s)" % (
self.operand1.result(),
self.operand2.result())
else:
return "__Pyx_mod_%s(%s, %s)" % (
self.type.specialization_name(),
self.operand1.result(),
self.operand2.result())
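    # Why the helper is needed (comment only): C's '%' truncates towards
    # zero while Python floors, so "-7 % 3" is -1 in C but 2 in Python.
    # Without cdivision, the expression is therefore emitted as
    # "__Pyx_mod_int(-7, 3)" (or the fmod-based float analogue), which
    # adjusts the sign of the result to match Python semantics.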
def py_operation_function(self, code):
type1, type2 = self.operand1.type, self.operand2.type
# ("..." % x) must call "x.__rmod__()" for string subtypes.
if type1 is unicode_type:
if self.operand1.may_be_none() or (
type2.is_extension_type and type2.subtype_of(type1) or
type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
return '__Pyx_PyUnicode_FormatSafe'
else:
return 'PyUnicode_Format'
elif type1 is str_type:
if self.operand1.may_be_none() or (
type2.is_extension_type and type2.subtype_of(type1) or
type2 is py_object_type and not isinstance(self.operand2, CoerceToPyTypeNode)):
return '__Pyx_PyString_FormatSafe'
else:
return '__Pyx_PyString_Format'
return super(ModNode, self).py_operation_function(code)
class PowNode(NumBinopNode):
# '**' operator.
def analyse_c_operation(self, env):
NumBinopNode.analyse_c_operation(self, env)
if self.type.is_complex:
if self.type.real_type.is_float:
self.operand1 = self.operand1.coerce_to(self.type, env)
self.operand2 = self.operand2.coerce_to(self.type, env)
self.pow_func = self.type.binary_op('**')
else:
error(self.pos, "complex int powers not supported")
self.pow_func = ""
elif self.type.is_float:
self.pow_func = "pow" + self.type.math_h_modifier
elif self.type.is_int:
self.pow_func = "__Pyx_pow_%s" % self.type.empty_declaration_code().replace(' ', '_')
env.use_utility_code(
UtilityCode.load_cached("IntPow", "CMath.c").specialize(
func_name=self.pow_func,
type=self.type.empty_declaration_code(),
signed=self.type.signed and 1 or 0))
elif not self.type.is_error:
error(self.pos, "got unexpected types for C power operator: %s, %s" %
(self.operand1.type, self.operand2.type))
def calculate_result_code(self):
# Work around MSVC overloading ambiguity.
def typecast(operand):
if self.type == operand.type:
return operand.result()
else:
return self.type.cast_code(operand.result())
return "%s(%s, %s)" % (
self.pow_func,
typecast(self.operand1),
typecast(self.operand2))
def py_operation_function(self, code):
if (self.type.is_pyobject and
self.operand1.constant_result == 2 and
isinstance(self.operand1.constant_result, _py_int_types) and
self.operand2.type is py_object_type):
code.globalstate.use_utility_code(UtilityCode.load_cached('PyNumberPow2', 'Optimize.c'))
if self.inplace:
return '__Pyx_PyNumber_InPlacePowerOf2'
else:
return '__Pyx_PyNumber_PowerOf2'
return super(PowNode, self).py_operation_function(code)
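    # Example of the optimisation above (comment only): with an object-typed
    # exponent, "2 ** n" is emitted via __Pyx_PyNumber_PowerOf2 (utility
    # 'PyNumberPow2'), which can compute small powers of two by shifting and
    # falls back to the generic PyNumber_Power(2, n, Py_None) otherwise.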
class BoolBinopNode(ExprNode):
"""
Short-circuiting boolean operation.
Note that this node provides the same code generation method as
BoolBinopResultNode to simplify expression nesting.
operator string "and"/"or"
operand1 BoolBinopNode/BoolBinopResultNode left operand
operand2 BoolBinopNode/BoolBinopResultNode right operand
"""
subexprs = ['operand1', 'operand2']
is_temp = True
operator = None
operand1 = None
operand2 = None
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
return PyrexTypes.independent_spanning_type(type1, type2)
def may_be_none(self):
if self.operator == 'or':
return self.operand2.may_be_none()
else:
return self.operand1.may_be_none() or self.operand2.may_be_none()
def calculate_constant_result(self):
operand1 = self.operand1.constant_result
operand2 = self.operand2.constant_result
if self.operator == 'and':
self.constant_result = operand1 and operand2
else:
self.constant_result = operand1 or operand2
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
operand2 = self.operand2.compile_time_value(denv)
if self.operator == 'and':
return operand1 and operand2
else:
return operand1 or operand2
def is_ephemeral(self):
return self.operand1.is_ephemeral() or self.operand2.is_ephemeral()
def analyse_types(self, env):
# Note: we do not do any coercion here as we most likely do not know the final type anyway.
        # We even accept setting self.type to error_type if the operands have no spanning type.
# The coercion to the final type and to a "simple" value is left to coerce_to().
operand1 = self.operand1.analyse_types(env)
operand2 = self.operand2.analyse_types(env)
self.type = PyrexTypes.independent_spanning_type(
operand1.type, operand2.type)
self.operand1 = self._wrap_operand(operand1, env)
self.operand2 = self._wrap_operand(operand2, env)
return self
def _wrap_operand(self, operand, env):
if not isinstance(operand, (BoolBinopNode, BoolBinopResultNode)):
operand = BoolBinopResultNode(operand, self.type, env)
return operand
def wrap_operands(self, env):
"""
Must get called by transforms that want to create a correct BoolBinopNode
after the type analysis phase.
"""
self.operand1 = self._wrap_operand(self.operand1, env)
self.operand2 = self._wrap_operand(self.operand2, env)
def coerce_to_boolean(self, env):
return self.coerce_to(PyrexTypes.c_bint_type, env)
def coerce_to(self, dst_type, env):
operand1 = self.operand1.coerce_to(dst_type, env)
operand2 = self.operand2.coerce_to(dst_type, env)
return BoolBinopNode.from_node(
self, type=dst_type,
operator=self.operator,
operand1=operand1, operand2=operand2)
def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
outer_labels = (and_label, or_label)
if self.operator == 'and':
my_label = and_label = code.new_label('next_and')
else:
my_label = or_label = code.new_label('next_or')
self.operand1.generate_bool_evaluation_code(
code, final_result_temp, final_result_type, and_label, or_label, end_label, my_label)
and_label, or_label = outer_labels
code.put_label(my_label)
self.operand2.generate_bool_evaluation_code(
code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through)
def generate_evaluation_code(self, code):
self.allocate_temp_result(code)
result_type = PyrexTypes.py_object_type if self.type.is_pyobject else self.type
or_label = and_label = None
end_label = code.new_label('bool_binop_done')
self.generate_bool_evaluation_code(code, self.result(), result_type, and_label, or_label, end_label, end_label)
code.put_label(end_label)
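    # Control-flow sketch for "a and b" (illustrative, roughly the emitted C):
    #
    #     if (<a is true>) { /* fall through */ } else { result = <a>; goto done; }
    #     /* next_and: */
    #     result = <b>;
    #   done: ;
    #
    # generate_bool_evaluation_code() threads the and/or/end labels through
    # the nested BoolBinopResultNode operands to get this short-circuiting.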
gil_message = "Truth-testing Python object"
def check_const(self):
return self.operand1.check_const() and self.operand2.check_const()
def generate_subexpr_disposal_code(self, code):
pass # nothing to do here, all done in generate_evaluation_code()
def free_subexpr_temps(self, code):
pass # nothing to do here, all done in generate_evaluation_code()
def generate_operand1_test(self, code):
# Generate code to test the truth of the first operand.
if self.type.is_pyobject:
test_result = code.funcstate.allocate_temp(
PyrexTypes.c_bint_type, manage_ref=False)
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
test_result,
self.operand1.py_result(),
code.error_goto_if_neg(test_result, self.pos)))
else:
test_result = self.operand1.result()
return (test_result, self.type.is_pyobject)
class BoolBinopResultNode(ExprNode):
"""
Intermediate result of a short-circuiting and/or expression.
Tests the result for 'truthiness' and takes care of coercing the final result
of the overall expression to the target type.
Note that this node provides the same code generation method as
BoolBinopNode to simplify expression nesting.
arg ExprNode the argument to test
value ExprNode the coerced result value node
"""
subexprs = ['arg', 'value']
is_temp = True
arg = None
value = None
def __init__(self, arg, result_type, env):
# using 'arg' multiple times, so it must be a simple/temp value
arg = arg.coerce_to_simple(env)
# wrap in ProxyNode, in case a transform wants to replace self.arg later
arg = ProxyNode(arg)
super(BoolBinopResultNode, self).__init__(
arg.pos, arg=arg, type=result_type,
value=CloneNode(arg).coerce_to(result_type, env))
def coerce_to_boolean(self, env):
return self.coerce_to(PyrexTypes.c_bint_type, env)
def coerce_to(self, dst_type, env):
# unwrap, coerce, rewrap
arg = self.arg.arg
if dst_type is PyrexTypes.c_bint_type:
arg = arg.coerce_to_boolean(env)
# TODO: unwrap more coercion nodes?
return BoolBinopResultNode(arg, dst_type, env)
def nogil_check(self, env):
# let's leave all errors to BoolBinopNode
pass
def generate_operand_test(self, code):
        # Generate code to test the truth of the wrapped argument.
if self.arg.type.is_pyobject:
test_result = code.funcstate.allocate_temp(
PyrexTypes.c_bint_type, manage_ref=False)
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
test_result,
self.arg.py_result(),
code.error_goto_if_neg(test_result, self.pos)))
else:
test_result = self.arg.result()
return (test_result, self.arg.type.is_pyobject)
def generate_bool_evaluation_code(self, code, final_result_temp, final_result_type, and_label, or_label, end_label, fall_through):
code.mark_pos(self.pos)
# x => x
# x and ... or ... => next 'and' / 'or'
# False ... or x => next 'or'
# True and x => next 'and'
# True or x => True (operand)
self.arg.generate_evaluation_code(code)
if and_label or or_label:
test_result, uses_temp = self.generate_operand_test(code)
if uses_temp and (and_label and or_label):
# cannot become final result => free early
# disposal: uses_temp and (and_label and or_label)
self.arg.generate_disposal_code(code)
sense = '!' if or_label else ''
code.putln("if (%s%s) {" % (sense, test_result))
if uses_temp:
code.funcstate.release_temp(test_result)
if not uses_temp or not (and_label and or_label):
# disposal: (not uses_temp) or {not (and_label and or_label) [if]}
self.arg.generate_disposal_code(code)
if or_label and or_label != fall_through:
# value is false => short-circuit to next 'or'
code.put_goto(or_label)
if and_label:
# value is true => go to next 'and'
if or_label:
code.putln("} else {")
if not uses_temp:
# disposal: (not uses_temp) and {(and_label and or_label) [else]}
self.arg.generate_disposal_code(code)
if and_label != fall_through:
code.put_goto(and_label)
if not and_label or not or_label:
# if no next 'and' or 'or', we provide the result
if and_label or or_label:
code.putln("} else {")
self.value.generate_evaluation_code(code)
self.value.make_owned_reference(code)
code.putln("%s = %s;" % (final_result_temp, self.value.result_as(final_result_type)))
self.value.generate_post_assignment_code(code)
# disposal: {not (and_label and or_label) [else]}
self.arg.generate_disposal_code(code)
self.value.free_temps(code)
if end_label != fall_through:
code.put_goto(end_label)
if and_label or or_label:
code.putln("}")
self.arg.free_temps(code)
class CondExprNode(ExprNode):
# Short-circuiting conditional expression.
#
# test ExprNode
# true_val ExprNode
# false_val ExprNode
true_val = None
false_val = None
is_temp = True
subexprs = ['test', 'true_val', 'false_val']
def type_dependencies(self, env):
return self.true_val.type_dependencies(env) + self.false_val.type_dependencies(env)
def infer_type(self, env):
return PyrexTypes.independent_spanning_type(
self.true_val.infer_type(env),
self.false_val.infer_type(env))
def calculate_constant_result(self):
if self.test.constant_result:
self.constant_result = self.true_val.constant_result
else:
self.constant_result = self.false_val.constant_result
def is_ephemeral(self):
return self.true_val.is_ephemeral() or self.false_val.is_ephemeral()
def analyse_types(self, env):
self.test = self.test.analyse_types(env).coerce_to_boolean(env)
self.true_val = self.true_val.analyse_types(env)
self.false_val = self.false_val.analyse_types(env)
return self.analyse_result_type(env)
def analyse_result_type(self, env):
self.type = PyrexTypes.independent_spanning_type(
self.true_val.type, self.false_val.type)
if self.type.is_reference:
self.type = PyrexTypes.CFakeReferenceType(self.type.ref_base_type)
if self.type.is_pyobject:
self.result_ctype = py_object_type
elif self.true_val.is_ephemeral() or self.false_val.is_ephemeral():
error(self.pos, "Unsafe C derivative of temporary Python reference used in conditional expression")
if self.true_val.type.is_pyobject or self.false_val.type.is_pyobject:
self.true_val = self.true_val.coerce_to(self.type, env)
self.false_val = self.false_val.coerce_to(self.type, env)
if self.type.is_error:
self.type_error()
return self
def coerce_to_integer(self, env):
self.true_val = self.true_val.coerce_to_integer(env)
self.false_val = self.false_val.coerce_to_integer(env)
self.result_ctype = None
return self.analyse_result_type(env)
def coerce_to(self, dst_type, env):
self.true_val = self.true_val.coerce_to(dst_type, env)
self.false_val = self.false_val.coerce_to(dst_type, env)
self.result_ctype = None
return self.analyse_result_type(env)
def type_error(self):
if not (self.true_val.type.is_error or self.false_val.type.is_error):
error(self.pos, "Incompatible types in conditional expression (%s; %s)" %
(self.true_val.type, self.false_val.type))
self.type = PyrexTypes.error_type
def check_const(self):
return (self.test.check_const()
and self.true_val.check_const()
and self.false_val.check_const())
def generate_evaluation_code(self, code):
        # Because the subexprs may not both be evaluated, we can use a more
        # efficient temp allocation strategy than the default, so we override
        # generate_evaluation_code().
code.mark_pos(self.pos)
self.allocate_temp_result(code)
self.test.generate_evaluation_code(code)
code.putln("if (%s) {" % self.test.result())
self.eval_and_get(code, self.true_val)
code.putln("} else {")
self.eval_and_get(code, self.false_val)
code.putln("}")
self.test.generate_disposal_code(code)
self.test.free_temps(code)
def eval_and_get(self, code, expr):
expr.generate_evaluation_code(code)
if self.type.is_memoryviewslice:
expr.make_owned_memoryviewslice(code)
else:
expr.make_owned_reference(code)
code.putln('%s = %s;' % (self.result(), expr.result_as(self.ctype())))
expr.generate_post_assignment_code(code)
expr.free_temps(code)
def generate_subexpr_disposal_code(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
def free_subexpr_temps(self, code):
pass # done explicitly above (cleanup must separately happen within the if/else blocks)
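# Generated-code sketch for "x if c else y" (illustrative): only one branch
# is ever evaluated, inside its own block:
#
#     if (<c>) { result = <x>; } else { result = <y>; }
#
# which is why disposal and temp release must happen inside the blocks
# (eval_and_get) rather than through the default subexpr machinery.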
richcmp_constants = {
"<" : "Py_LT",
"<=": "Py_LE",
"==": "Py_EQ",
"!=": "Py_NE",
"<>": "Py_NE",
">" : "Py_GT",
">=": "Py_GE",
# the following are faked by special compare functions
"in" : "Py_EQ",
"not_in": "Py_NE",
}
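# Usage sketch (illustrative): a Python-object comparison "a < b" is emitted
# as PyObject_RichCompare(a, b, Py_LT). 'in'/'not_in' have no rich-comparison
# slot; as noted above they are mapped to Py_EQ/Py_NE only so that the
# special *_ContainsTF helpers can reuse this table for their equality sense.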
class CmpNode(object):
# Mixin class containing code common to PrimaryCmpNodes
# and CascadedCmpNodes.
special_bool_cmp_function = None
special_bool_cmp_utility_code = None
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def calculate_cascaded_constant_result(self, operand1_result):
func = compile_time_binary_operators[self.operator]
operand2_result = self.operand2.constant_result
if (isinstance(operand1_result, any_string_type) and
isinstance(operand2_result, any_string_type) and
type(operand1_result) != type(operand2_result)):
# string comparison of different types isn't portable
return
if self.operator in ('in', 'not_in'):
if isinstance(self.operand2, (ListNode, TupleNode, SetNode)):
if not self.operand2.args:
self.constant_result = self.operator == 'not_in'
return
elif isinstance(self.operand2, ListNode) and not self.cascade:
# tuples are more efficient to store than lists
self.operand2 = self.operand2.as_tuple()
elif isinstance(self.operand2, DictNode):
if not self.operand2.key_value_pairs:
self.constant_result = self.operator == 'not_in'
return
self.constant_result = func(operand1_result, operand2_result)
def cascaded_compile_time_value(self, operand1, denv):
func = get_compile_time_binop(self)
operand2 = self.operand2.compile_time_value(denv)
try:
result = func(operand1, operand2)
except Exception as e:
self.compile_time_value_error(e)
result = None
if result:
cascade = self.cascade
if cascade:
result = result and cascade.cascaded_compile_time_value(operand2, denv)
return result
def is_cpp_comparison(self):
return self.operand1.type.is_cpp_class or self.operand2.type.is_cpp_class
def find_common_int_type(self, env, op, operand1, operand2):
# type1 != type2 and at least one of the types is not a C int
type1 = operand1.type
type2 = operand2.type
type1_can_be_int = False
type2_can_be_int = False
if operand1.is_string_literal and operand1.can_coerce_to_char_literal():
type1_can_be_int = True
if operand2.is_string_literal and operand2.can_coerce_to_char_literal():
type2_can_be_int = True
if type1.is_int:
if type2_can_be_int:
return type1
elif type2.is_int:
if type1_can_be_int:
return type2
elif type1_can_be_int:
if type2_can_be_int:
if Builtin.unicode_type in (type1, type2):
return PyrexTypes.c_py_ucs4_type
else:
return PyrexTypes.c_uchar_type
return None
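    # Example (comment only): for "i == b'x'" with a C int i, the
    # single-character bytes literal can be coerced to a char, so the
    # comparison is compiled as plain C integer equality; when a unicode
    # operand is involved, the common type becomes Py_UCS4 instead.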
def find_common_type(self, env, op, operand1, common_type=None):
operand2 = self.operand2
type1 = operand1.type
type2 = operand2.type
new_common_type = None
# catch general errors
if (type1 == str_type and (type2.is_string or type2 in (bytes_type, unicode_type)) or
type2 == str_type and (type1.is_string or type1 in (bytes_type, unicode_type))):
error(self.pos, "Comparisons between bytes/unicode and str are not portable to Python 3")
new_common_type = error_type
# try to use numeric comparisons where possible
elif type1.is_complex or type2.is_complex:
if (op not in ('==', '!=')
and (type1.is_complex or type1.is_numeric)
and (type2.is_complex or type2.is_numeric)):
error(self.pos, "complex types are unordered")
new_common_type = error_type
elif type1.is_pyobject:
new_common_type = Builtin.complex_type if type1.subtype_of(Builtin.complex_type) else py_object_type
elif type2.is_pyobject:
new_common_type = Builtin.complex_type if type2.subtype_of(Builtin.complex_type) else py_object_type
else:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif type1.is_numeric and type2.is_numeric:
new_common_type = PyrexTypes.widest_numeric_type(type1, type2)
elif common_type is None or not common_type.is_pyobject:
new_common_type = self.find_common_int_type(env, op, operand1, operand2)
if new_common_type is None:
# fall back to generic type compatibility tests
if type1.is_ctuple or type2.is_ctuple:
new_common_type = py_object_type
elif type1 == type2:
new_common_type = type1
elif type1.is_pyobject or type2.is_pyobject:
if type2.is_numeric or type2.is_string:
if operand2.check_for_coercion_error(type1, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif type1.is_numeric or type1.is_string:
if operand1.check_for_coercion_error(type2, env):
new_common_type = error_type
else:
new_common_type = py_object_type
elif py_object_type.assignable_from(type1) and py_object_type.assignable_from(type2):
new_common_type = py_object_type
else:
# one Python type and one non-Python type, not assignable
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
elif type1.assignable_from(type2):
new_common_type = type1
elif type2.assignable_from(type1):
new_common_type = type2
else:
# C types that we couldn't handle up to here are an error
self.invalid_types_error(operand1, op, operand2)
new_common_type = error_type
if new_common_type.is_string and (isinstance(operand1, BytesNode) or
isinstance(operand2, BytesNode)):
# special case when comparing char* to bytes literal: must
# compare string values!
new_common_type = bytes_type
# recursively merge types
if common_type is None or new_common_type.is_error:
common_type = new_common_type
else:
# we could do a lot better by splitting the comparison
# into a non-Python part and a Python part, but this is
# safer for now
common_type = PyrexTypes.spanning_type(common_type, new_common_type)
if self.cascade:
common_type = self.cascade.find_common_type(env, self.operator, operand2, common_type)
return common_type
def invalid_types_error(self, operand1, op, operand2):
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(op, operand1.type, operand2.type))
def is_python_comparison(self):
return (not self.is_ptr_contains()
and not self.is_c_string_contains()
and (self.has_python_operands()
or (self.cascade and self.cascade.is_python_comparison())
or self.operator in ('in', 'not_in')))
def coerce_operands_to(self, dst_type, env):
operand2 = self.operand2
if operand2.type != dst_type:
self.operand2 = operand2.coerce_to(dst_type, env)
if self.cascade:
self.cascade.coerce_operands_to(dst_type, env)
def is_python_result(self):
return ((self.has_python_operands() and
self.special_bool_cmp_function is None and
self.operator not in ('is', 'is_not', 'in', 'not_in') and
not self.is_c_string_contains() and
not self.is_ptr_contains())
or (self.cascade and self.cascade.is_python_result()))
def is_c_string_contains(self):
return self.operator in ('in', 'not_in') and \
((self.operand1.type.is_int
and (self.operand2.type.is_string or self.operand2.type is bytes_type)) or
(self.operand1.type.is_unicode_char
and self.operand2.type is unicode_type))
def is_ptr_contains(self):
if self.operator in ('in', 'not_in'):
container_type = self.operand2.type
return (container_type.is_ptr or container_type.is_array) \
and not container_type.is_string
def find_special_bool_compare_function(self, env, operand1, result_is_bool=False):
# note: currently operand1 must get coerced to a Python object if we succeed here!
if self.operator in ('==', '!='):
type1, type2 = operand1.type, self.operand2.type
if result_is_bool or (type1.is_builtin_type and type2.is_builtin_type):
if type1 is Builtin.unicode_type or type2 is Builtin.unicode_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.bytes_type or type2 is Builtin.bytes_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("BytesEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyBytes_Equals"
return True
elif type1 is Builtin.basestring_type or type2 is Builtin.basestring_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("UnicodeEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_Equals"
return True
elif type1 is Builtin.str_type or type2 is Builtin.str_type:
self.special_bool_cmp_utility_code = UtilityCode.load_cached("StrEquals", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyString_Equals"
return True
elif self.operator in ('in', 'not_in'):
if self.operand2.type is Builtin.dict_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyDictContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PyDict_ContainsTF"
return True
elif self.operand2.type is Builtin.set_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySetContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PySet_ContainsTF"
return True
elif self.operand2.type is Builtin.unicode_type:
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PyUnicodeContains", "StringTools.c")
self.special_bool_cmp_function = "__Pyx_PyUnicode_ContainsTF"
return True
else:
if not self.operand2.type.is_pyobject:
self.operand2 = self.operand2.coerce_to_pyobject(env)
self.special_bool_cmp_utility_code = UtilityCode.load_cached("PySequenceContains", "ObjectHandling.c")
self.special_bool_cmp_function = "__Pyx_PySequence_ContainsTF"
return True
return False
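    # Examples of the dispatch above (comment only): "u == v" with a known
    # unicode operand becomes __Pyx_PyUnicode_Equals(u, v, Py_EQ); "x in d"
    # for a dict d becomes __Pyx_PyDict_ContainsTF(x, d, Py_EQ), with d
    # first wrapped in a None check because "x in None" must raise TypeError.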
def generate_operation_code(self, code, result_code,
            operand1, op, operand2):
if self.type.is_pyobject:
error_clause = code.error_goto_if_null
got_ref = "__Pyx_XGOTREF(%s); " % result_code
if self.special_bool_cmp_function:
code.globalstate.use_utility_code(
UtilityCode.load_cached("PyBoolOrNullFromLong", "ObjectHandling.c"))
coerce_result = "__Pyx_PyBoolOrNull_FromLong"
else:
coerce_result = "__Pyx_PyBool_FromLong"
else:
error_clause = code.error_goto_if_neg
got_ref = ""
coerce_result = ""
if self.special_bool_cmp_function:
if operand1.type.is_pyobject:
result1 = operand1.py_result()
else:
result1 = operand1.result()
if operand2.type.is_pyobject:
result2 = operand2.py_result()
else:
result2 = operand2.result()
if self.special_bool_cmp_utility_code:
code.globalstate.use_utility_code(self.special_bool_cmp_utility_code)
code.putln(
"%s = %s(%s(%s, %s, %s)); %s%s" % (
result_code,
coerce_result,
self.special_bool_cmp_function,
result1, result2, richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_pyobject and op not in ('is', 'is_not'):
assert op not in ('in', 'not_in'), op
code.putln("%s = PyObject_RichCompare(%s, %s, %s); %s%s" % (
result_code,
operand1.py_result(),
operand2.py_result(),
richcmp_constants[op],
got_ref,
error_clause(result_code, self.pos)))
elif operand1.type.is_complex:
code.putln("%s = %s(%s%s(%s, %s));" % (
result_code,
coerce_result,
op == "!=" and "!" or "",
operand1.type.unary_op('eq'),
operand1.result(),
operand2.result()))
else:
type1 = operand1.type
type2 = operand2.type
if (type1.is_extension_type or type2.is_extension_type) \
and not type1.same_as(type2):
common_type = py_object_type
elif type1.is_numeric:
common_type = PyrexTypes.widest_numeric_type(type1, type2)
else:
common_type = type1
code1 = operand1.result_as(common_type)
code2 = operand2.result_as(common_type)
statement = "%s = %s(%s %s %s);" % (
result_code,
coerce_result,
code1,
self.c_operator(op),
code2)
if self.is_cpp_comparison() and self.exception_check == '+':
translate_cpp_exception(
code,
self.pos,
statement,
result_code if self.type.is_pyobject else None,
self.exception_value,
self.in_nogil_context)
else:
code.putln(statement)
def c_operator(self, op):
if op == 'is':
return "=="
elif op == 'is_not':
return "!="
else:
return op
class PrimaryCmpNode(ExprNode, CmpNode):
# Non-cascaded comparison or first comparison of
# a cascaded sequence.
#
# operator string
# operand1 ExprNode
# operand2 ExprNode
# cascade CascadedCmpNode
# We don't use the subexprs mechanism, because
# things here are too complicated for it to handle.
# Instead, we override all the framework methods
# which use it.
child_attrs = ['operand1', 'operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
is_memslice_nonecheck = False
def infer_type(self, env):
type1 = self.operand1.infer_type(env)
type2 = self.operand2.infer_type(env)
if is_pythran_expr(type1) or is_pythran_expr(type2):
if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
return PythranExpr(pythran_binop_type(self.operator, type1, type2))
# TODO: implement this for other types.
return py_object_type
def type_dependencies(self, env):
return ()
def calculate_constant_result(self):
assert not self.cascade
self.calculate_cascaded_constant_result(self.operand1.constant_result)
def compile_time_value(self, denv):
operand1 = self.operand1.compile_time_value(denv)
return self.cascaded_compile_time_value(operand1, denv)
def analyse_types(self, env):
self.operand1 = self.operand1.analyse_types(env)
self.operand2 = self.operand2.analyse_types(env)
if self.is_cpp_comparison():
self.analyse_cpp_comparison(env)
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for cpp types.")
return self
type1 = self.operand1.type
type2 = self.operand2.type
if is_pythran_expr(type1) or is_pythran_expr(type2):
if is_pythran_supported_type(type1) and is_pythran_supported_type(type2):
self.type = PythranExpr(pythran_binop_type(self.operator, type1, type2))
self.is_pycmp = False
return self
if self.analyse_memoryviewslice_comparison(env):
return self
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
if self.operator in ('in', 'not_in'):
if self.is_c_string_contains():
self.is_pycmp = False
common_type = None
if self.cascade:
error(self.pos, "Cascading comparison not yet supported for 'int_val in string'.")
return self
if self.operand2.type is unicode_type:
env.use_utility_code(UtilityCode.load_cached("PyUCS4InUnicode", "StringTools.c"))
else:
if self.operand1.type is PyrexTypes.c_uchar_type:
self.operand1 = self.operand1.coerce_to(PyrexTypes.c_char_type, env)
if self.operand2.type is not bytes_type:
self.operand2 = self.operand2.coerce_to(bytes_type, env)
env.use_utility_code(UtilityCode.load_cached("BytesContains", "StringTools.c"))
self.operand2 = self.operand2.as_none_safe_node(
"argument of type 'NoneType' is not iterable")
elif self.is_ptr_contains():
if self.cascade:
error(self.pos, "Cascading comparison not supported for 'val in sliced pointer'.")
self.type = PyrexTypes.c_bint_type
# Will be transformed by IterationTransform
return self
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = py_object_type
self.is_pycmp = True
elif self.find_special_bool_compare_function(env, self.operand1):
if not self.operand1.type.is_pyobject:
self.operand1 = self.operand1.coerce_to_pyobject(env)
common_type = None # if coercion needed, the method call above has already done it
self.is_pycmp = False # result is bint
else:
common_type = self.find_common_type(env, self.operator, self.operand1)
self.is_pycmp = common_type.is_pyobject
if common_type is not None and not common_type.is_error:
if self.operand1.type != common_type:
self.operand1 = self.operand1.coerce_to(common_type, env)
self.coerce_operands_to(common_type, env)
if self.cascade:
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
operand2 = self.cascade.optimise_comparison(self.operand2, env)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
if self.is_python_result():
self.type = PyrexTypes.py_object_type
else:
self.type = PyrexTypes.c_bint_type
cdr = self.cascade
while cdr:
cdr.type = self.type
cdr = cdr.cascade
if self.is_pycmp or self.cascade or self.special_bool_cmp_function:
# 1) owned reference, 2) reused value, 3) potential function error return value
self.is_temp = 1
return self
def analyse_cpp_comparison(self, env):
type1 = self.operand1.type
type2 = self.operand2.type
self.is_pycmp = False
entry = env.lookup_operator(self.operator, [self.operand1, self.operand2])
if entry is None:
error(self.pos, "Invalid types for '%s' (%s, %s)" %
(self.operator, type1, type2))
self.type = PyrexTypes.error_type
self.result_code = ""
return
func_type = entry.type
if func_type.is_ptr:
func_type = func_type.base_type
self.exception_check = func_type.exception_check
self.exception_value = func_type.exception_value
if self.exception_check == '+':
self.is_temp = True
if self.exception_value is None:
env.use_utility_code(UtilityCode.load_cached("CppExceptionConversion", "CppSupport.cpp"))
if len(func_type.args) == 1:
self.operand2 = self.operand2.coerce_to(func_type.args[0].type, env)
else:
self.operand1 = self.operand1.coerce_to(func_type.args[0].type, env)
self.operand2 = self.operand2.coerce_to(func_type.args[1].type, env)
self.type = func_type.return_type
def analyse_memoryviewslice_comparison(self, env):
have_none = self.operand1.is_none or self.operand2.is_none
have_slice = (self.operand1.type.is_memoryviewslice or
self.operand2.type.is_memoryviewslice)
ops = ('==', '!=', 'is', 'is_not')
if have_slice and have_none and self.operator in ops:
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_memslice_nonecheck = True
return True
return False
def coerce_to_boolean(self, env):
if self.is_pycmp:
# coercing to bool => may allow for more efficient comparison code
if self.find_special_bool_compare_function(
env, self.operand1, result_is_bool=True):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
self.is_temp = 1
if self.cascade:
operand2 = self.cascade.optimise_comparison(
self.operand2, env, result_is_bool=True)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return self
# TODO: check if we can optimise parts of the cascade here
return ExprNode.coerce_to_boolean(self, env)
def has_python_operands(self):
return (self.operand1.type.is_pyobject
or self.operand2.type.is_pyobject)
def check_const(self):
if self.cascade:
self.not_const()
return False
else:
return self.operand1.check_const() and self.operand2.check_const()
def calculate_result_code(self):
operand1, operand2 = self.operand1, self.operand2
if operand1.type.is_complex:
if self.operator == "!=":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
operand1.type.binary_op('=='),
operand1.result(),
operand2.result())
elif self.is_c_string_contains():
if operand2.type is unicode_type:
method = "__Pyx_UnicodeContainsUCS4"
else:
method = "__Pyx_BytesContains"
if self.operator == "not_in":
negation = "!"
else:
negation = ""
return "(%s%s(%s, %s))" % (
negation,
method,
operand2.result(),
operand1.result())
else:
if is_pythran_expr(self.type):
result1, result2 = operand1.pythran_result(), operand2.pythran_result()
else:
result1, result2 = operand1.result(), operand2.result()
if self.is_memslice_nonecheck:
if operand1.type.is_memoryviewslice:
result1 = "((PyObject *) %s.memview)" % result1
else:
result2 = "((PyObject *) %s.memview)" % result2
return "(%s %s %s)" % (
result1,
self.c_operator(self.operator),
result2)
def generate_evaluation_code(self, code):
self.operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
if self.is_temp:
self.allocate_temp_result(code)
self.generate_operation_code(code, self.result(),
self.operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, self.result(), self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
self.operand1.generate_disposal_code(code)
self.operand1.free_temps(code)
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
def generate_subexpr_disposal_code(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.generate_disposal_code(code)
self.operand2.generate_disposal_code(code)
def free_subexpr_temps(self, code):
# If this is called, it is a non-cascaded cmp,
# so only need to dispose of the two main operands.
self.operand1.free_temps(code)
self.operand2.free_temps(code)
def annotate(self, code):
self.operand1.annotate(code)
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
class CascadedCmpNode(Node, CmpNode):
# A CascadedCmpNode is not a complete expression node. It
# hangs off the side of another comparison node, shares
# its left operand with that node, and shares its result
# with the PrimaryCmpNode at the head of the chain.
#
# operator string
# operand2 ExprNode
# cascade CascadedCmpNode
child_attrs = ['operand2', 'coerced_operand2', 'cascade']
cascade = None
coerced_operand2 = None
constant_result = constant_value_not_set # FIXME: where to calculate this?
def infer_type(self, env):
# TODO: Actually implement this (after merging with -unstable).
return py_object_type
def type_dependencies(self, env):
return ()
def has_constant_result(self):
return self.constant_result is not constant_value_not_set and \
self.constant_result is not not_a_constant
def analyse_types(self, env):
self.operand2 = self.operand2.analyse_types(env)
if self.cascade:
self.cascade = self.cascade.analyse_types(env)
return self
def has_python_operands(self):
return self.operand2.type.is_pyobject
def is_cpp_comparison(self):
        # Cascaded comparisons aren't currently implemented for C++ classes.
return False
def optimise_comparison(self, operand1, env, result_is_bool=False):
if self.find_special_bool_compare_function(env, operand1, result_is_bool):
self.is_pycmp = False
self.type = PyrexTypes.c_bint_type
if not operand1.type.is_pyobject:
operand1 = operand1.coerce_to_pyobject(env)
if self.cascade:
operand2 = self.cascade.optimise_comparison(self.operand2, env, result_is_bool)
if operand2 is not self.operand2:
self.coerced_operand2 = operand2
return operand1
def coerce_operands_to_pyobjects(self, env):
self.operand2 = self.operand2.coerce_to_pyobject(env)
if self.operand2.type is dict_type and self.operator in ('in', 'not_in'):
self.operand2 = self.operand2.as_none_safe_node("'NoneType' object is not iterable")
if self.cascade:
self.cascade.coerce_operands_to_pyobjects(env)
def coerce_cascaded_operands_to_temp(self, env):
if self.cascade:
#self.operand2 = self.operand2.coerce_to_temp(env) #CTT
self.operand2 = self.operand2.coerce_to_simple(env)
self.cascade.coerce_cascaded_operands_to_temp(env)
def generate_evaluation_code(self, code, result, operand1, needs_evaluation=False):
if self.type.is_pyobject:
code.putln("if (__Pyx_PyObject_IsTrue(%s)) {" % result)
code.put_decref(result, self.type)
else:
code.putln("if (%s) {" % result)
if needs_evaluation:
operand1.generate_evaluation_code(code)
self.operand2.generate_evaluation_code(code)
self.generate_operation_code(code, result,
operand1, self.operator, self.operand2)
if self.cascade:
self.cascade.generate_evaluation_code(
code, result, self.coerced_operand2 or self.operand2,
needs_evaluation=self.coerced_operand2 is not None)
if needs_evaluation:
operand1.generate_disposal_code(code)
operand1.free_temps(code)
# Cascaded cmp result is always temp
self.operand2.generate_disposal_code(code)
self.operand2.free_temps(code)
code.putln("}")
def annotate(self, code):
self.operand2.annotate(code)
if self.cascade:
self.cascade.annotate(code)
binop_node_classes = {
"or": BoolBinopNode,
"and": BoolBinopNode,
"|": IntBinopNode,
"^": IntBinopNode,
"&": IntBinopNode,
"<<": IntBinopNode,
">>": IntBinopNode,
"+": AddNode,
"-": SubNode,
"*": MulNode,
"@": MatMultNode,
"/": DivNode,
"//": DivNode,
"%": ModNode,
"**": PowNode,
}
def binop_node(pos, operator, operand1, operand2, inplace=False, **kwargs):
# Construct binop node of appropriate class for
# given operator.
return binop_node_classes[operator](
pos,
operator=operator,
operand1=operand1,
operand2=operand2,
inplace=inplace,
**kwargs)
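# Usage sketch (illustrative, with hypothetical operand nodes): a transform
# building "a + b" would call
#
#     node = binop_node(pos, '+', operand1=a_node, operand2=b_node)
#
# which instantiates AddNode; note that '/' and '//' both map to DivNode,
# which distinguishes them through self.operator and the truedivision flag.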
#-------------------------------------------------------------------
#
# Coercion nodes
#
# Coercion nodes are special in that they are created during
# the analyse_types phase of parse tree processing.
# Their __init__ methods consequently incorporate some aspects
# of that phase.
#
#-------------------------------------------------------------------
class CoercionNode(ExprNode):
# Abstract base class for coercion nodes.
#
# arg ExprNode node being coerced
subexprs = ['arg']
constant_result = not_a_constant
def __init__(self, arg):
super(CoercionNode, self).__init__(arg.pos)
self.arg = arg
if debug_coercion:
print("%s Coercing %s" % (self, self.arg))
def calculate_constant_result(self):
# constant folding can break type coercion, so this is disabled
pass
def annotate(self, code):
self.arg.annotate(code)
if self.arg.type != self.type:
file, line, col = self.pos
code.annotate((file, line, col-1), AnnotationItem(
style='coerce', tag='coerce', text='[%s] to [%s]' % (self.arg.type, self.type)))
class CoerceToMemViewSliceNode(CoercionNode):
"""
Coerce an object to a memoryview slice. This holds a new reference in
a managed temp.
"""
def __init__(self, arg, dst_type, env):
assert dst_type.is_memoryviewslice
assert not arg.type.is_memoryviewslice
CoercionNode.__init__(self, arg)
self.type = dst_type
self.is_temp = 1
self.use_managed_ref = True
self.arg = arg
self.type.create_from_py_utility_code(env)
def generate_result_code(self, code):
code.putln(self.type.from_py_call_code(
self.arg.py_result(),
self.result(),
self.pos,
code
))
class CastNode(CoercionNode):
# Wrap a node in a C type cast.
def __init__(self, arg, new_type):
CoercionNode.__init__(self, arg)
self.type = new_type
def may_be_none(self):
return self.arg.may_be_none()
def calculate_result_code(self):
return self.arg.result_as(self.type)
def generate_result_code(self, code):
self.arg.generate_result_code(code)
class PyTypeTestNode(CoercionNode):
# This node is used to check that a generic Python
# object is an instance of a particular extension type.
# This node borrows the result of its argument node.
exact_builtin_type = True
def __init__(self, arg, dst_type, env, notnone=False):
# The arg is known to be a Python object, and
# the dst_type is known to be an extension type.
assert dst_type.is_extension_type or dst_type.is_builtin_type, "PyTypeTest on non extension type"
CoercionNode.__init__(self, arg)
self.type = dst_type
self.result_ctype = arg.ctype()
self.notnone = notnone
nogil_check = Node.gil_error
gil_message = "Python type test"
def analyse_types(self, env):
return self
def may_be_none(self):
if self.notnone:
return False
return self.arg.may_be_none()
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def is_ephemeral(self):
return self.arg.is_ephemeral()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def reanalyse(self):
if self.type != self.arg.type or not self.arg.is_temp:
return self
if not self.type.typeobj_is_available():
return self
if self.arg.may_be_none() and self.notnone:
return self.arg.as_none_safe_node("Cannot convert NoneType to %.200s" % self.type.name)
return self.arg
def calculate_constant_result(self):
# FIXME
pass
def calculate_result_code(self):
return self.arg.result()
def generate_result_code(self, code):
if self.type.typeobj_is_available():
if self.type.is_builtin_type:
type_test = self.type.type_test_code(
self.arg.py_result(),
self.notnone, exact=self.exact_builtin_type)
else:
type_test = self.type.type_test_code(
self.arg.py_result(), self.notnone)
code.globalstate.use_utility_code(
UtilityCode.load_cached("ExtTypeTest", "ObjectHandling.c"))
code.putln("if (!(%s)) %s" % (
type_test, code.error_goto(self.pos)))
else:
error(self.pos, "Cannot test type of extern C class "
"without type object name specification")
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def allocate_temp_result(self, code):
pass
def release_temp_result(self, code):
pass
def free_temps(self, code):
self.arg.free_temps(code)
def free_subexpr_temps(self, code):
self.arg.free_subexpr_temps(code)
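# Hedged Python analogue (not the generated C) of the borrowed-reference
# type test above: None passes unless notnone is requested, and the result
# is the argument itself -- no new reference is created.
def _sketch_type_test(obj, tp, notnone=False):
    if obj is None and not notnone:
        return obj
    if not isinstance(obj, tp):
        raise TypeError("Cannot convert %.200s to %.200s"
                        % (type(obj).__name__, tp.__name__))
    return obj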
class NoneCheckNode(CoercionNode):
# This node is used to check that a Python object is not None and
# raises an appropriate exception (as specified by the creating
# transform).
is_nonecheck = True
def __init__(self, arg, exception_type_cname, exception_message,
exception_format_args=()):
CoercionNode.__init__(self, arg)
self.type = arg.type
self.result_ctype = arg.ctype()
self.exception_type_cname = exception_type_cname
self.exception_message = exception_message
self.exception_format_args = tuple(exception_format_args or ())
nogil_check = None # this node only guards an operation that would fail already
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def is_simple(self):
return self.arg.is_simple()
def result_in_temp(self):
return self.arg.result_in_temp()
def nonlocally_immutable(self):
return self.arg.nonlocally_immutable()
def calculate_result_code(self):
return self.arg.result()
def condition(self):
if self.type.is_pyobject:
return self.arg.py_result()
elif self.type.is_memoryviewslice:
return "((PyObject *) %s.memview)" % self.arg.result()
else:
raise Exception("unsupported type")
@classmethod
def generate(cls, arg, code, exception_message,
exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
node = cls(arg, exception_type_cname, exception_message, exception_format_args)
node.in_nogil_context = in_nogil_context
node.put_nonecheck(code)
@classmethod
def generate_if_needed(cls, arg, code, exception_message,
exception_type_cname="PyExc_TypeError", exception_format_args=(), in_nogil_context=False):
if arg.may_be_none():
cls.generate(arg, code, exception_message, exception_type_cname, exception_format_args, in_nogil_context)
def put_nonecheck(self, code):
code.putln(
"if (unlikely(%s == Py_None)) {" % self.condition())
if self.in_nogil_context:
code.put_ensure_gil()
escape = StringEncoding.escape_byte_string
if self.exception_format_args:
code.putln('PyErr_Format(%s, "%s", %s);' % (
self.exception_type_cname,
StringEncoding.escape_byte_string(
self.exception_message.encode('UTF-8')),
', '.join([ '"%s"' % escape(str(arg).encode('UTF-8'))
for arg in self.exception_format_args ])))
else:
code.putln('PyErr_SetString(%s, "%s");' % (
self.exception_type_cname,
escape(self.exception_message.encode('UTF-8'))))
if self.in_nogil_context:
code.put_release_ensured_gil()
code.putln(code.error_goto(self.pos))
code.putln("}")
def generate_result_code(self, code):
self.put_nonecheck(code)
def generate_post_assignment_code(self, code):
self.arg.generate_post_assignment_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
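# Hedged sketch of the C guard that put_nonecheck() above emits; the temp
# name, message and error label are illustrative, not the real generated
# identifiers:
#
#     if (unlikely(__pyx_v_obj == Py_None)) {
#         PyErr_SetString(PyExc_TypeError, "argument must not be None");
#         __PYX_ERR(0, 1, __pyx_L1_error)
#     }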
class CoerceToPyTypeNode(CoercionNode):
# This node is used to convert a C data type
# to a Python object.
type = py_object_type
target_type = py_object_type
is_temp = 1
def __init__(self, arg, env, type=py_object_type):
if not arg.type.create_to_py_utility_code(env):
error(arg.pos, "Cannot convert '%s' to Python object" % arg.type)
elif arg.type.is_complex:
# special case: complex coercion is so complex that it
# uses a macro ("__pyx_PyComplex_FromComplex()"), for
# which the argument must be simple
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
if type is py_object_type:
# be specific about some known types
if arg.type.is_string or arg.type.is_cpp_string:
self.type = default_str_type(env)
elif arg.type.is_pyunicode_ptr or arg.type.is_unicode_char:
self.type = unicode_type
elif arg.type.is_complex:
self.type = Builtin.complex_type
self.target_type = self.type
elif arg.type.is_string or arg.type.is_cpp_string:
if (type not in (bytes_type, bytearray_type)
and not env.directives['c_string_encoding']):
error(arg.pos,
"default encoding required for conversion from '%s' to '%s'" %
(arg.type, type))
self.type = self.target_type = type
else:
# FIXME: check that the target type and the resulting type are compatible
self.target_type = type
gil_message = "Converting to Python object"
def may_be_none(self):
# FIXME: is this always safe?
return False
def coerce_to_boolean(self, env):
arg_type = self.arg.type
if (arg_type == PyrexTypes.c_bint_type or
(arg_type.is_pyobject and arg_type.name == 'bool')):
return self.arg.coerce_to_temp(env)
else:
return CoerceToBooleanNode(self, env)
def coerce_to_integer(self, env):
# If not already some C integer type, coerce to a C long.
if self.arg.type.is_int:
return self.arg
else:
return self.arg.coerce_to(PyrexTypes.c_long_type, env)
def analyse_types(self, env):
# The arg is always already analysed
return self
def generate_result_code(self, code):
code.putln('%s; %s' % (
self.arg.type.to_py_call_code(
self.arg.result(),
self.result(),
self.target_type),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.py_result())
class CoerceIntToBytesNode(CoerceToPyTypeNode):
# This node is used to convert a C int type to a Python bytes
# object.
is_temp = 1
def __init__(self, arg, env):
arg = arg.coerce_to_simple(env)
CoercionNode.__init__(self, arg)
self.type = Builtin.bytes_type
def generate_result_code(self, code):
arg = self.arg
arg_result = arg.result()
if arg.type not in (PyrexTypes.c_char_type,
PyrexTypes.c_uchar_type,
PyrexTypes.c_schar_type):
if arg.type.signed:
code.putln("if ((%s < 0) || (%s > 255)) {" % (
arg_result, arg_result))
else:
code.putln("if (%s > 255) {" % arg_result)
code.putln('PyErr_SetString(PyExc_OverflowError, '
'"value too large to pack into a byte"); %s' % (
code.error_goto(self.pos)))
code.putln('}')
temp = None
if arg.type is not PyrexTypes.c_char_type:
temp = code.funcstate.allocate_temp(PyrexTypes.c_char_type, manage_ref=False)
code.putln("%s = (char)%s;" % (temp, arg_result))
arg_result = temp
code.putln('%s = PyBytes_FromStringAndSize(&%s, 1); %s' % (
self.result(),
arg_result,
code.error_goto_if_null(self.result(), self.pos)))
if temp is not None:
code.funcstate.release_temp(temp)
code.put_gotref(self.py_result())
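# Hedged Python analogue of the C emitted above: range-check the value,
# then pack a single byte (PyBytes_FromStringAndSize(&c, 1) in the C).
def _sketch_int_to_bytes(value):
    if value < 0 or value > 255:
        raise OverflowError("value too large to pack into a byte")
    return bytes(bytearray([value]))  # works on both Python 2 and 3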
class CoerceFromPyTypeNode(CoercionNode):
# This node is used to convert a Python object
# to a C data type.
def __init__(self, result_type, arg, env):
CoercionNode.__init__(self, arg)
self.type = result_type
self.is_temp = 1
if not result_type.create_from_py_utility_code(env):
error(arg.pos,
"Cannot convert Python object to '%s'" % result_type)
if self.type.is_string or self.type.is_pyunicode_ptr:
if self.arg.is_name and self.arg.entry and self.arg.entry.is_pyglobal:
warning(arg.pos,
"Obtaining '%s' from externally modifiable global Python value" % result_type,
level=1)
def analyse_types(self, env):
# The arg is always already analysed
return self
def is_ephemeral(self):
return (self.type.is_ptr and not self.type.is_array) and self.arg.is_ephemeral()
def generate_result_code(self, code):
from_py_function = None
# for certain source types, we can do better than the generic coercion
if self.type.is_string and self.arg.type is bytes_type:
if self.type.from_py_function.startswith('__Pyx_PyObject_As'):
from_py_function = '__Pyx_PyBytes' + self.type.from_py_function[len('__Pyx_PyObject'):]
NoneCheckNode.generate_if_needed(self.arg, code, "expected bytes, NoneType found")
code.putln(self.type.from_py_call_code(
self.arg.py_result(), self.result(), self.pos, code, from_py_function=from_py_function))
if self.type.is_pyobject:
code.put_gotref(self.py_result())
def nogil_check(self, env):
error(self.pos, "Coercion from Python not allowed without the GIL")
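# Hedged example of the helper-name rewrite in generate_result_code() above:
# for a bytes argument, the generic "__Pyx_PyObject_As..." helper is swapped
# for its "__Pyx_PyBytes_As..." counterpart.
def _sketch_bytes_helper_name(from_py_function="__Pyx_PyObject_AsString"):
    assert from_py_function.startswith('__Pyx_PyObject_As')
    return '__Pyx_PyBytes' + from_py_function[len('__Pyx_PyObject'):]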
class CoerceToBooleanNode(CoercionNode):
# This node is used when a result needs to be used
# in a boolean context.
type = PyrexTypes.c_bint_type
_special_builtins = {
Builtin.list_type: 'PyList_GET_SIZE',
Builtin.tuple_type: 'PyTuple_GET_SIZE',
Builtin.set_type: 'PySet_GET_SIZE',
Builtin.frozenset_type: 'PySet_GET_SIZE',
Builtin.bytes_type: 'PyBytes_GET_SIZE',
Builtin.bytearray_type: 'PyByteArray_GET_SIZE',
Builtin.unicode_type: '__Pyx_PyUnicode_IS_TRUE',
}
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
if arg.type.is_pyobject:
self.is_temp = 1
def nogil_check(self, env):
if self.arg.type.is_pyobject and self._special_builtins.get(self.arg.type) is None:
self.gil_error()
gil_message = "Truth-testing Python object"
def check_const(self):
if self.is_temp:
self.not_const()
return False
return self.arg.check_const()
def calculate_result_code(self):
return "(%s != 0)" % self.arg.result()
def generate_result_code(self, code):
if not self.is_temp:
return
test_func = self._special_builtins.get(self.arg.type)
if test_func is not None:
checks = ["(%s != Py_None)" % self.arg.py_result()] if self.arg.may_be_none() else []
checks.append("(%s(%s) != 0)" % (test_func, self.arg.py_result()))
code.putln("%s = %s;" % (self.result(), '&&'.join(checks)))
else:
code.putln(
"%s = __Pyx_PyObject_IsTrue(%s); %s" % (
self.result(),
self.arg.py_result(),
code.error_goto_if_neg(self.result(), self.pos)))
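# Hedged sketch (illustrative temp name) of the fast-path C condition built
# above for a list argument that may be None.
def _sketch_bool_fastpath(arg_cname="__pyx_v_x", may_be_none=True):
    test_func = CoerceToBooleanNode._special_builtins[Builtin.list_type]
    checks = ["(%s != Py_None)" % arg_cname] if may_be_none else []
    checks.append("(%s(%s) != 0)" % (test_func, arg_cname))
    return '&&'.join(checks)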
class CoerceToComplexNode(CoercionNode):
def __init__(self, arg, dst_type, env):
if arg.type.is_complex:
arg = arg.coerce_to_simple(env)
self.type = dst_type
CoercionNode.__init__(self, arg)
dst_type.create_declaration_utility_code(env)
def calculate_result_code(self):
if self.arg.type.is_complex:
real_part = "__Pyx_CREAL(%s)" % self.arg.result()
imag_part = "__Pyx_CIMAG(%s)" % self.arg.result()
else:
real_part = self.arg.result()
imag_part = "0"
return "%s(%s, %s)" % (
self.type.from_parts,
real_part,
imag_part)
def generate_result_code(self, code):
pass
class CoerceToTempNode(CoercionNode):
# This node is used to force the result of another node
# to be stored in a temporary. It is only used if the
# argument node's result is not already in a temporary.
def __init__(self, arg, env):
CoercionNode.__init__(self, arg)
self.type = self.arg.type.as_argument_type()
self.constant_result = self.arg.constant_result
self.is_temp = 1
if self.type.is_pyobject:
self.result_ctype = py_object_type
gil_message = "Creating temporary Python reference"
def analyse_types(self, env):
# The arg is always already analysed
return self
def coerce_to_boolean(self, env):
self.arg = self.arg.coerce_to_boolean(env)
if self.arg.is_simple():
return self.arg
self.type = self.arg.type
self.result_ctype = self.type
return self
def generate_result_code(self, code):
#self.arg.generate_evaluation_code(code) # Already done
# by generic generate_subexpr_evaluation_code!
code.putln("%s = %s;" % (
self.result(), self.arg.result_as(self.ctype())))
if self.use_managed_ref:
if self.type.is_pyobject:
code.put_incref(self.result(), self.ctype())
elif self.type.is_memoryviewslice:
code.put_incref_memoryviewslice(self.result(),
not self.in_nogil_context)
class ProxyNode(CoercionNode):
"""
A node that should not be replaced by transforms or other means,
and is hence useful for wrapping the argument of a clone node:
MyNode -> ProxyNode -> ArgNode
CloneNode -^
"""
nogil_check = None
def __init__(self, arg):
super(ProxyNode, self).__init__(arg)
self.constant_result = arg.constant_result
self._proxy_type()
def analyse_types(self, env):
self.arg = self.arg.analyse_expressions(env)
self._proxy_type()
return self
def infer_type(self, env):
return self.arg.infer_type(env)
def _proxy_type(self):
if hasattr(self.arg, 'type'):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
def generate_result_code(self, code):
self.arg.generate_result_code(code)
def result(self):
return self.arg.result()
def is_simple(self):
return self.arg.is_simple()
def may_be_none(self):
return self.arg.may_be_none()
def generate_evaluation_code(self, code):
self.arg.generate_evaluation_code(code)
def generate_disposal_code(self, code):
self.arg.generate_disposal_code(code)
def free_temps(self, code):
self.arg.free_temps(code)
class CloneNode(CoercionNode):
# This node is employed when the result of another node needs
# to be used multiple times. The argument node's result must
# be in a temporary. This node "borrows" the result from the
# argument node, and does not generate any evaluation or
# disposal code for it. The original owner of the argument
# node is responsible for doing those things.
subexprs = [] # Arg is not considered a subexpr
nogil_check = None
def __init__(self, arg):
CoercionNode.__init__(self, arg)
self.constant_result = arg.constant_result
if hasattr(arg, 'type'):
self.type = arg.type
self.result_ctype = arg.result_ctype
if hasattr(arg, 'entry'):
self.entry = arg.entry
def result(self):
return self.arg.result()
def may_be_none(self):
return self.arg.may_be_none()
def type_dependencies(self, env):
return self.arg.type_dependencies(env)
def infer_type(self, env):
return self.arg.infer_type(env)
def analyse_types(self, env):
self.type = self.arg.type
self.result_ctype = self.arg.result_ctype
self.is_temp = 1
if hasattr(self.arg, 'entry'):
self.entry = self.arg.entry
return self
def coerce_to(self, dest_type, env):
if self.arg.is_literal:
return self.arg.coerce_to(dest_type, env)
return super(CloneNode, self).coerce_to(dest_type, env)
def is_simple(self):
return True # result is always in a temp (or a name)
def generate_evaluation_code(self, code):
pass
def generate_result_code(self, code):
pass
def generate_disposal_code(self, code):
pass
def free_temps(self, code):
pass
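# Hedged usage sketch of the ProxyNode/CloneNode pairing described above:
# given any analysed ExprNode whose result lives in a temp, the proxy is a
# stable handle and each clone borrows its result without re-evaluating it.
def _sketch_clone_proxy(arg_node):
    proxy = ProxyNode(arg_node)
    return CloneNode(proxy), CloneNode(proxy)  # two borrowed uses, one evaluation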
class CMethodSelfCloneNode(CloneNode):
# Special CloneNode for the self argument of builtin C methods
# that accepts subtypes of the builtin type. This is safe only
# for 'final' subtypes, as subtypes of the declared type may
# override the C method.
def coerce_to(self, dst_type, env):
if dst_type.is_builtin_type and self.type.subtype_of(dst_type):
return self
return CloneNode.coerce_to(self, dst_type, env)
class ModuleRefNode(ExprNode):
# Simply returns the module object.
type = py_object_type
is_temp = False
subexprs = []
def analyse_types(self, env):
return self
def may_be_none(self):
return False
def calculate_result_code(self):
return Naming.module_cname
def generate_result_code(self, code):
pass
class DocstringRefNode(ExprNode):
# Extracts the docstring of the body element
subexprs = ['body']
type = py_object_type
is_temp = True
def __init__(self, pos, body):
ExprNode.__init__(self, pos)
assert body.type.is_pyobject
self.body = body
def analyse_types(self, env):
return self
def generate_result_code(self, code):
code.putln('%s = __Pyx_GetAttr(%s, %s); %s' % (
self.result(), self.body.result(),
code.intern_identifier(StringEncoding.EncodedString("__doc__")),
code.error_goto_if_null(self.result(), self.pos)))
code.put_gotref(self.result())
#------------------------------------------------------------------------------------
#
# Runtime support code
#
#------------------------------------------------------------------------------------
pyerr_occurred_withgil_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void); /* proto */
""",
impl = """
static CYTHON_INLINE int __Pyx_ErrOccurredWithGIL(void) {
int err;
#ifdef WITH_THREAD
PyGILState_STATE _save = PyGILState_Ensure();
#endif
err = !!PyErr_Occurred();
#ifdef WITH_THREAD
PyGILState_Release(_save);
#endif
return err;
}
"""
)
#------------------------------------------------------------------------------------
raise_unbound_local_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
""")
raise_closure_name_error_utility_code = UtilityCode(
proto = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
""",
impl = """
static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
}
""")
# Don't inline the function, it should really never be called in production
raise_unbound_memoryview_utility_code_nogil = UtilityCode(
proto = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname);
""",
impl = """
static void __Pyx_RaiseUnboundMemoryviewSliceNogil(const char *varname) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseUnboundLocalError(varname);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
""",
requires = [raise_unbound_local_error_utility_code])
#------------------------------------------------------------------------------------
raise_too_many_values_to_unpack = UtilityCode.load_cached("RaiseTooManyValuesToUnpack", "ObjectHandling.c")
raise_need_more_values_to_unpack = UtilityCode.load_cached("RaiseNeedMoreValuesToUnpack", "ObjectHandling.c")
tuple_unpacking_error_code = UtilityCode.load_cached("UnpackTupleError", "ObjectHandling.c")
# File: Cython-0.29.28/Cython/Compiler/FlowControl.pxd
from __future__ import absolute_import
cimport cython
from .Visitor cimport CythonTransform, TreeVisitor
cdef class ControlBlock:
cdef public set children
cdef public set parents
cdef public set positions
cdef public list stats
cdef public dict gen
cdef public set bounded
# Big integer bitsets
cdef public object i_input
cdef public object i_output
cdef public object i_gen
cdef public object i_kill
cdef public object i_state
cpdef bint empty(self)
cpdef detach(self)
cpdef add_child(self, block)
cdef class ExitBlock(ControlBlock):
cpdef bint empty(self)
cdef class NameAssignment:
cdef public bint is_arg
cdef public bint is_deletion
cdef public object lhs
cdef public object rhs
cdef public object entry
cdef public object pos
cdef public set refs
cdef public object bit
cdef public object inferred_type
cdef class AssignmentList:
cdef public object bit
cdef public object mask
cdef public list stats
cdef class AssignmentCollector(TreeVisitor):
cdef list assignments
@cython.final
cdef class ControlFlow:
cdef public set blocks
cdef public set entries
cdef public list loops
cdef public list exceptions
cdef public ControlBlock entry_point
cdef public ExitBlock exit_point
cdef public ControlBlock block
cdef public dict assmts
cpdef newblock(self, ControlBlock parent=*)
cpdef nextblock(self, ControlBlock parent=*)
cpdef bint is_tracked(self, entry)
cpdef bint is_statically_assigned(self, entry)
cpdef mark_position(self, node)
cpdef mark_assignment(self, lhs, rhs, entry)
cpdef mark_argument(self, lhs, rhs, entry)
cpdef mark_deletion(self, node, entry)
cpdef mark_reference(self, node, entry)
@cython.locals(block=ControlBlock, parent=ControlBlock, unreachable=set)
cpdef normalize(self)
@cython.locals(bit=object, assmts=AssignmentList,
block=ControlBlock)
cpdef initialize(self)
@cython.locals(assmts=AssignmentList, assmt=NameAssignment)
cpdef set map_one(self, istate, entry)
@cython.locals(block=ControlBlock, parent=ControlBlock)
cdef reaching_definitions(self)
cdef class Uninitialized:
pass
cdef class Unknown:
pass
cdef class MessageCollection:
cdef set messages
@cython.locals(dirty=bint, block=ControlBlock, parent=ControlBlock,
assmt=NameAssignment)
cdef check_definitions(ControlFlow flow, dict compiler_directives)
@cython.final
cdef class ControlFlowAnalysis(CythonTransform):
cdef object gv_ctx
cdef object constant_folder
cdef set reductions
cdef list env_stack
cdef list stack
cdef object env
cdef ControlFlow flow
cdef bint in_inplace_assignment
cpdef mark_assignment(self, lhs, rhs=*)
cpdef mark_position(self, node)
# File: Cython-0.29.28/Cython/Compiler/FlowControl.py
from __future__ import absolute_import
import cython
cython.declare(PyrexTypes=object, ExprNodes=object, Nodes=object,
Builtin=object, InternalError=object, error=object, warning=object,
py_object_type=object, unspecified_type=object,
object_expr=object, fake_rhs_expr=object, TypedExprNode=object)
from . import Builtin
from . import ExprNodes
from . import Nodes
from . import Options
from .PyrexTypes import py_object_type, unspecified_type
from . import PyrexTypes
from .Visitor import TreeVisitor, CythonTransform
from .Errors import error, warning, InternalError
from .Optimize import ConstantFolding
class TypedExprNode(ExprNodes.ExprNode):
# Used for declaring assignments of a specified type without a known entry.
def __init__(self, type, may_be_none=None, pos=None):
super(TypedExprNode, self).__init__(pos)
self.type = type
self._may_be_none = may_be_none
def may_be_none(self):
return self._may_be_none is not False
object_expr = TypedExprNode(py_object_type, may_be_none=True)
# Fake rhs to silence "unused variable" warning
fake_rhs_expr = TypedExprNode(unspecified_type)
class ControlBlock(object):
"""Control flow graph node. Sequence of assignments and name references.
children set of children nodes
parents set of parent nodes
positions set of position markers
stats list of block statements
gen dict of assignments generated by this block
bounded set of entries that are definitely bound in this block
Example:
a = 1
b = a + c # 'c' is already bound here, or an exception is raised
stats = [Assignment(a), NameReference(a), NameReference(c),
Assignment(b)]
gen = {Entry(a): Assignment(a), Entry(b): Assignment(b)}
bounded = set([Entry(a), Entry(c)])
"""
def __init__(self):
self.children = set()
self.parents = set()
self.positions = set()
self.stats = []
self.gen = {}
self.bounded = set()
self.i_input = 0
self.i_output = 0
self.i_gen = 0
self.i_kill = 0
self.i_state = 0
def empty(self):
return (not self.stats and not self.positions)
def detach(self):
"""Detach block from parents and children."""
for child in self.children:
child.parents.remove(self)
for parent in self.parents:
parent.children.remove(self)
self.parents.clear()
self.children.clear()
def add_child(self, block):
self.children.add(block)
block.parents.add(self)
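# Hedged usage sketch: add_child() keeps both adjacency sets in sync, and
# detach() undoes all links, which normalize() below relies on.
def _sketch_block_linking():
    a, b = ControlBlock(), ControlBlock()
    a.add_child(b)
    assert b in a.children and a in b.parents
    b.detach()
    assert not a.children and not b.parents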
class ExitBlock(ControlBlock):
"""Non-empty exit point block."""
def empty(self):
return False
class AssignmentList(object):
def __init__(self):
self.stats = []
class ControlFlow(object):
"""Control-flow graph.
entry_point ControlBlock entry point for this graph
exit_point ControlBlock normal exit point
block ControlBlock current block
blocks set all blocks in this graph
entries set tracked entries
loops list stack for loop descriptors
exceptions list stack for exception descriptors
"""
def __init__(self):
self.blocks = set()
self.entries = set()
self.loops = []
self.exceptions = []
self.entry_point = ControlBlock()
self.exit_point = ExitBlock()
self.blocks.add(self.exit_point)
self.block = self.entry_point
def newblock(self, parent=None):
"""Create floating block linked to `parent` if given.
NOTE: Block is NOT added to self.blocks
"""
block = ControlBlock()
self.blocks.add(block)
if parent:
parent.add_child(block)
return block
def nextblock(self, parent=None):
"""Create block children block linked to current or `parent` if given.
NOTE: Block is added to self.blocks
"""
block = ControlBlock()
self.blocks.add(block)
if parent:
parent.add_child(block)
elif self.block:
self.block.add_child(block)
self.block = block
return self.block
def is_tracked(self, entry):
if entry.is_anonymous:
return False
return (entry.is_local or entry.is_pyclass_attr or entry.is_arg or
entry.from_closure or entry.in_closure or
entry.error_on_uninitialized)
def is_statically_assigned(self, entry):
if (entry.is_local and entry.is_variable and
(entry.type.is_struct_or_union or
entry.type.is_complex or
entry.type.is_array or
entry.type.is_cpp_class)):
# stack allocated structured variable => never uninitialised
return True
return False
def mark_position(self, node):
"""Mark position, will be used to draw graph nodes."""
if self.block:
self.block.positions.add(node.pos[:2])
def mark_assignment(self, lhs, rhs, entry):
if self.block and self.is_tracked(entry):
assignment = NameAssignment(lhs, rhs, entry)
self.block.stats.append(assignment)
self.block.gen[entry] = assignment
self.entries.add(entry)
def mark_argument(self, lhs, rhs, entry):
if self.block and self.is_tracked(entry):
assignment = Argument(lhs, rhs, entry)
self.block.stats.append(assignment)
self.block.gen[entry] = assignment
self.entries.add(entry)
def mark_deletion(self, node, entry):
if self.block and self.is_tracked(entry):
assignment = NameDeletion(node, entry)
self.block.stats.append(assignment)
self.block.gen[entry] = Uninitialized
self.entries.add(entry)
def mark_reference(self, node, entry):
if self.block and self.is_tracked(entry):
self.block.stats.append(NameReference(node, entry))
## XXX: We don't track expression evaluation order so we can't use
## XXX: successful reference as initialization sign.
## # Local variable is definitely bound after this reference
## if not node.allow_null:
## self.block.bounded.add(entry)
self.entries.add(entry)
def normalize(self):
"""Delete unreachable and orphan blocks."""
queue = set([self.entry_point])
visited = set()
while queue:
root = queue.pop()
visited.add(root)
for child in root.children:
if child not in visited:
queue.add(child)
unreachable = self.blocks - visited
for block in unreachable:
block.detach()
visited.remove(self.entry_point)
for block in visited:
if block.empty():
for parent in block.parents: # Re-parent
for child in block.children:
parent.add_child(child)
block.detach()
unreachable.add(block)
self.blocks -= unreachable
def initialize(self):
"""Set initial state, map assignments to bits."""
self.assmts = {}
bit = 1
for entry in self.entries:
assmts = AssignmentList()
assmts.mask = assmts.bit = bit
self.assmts[entry] = assmts
bit <<= 1
for block in self.blocks:
for stat in block.stats:
if isinstance(stat, NameAssignment):
stat.bit = bit
assmts = self.assmts[stat.entry]
assmts.stats.append(stat)
assmts.mask |= bit
bit <<= 1
for block in self.blocks:
for entry, stat in block.gen.items():
assmts = self.assmts[entry]
if stat is Uninitialized:
block.i_gen |= assmts.bit
else:
block.i_gen |= stat.bit
block.i_kill |= assmts.mask
block.i_output = block.i_gen
for entry in block.bounded:
block.i_kill |= self.assmts[entry].bit
for assmts in self.assmts.values():
self.entry_point.i_gen |= assmts.bit
self.entry_point.i_output = self.entry_point.i_gen
def map_one(self, istate, entry):
ret = set()
assmts = self.assmts[entry]
if istate & assmts.bit:
if self.is_statically_assigned(entry):
ret.add(StaticAssignment(entry))
elif entry.from_closure:
ret.add(Unknown)
else:
ret.add(Uninitialized)
for assmt in assmts.stats:
if istate & assmt.bit:
ret.add(assmt)
return ret
def reaching_definitions(self):
"""Per-block reaching definitions analysis."""
dirty = True
while dirty:
dirty = False
for block in self.blocks:
i_input = 0
for parent in block.parents:
i_input |= parent.i_output
i_output = (i_input & ~block.i_kill) | block.i_gen
if i_output != block.i_output:
dirty = True
block.i_input = i_input
block.i_output = i_output
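# Hedged worked example of the bitset transfer function used above, with
# plain ints standing in for blocks: entry 'x' owns bit 0b001 (its
# Uninitialized marker) plus two assignment bits 0b010 and 0b100, so its
# AssignmentList.mask is 0b111.
def _sketch_bitset_transfer():
    i_input = 0b001   # predecessors say: 'x' may be uninitialised
    i_gen = 0b010     # this block assigns 'x' once
    i_kill = 0b111    # an assignment kills every other state of 'x'
    i_output = (i_input & ~i_kill) | i_gen
    assert i_output == 0b010  # only the new assignment reaches the block exit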
class LoopDescr(object):
def __init__(self, next_block, loop_block):
self.next_block = next_block
self.loop_block = loop_block
self.exceptions = []
class ExceptionDescr(object):
"""Exception handling helper.
entry_point ControlBlock Exception handling entry point
finally_enter ControlBlock Normal finally clause entry point
finally_exit ControlBlock Normal finally clause exit point
"""
def __init__(self, entry_point, finally_enter=None, finally_exit=None):
self.entry_point = entry_point
self.finally_enter = finally_enter
self.finally_exit = finally_exit
class NameAssignment(object):
def __init__(self, lhs, rhs, entry):
if lhs.cf_state is None:
lhs.cf_state = set()
self.lhs = lhs
self.rhs = rhs
self.entry = entry
self.pos = lhs.pos
self.refs = set()
self.is_arg = False
self.is_deletion = False
self.inferred_type = None
def __repr__(self):
return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
def infer_type(self):
self.inferred_type = self.rhs.infer_type(self.entry.scope)
return self.inferred_type
def type_dependencies(self):
return self.rhs.type_dependencies(self.entry.scope)
@property
def type(self):
if not self.entry.type.is_unspecified:
return self.entry.type
return self.inferred_type
class StaticAssignment(NameAssignment):
"""Initialised at declaration time, e.g. stack allocation."""
def __init__(self, entry):
if not entry.type.is_pyobject:
may_be_none = False
else:
may_be_none = None # unknown
lhs = TypedExprNode(
entry.type, may_be_none=may_be_none, pos=entry.pos)
super(StaticAssignment, self).__init__(lhs, lhs, entry)
def infer_type(self):
return self.entry.type
def type_dependencies(self):
return ()
class Argument(NameAssignment):
def __init__(self, lhs, rhs, entry):
NameAssignment.__init__(self, lhs, rhs, entry)
self.is_arg = True
class NameDeletion(NameAssignment):
def __init__(self, lhs, entry):
NameAssignment.__init__(self, lhs, lhs, entry)
self.is_deletion = True
def infer_type(self):
inferred_type = self.rhs.infer_type(self.entry.scope)
if (not inferred_type.is_pyobject and
inferred_type.can_coerce_to_pyobject(self.entry.scope)):
return py_object_type
self.inferred_type = inferred_type
return inferred_type
class Uninitialized(object):
"""Definitely not initialised yet."""
class Unknown(object):
"""Coming from outer closure, might be initialised or not."""
class NameReference(object):
def __init__(self, node, entry):
if node.cf_state is None:
node.cf_state = set()
self.node = node
self.entry = entry
self.pos = node.pos
def __repr__(self):
return '%s(entry=%r)' % (self.__class__.__name__, self.entry)
class ControlFlowState(list):
# Keeps track of Node's entry assignments
#
# cf_is_null [boolean] It is uninitialized
# cf_maybe_null [boolean] May be uninitialized
# is_single [boolean] Has only one assignment at this point
cf_maybe_null = False
cf_is_null = False
is_single = False
def __init__(self, state):
if Uninitialized in state:
state.discard(Uninitialized)
self.cf_maybe_null = True
if not state:
self.cf_is_null = True
elif Unknown in state:
state.discard(Unknown)
self.cf_maybe_null = True
else:
if len(state) == 1:
self.is_single = True
# XXX: Remove fake_rhs_expr
super(ControlFlowState, self).__init__(
[i for i in state if i.rhs is not fake_rhs_expr])
def one(self):
return self[0]
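# Hedged usage sketch: the constructor folds the raw marker set into flags.
def _sketch_cf_state_flags():
    st = ControlFlowState(set([Uninitialized]))
    assert st.cf_is_null and st.cf_maybe_null and len(st) == 0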
class GVContext(object):
"""Graphviz subgraph object."""
def __init__(self):
self.blockids = {}
self.nextid = 0
self.children = []
self.sources = {}
def add(self, child):
self.children.append(child)
def nodeid(self, block):
if block not in self.blockids:
self.blockids[block] = 'block%d' % self.nextid
self.nextid += 1
return self.blockids[block]
def extract_sources(self, block):
if not block.positions:
return ''
start = min(block.positions)
stop = max(block.positions)
srcdescr = start[0]
if srcdescr not in self.sources:
self.sources[srcdescr] = list(srcdescr.get_lines())
lines = self.sources[srcdescr]
return '\\n'.join([l.strip() for l in lines[start[1] - 1:stop[1]]])
def render(self, fp, name, annotate_defs=False):
"""Render graphviz dot graph"""
fp.write('digraph %s {\n' % name)
fp.write(' node [shape=box];\n')
for child in self.children:
child.render(fp, self, annotate_defs)
fp.write('}\n')
def escape(self, text):
return text.replace('"', '\\"').replace('\n', '\\n')
class GV(object):
"""Graphviz DOT renderer."""
def __init__(self, name, flow):
self.name = name
self.flow = flow
def render(self, fp, ctx, annotate_defs=False):
fp.write(' subgraph %s {\n' % self.name)
for block in self.flow.blocks:
label = ctx.extract_sources(block)
if annotate_defs:
for stat in block.stats:
if isinstance(stat, NameAssignment):
label += '\n %s [%s %s]' % (
stat.entry.name, 'deletion' if stat.is_deletion else 'definition', stat.pos[1])
elif isinstance(stat, NameReference):
if stat.entry:
label += '\n %s [reference %s]' % (stat.entry.name, stat.pos[1])
if not label:
label = 'empty'
pid = ctx.nodeid(block)
fp.write(' %s [label="%s"];\n' % (pid, ctx.escape(label)))
for block in self.flow.blocks:
pid = ctx.nodeid(block)
for child in block.children:
fp.write(' %s -> %s;\n' % (pid, ctx.nodeid(child)))
fp.write(' }\n')
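# Hedged usage sketch: even an empty context renders a well-formed digraph,
# which visit_ModuleNode() below writes to the control_flow.dot_output path.
def _sketch_render_empty_graph():
    import io
    buf = io.StringIO()
    GVContext().render(buf, 'module')
    assert buf.getvalue().startswith('digraph module {')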
class MessageCollection(object):
"""Collect error/warnings messages first then sort"""
def __init__(self):
self.messages = set()
def error(self, pos, message):
self.messages.add((pos, True, message))
def warning(self, pos, message):
self.messages.add((pos, False, message))
def report(self):
for pos, is_error, message in sorted(self.messages):
if is_error:
error(pos, message)
else:
warning(pos, message, 2)
def check_definitions(flow, compiler_directives):
flow.initialize()
flow.reaching_definitions()
# Track down state
assignments = set()
# Node to entry map
references = {}
assmt_nodes = set()
for block in flow.blocks:
i_state = block.i_input
for stat in block.stats:
i_assmts = flow.assmts[stat.entry]
state = flow.map_one(i_state, stat.entry)
if isinstance(stat, NameAssignment):
stat.lhs.cf_state.update(state)
assmt_nodes.add(stat.lhs)
i_state = i_state & ~i_assmts.mask
if stat.is_deletion:
i_state |= i_assmts.bit
else:
i_state |= stat.bit
assignments.add(stat)
if stat.rhs is not fake_rhs_expr:
stat.entry.cf_assignments.append(stat)
elif isinstance(stat, NameReference):
references[stat.node] = stat.entry
stat.entry.cf_references.append(stat)
stat.node.cf_state.update(state)
## if not stat.node.allow_null:
## i_state &= ~i_assmts.bit
## # after successful read, the state is known to be initialised
state.discard(Uninitialized)
state.discard(Unknown)
for assmt in state:
assmt.refs.add(stat)
# Check variable usage
warn_maybe_uninitialized = compiler_directives['warn.maybe_uninitialized']
warn_unused_result = compiler_directives['warn.unused_result']
warn_unused = compiler_directives['warn.unused']
warn_unused_arg = compiler_directives['warn.unused_arg']
messages = MessageCollection()
# assignment hints
for node in assmt_nodes:
if Uninitialized in node.cf_state:
node.cf_maybe_null = True
if len(node.cf_state) == 1:
node.cf_is_null = True
else:
node.cf_is_null = False
elif Unknown in node.cf_state:
node.cf_maybe_null = True
else:
node.cf_is_null = False
node.cf_maybe_null = False
# Find uninitialized references and cf-hints
for node, entry in references.items():
if Uninitialized in node.cf_state:
node.cf_maybe_null = True
if not entry.from_closure and len(node.cf_state) == 1:
node.cf_is_null = True
if (node.allow_null or entry.from_closure
or entry.is_pyclass_attr or entry.type.is_error):
pass # Can be uninitialized here
elif node.cf_is_null:
if entry.error_on_uninitialized or (
Options.error_on_uninitialized and (
entry.type.is_pyobject or entry.type.is_unspecified)):
messages.error(
node.pos,
"local variable '%s' referenced before assignment"
% entry.name)
else:
messages.warning(
node.pos,
"local variable '%s' referenced before assignment"
% entry.name)
elif warn_maybe_uninitialized:
messages.warning(
node.pos,
"local variable '%s' might be referenced before assignment"
% entry.name)
elif Unknown in node.cf_state:
# TODO: better cross-closure analysis to know when inner functions
# are being called before a variable is being set, and when
# a variable is known to be set before even defining the
# inner function, etc.
node.cf_maybe_null = True
else:
node.cf_is_null = False
node.cf_maybe_null = False
# Unused result
for assmt in assignments:
if (not assmt.refs and not assmt.entry.is_pyclass_attr
and not assmt.entry.in_closure):
if assmt.entry.cf_references and warn_unused_result:
if assmt.is_arg:
messages.warning(assmt.pos, "Unused argument value '%s'" %
assmt.entry.name)
else:
messages.warning(assmt.pos, "Unused result in '%s'" %
assmt.entry.name)
assmt.lhs.cf_used = False
# Unused entries
for entry in flow.entries:
if (not entry.cf_references
and not entry.is_pyclass_attr):
if entry.name != '_' and not entry.name.startswith('unused'):
# '_' is often used for unused variables, e.g. in loops
if entry.is_arg:
if warn_unused_arg:
messages.warning(entry.pos, "Unused argument '%s'" %
entry.name)
else:
if warn_unused:
messages.warning(entry.pos, "Unused entry '%s'" %
entry.name)
entry.cf_used = False
messages.report()
for node in assmt_nodes:
node.cf_state = ControlFlowState(node.cf_state)
for node in references:
node.cf_state = ControlFlowState(node.cf_state)
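# Hedged note: the warn.* directives read above all default to False in
# Cython 0.29 (an assumption -- see Options.directive_defaults for the real
# values), so these hints only surface when users opt in, e.g. with a
# directive comment at the top of the .pyx file:
#
#     # cython: warn.maybe_uninitialized=True, warn.unused=True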
class AssignmentCollector(TreeVisitor):
def __init__(self):
super(AssignmentCollector, self).__init__()
self.assignments = []
def visit_Node(self, node):
# visitor handlers receive the visited node; without it, any dispatch to
# this fallback would fail with a TypeError
self._visitchildren(node, None)
def visit_SingleAssignmentNode(self, node):
self.assignments.append((node.lhs, node.rhs))
def visit_CascadedAssignmentNode(self, node):
for lhs in node.lhs_list:
self.assignments.append((lhs, node.rhs))
class ControlFlowAnalysis(CythonTransform):
def visit_ModuleNode(self, node):
self.gv_ctx = GVContext()
self.constant_folder = ConstantFolding()
# Set of NameNode reductions
self.reductions = set()
self.in_inplace_assignment = False
self.env_stack = []
self.env = node.scope
self.stack = []
self.flow = ControlFlow()
self.visitchildren(node)
check_definitions(self.flow, self.current_directives)
dot_output = self.current_directives['control_flow.dot_output']
if dot_output:
annotate_defs = self.current_directives['control_flow.dot_annotate_defs']
fp = open(dot_output, 'wt')
try:
self.gv_ctx.render(fp, 'module', annotate_defs=annotate_defs)
finally:
fp.close()
return node
def visit_FuncDefNode(self, node):
for arg in node.args:
if arg.default:
self.visitchildren(arg)
self.visitchildren(node, ('decorators',))
self.env_stack.append(self.env)
self.env = node.local_scope
self.stack.append(self.flow)
self.flow = ControlFlow()
# Collect all entries
for entry in node.local_scope.entries.values():
if self.flow.is_tracked(entry):
self.flow.entries.add(entry)
self.mark_position(node)
# Function body block
self.flow.nextblock()
for arg in node.args:
self._visit(arg)
if node.star_arg:
self.flow.mark_argument(node.star_arg,
TypedExprNode(Builtin.tuple_type,
may_be_none=False),
node.star_arg.entry)
if node.starstar_arg:
self.flow.mark_argument(node.starstar_arg,
TypedExprNode(Builtin.dict_type,
may_be_none=False),
node.starstar_arg.entry)
self._visit(node.body)
# Workaround for generators
if node.is_generator:
self._visit(node.gbody.body)
# Exit point
if self.flow.block:
self.flow.block.add_child(self.flow.exit_point)
# Cleanup graph
self.flow.normalize()
check_definitions(self.flow, self.current_directives)
self.flow.blocks.add(self.flow.entry_point)
self.gv_ctx.add(GV(node.local_scope.name, self.flow))
self.flow = self.stack.pop()
self.env = self.env_stack.pop()
return node
def visit_DefNode(self, node):
node.used = True
return self.visit_FuncDefNode(node)
def visit_GeneratorBodyDefNode(self, node):
return node
def visit_CTypeDefNode(self, node):
return node
def mark_assignment(self, lhs, rhs=None):
if not self.flow.block:
return
if self.flow.exceptions:
exc_descr = self.flow.exceptions[-1]
self.flow.block.add_child(exc_descr.entry_point)
self.flow.nextblock()
if not rhs:
rhs = object_expr
if lhs.is_name:
if lhs.entry is not None:
entry = lhs.entry
else:
entry = self.env.lookup(lhs.name)
if entry is None: # TODO: This shouldn't happen...
return
self.flow.mark_assignment(lhs, rhs, entry)
elif lhs.is_sequence_constructor:
for i, arg in enumerate(lhs.args):
if not rhs or arg.is_starred:
item_node = None
else:
item_node = rhs.inferable_item_node(i)
self.mark_assignment(arg, item_node)
else:
self._visit(lhs)
if self.flow.exceptions:
exc_descr = self.flow.exceptions[-1]
self.flow.block.add_child(exc_descr.entry_point)
self.flow.nextblock()
def mark_position(self, node):
"""Mark position if DOT output is enabled."""
if self.current_directives['control_flow.dot_output']:
self.flow.mark_position(node)
def visit_FromImportStatNode(self, node):
for name, target in node.items:
if name != "*":
self.mark_assignment(target)
self.visitchildren(node)
return node
def visit_AssignmentNode(self, node):
raise InternalError("Unhandled assignment node")
def visit_SingleAssignmentNode(self, node):
self._visit(node.rhs)
self.mark_assignment(node.lhs, node.rhs)
return node
def visit_CascadedAssignmentNode(self, node):
self._visit(node.rhs)
for lhs in node.lhs_list:
self.mark_assignment(lhs, node.rhs)
return node
def visit_ParallelAssignmentNode(self, node):
collector = AssignmentCollector()
collector.visitchildren(node)
for lhs, rhs in collector.assignments:
self._visit(rhs)
for lhs, rhs in collector.assignments:
self.mark_assignment(lhs, rhs)
return node
def visit_InPlaceAssignmentNode(self, node):
self.in_inplace_assignment = True
self.visitchildren(node)
self.in_inplace_assignment = False
self.mark_assignment(node.lhs, self.constant_folder(node.create_binop_node()))
return node
def visit_DelStatNode(self, node):
for arg in node.args:
if arg.is_name:
entry = arg.entry or self.env.lookup(arg.name)
if entry.in_closure or entry.from_closure:
error(arg.pos,
"can not delete variable '%s' "
"referenced in nested scope" % entry.name)
if not node.ignore_nonexisting:
self._visit(arg) # mark reference
self.flow.mark_deletion(arg, entry)
else:
self._visit(arg)
return node
def visit_CArgDeclNode(self, node):
entry = self.env.lookup(node.name)
if entry:
may_be_none = not node.not_none
self.flow.mark_argument(
node, TypedExprNode(entry.type, may_be_none), entry)
return node
def visit_NameNode(self, node):
if self.flow.block:
entry = node.entry or self.env.lookup(node.name)
if entry:
self.flow.mark_reference(node, entry)
if entry in self.reductions and not self.in_inplace_assignment:
error(node.pos,
"Cannot read reduction variable in loop body")
return node
def visit_StatListNode(self, node):
if self.flow.block:
for stat in node.stats:
self._visit(stat)
if not self.flow.block:
stat.is_terminator = True
break
return node
def visit_Node(self, node):
self.visitchildren(node)
self.mark_position(node)
return node
def visit_SizeofVarNode(self, node):
return node
def visit_TypeidNode(self, node):
return node
def visit_IfStatNode(self, node):
next_block = self.flow.newblock()
parent = self.flow.block
# If clauses
for clause in node.if_clauses:
parent = self.flow.nextblock(parent)
self._visit(clause.condition)
self.flow.nextblock()
self._visit(clause.body)
if self.flow.block:
self.flow.block.add_child(next_block)
# Else clause
if node.else_clause:
self.flow.nextblock(parent=parent)
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
else:
parent.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
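# Hedged CFG sketch for the construction above, for
# "if c1: A elif c2: B else: C" (every body whose block is still live
# falls through to next_block):
#
#     parent -> cond(c1) -> body(A) -> next_block
#     cond(c1) -> cond(c2) -> body(B) -> next_block
#     cond(c2) -> else(C) -> next_block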
def visit_WhileStatNode(self, node):
condition_block = self.flow.nextblock()
next_block = self.flow.newblock()
# Condition block
self.flow.loops.append(LoopDescr(next_block, condition_block))
if node.condition:
self._visit(node.condition)
# Body block
self.flow.nextblock()
self._visit(node.body)
self.flow.loops.pop()
# Loop it
if self.flow.block:
self.flow.block.add_child(condition_block)
self.flow.block.add_child(next_block)
# Else clause
if node.else_clause:
self.flow.nextblock(parent=condition_block)
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
else:
condition_block.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def mark_forloop_target(self, node):
# TODO: Remove redundancy with range optimization...
is_special = False
sequence = node.iterator.sequence
target = node.target
if isinstance(sequence, ExprNodes.SimpleCallNode):
function = sequence.function
if sequence.self is None and function.is_name:
entry = self.env.lookup(function.name)
if not entry or entry.is_builtin:
if function.name == 'reversed' and len(sequence.args) == 1:
sequence = sequence.args[0]
elif function.name == 'enumerate' and len(sequence.args) == 1:
if target.is_sequence_constructor and len(target.args) == 2:
iterator = sequence.args[0]
if iterator.is_name:
iterator_type = iterator.infer_type(self.env)
if iterator_type.is_builtin_type:
# assume that builtin types have a length within Py_ssize_t
self.mark_assignment(
target.args[0],
ExprNodes.IntNode(target.pos, value='PY_SSIZE_T_MAX',
type=PyrexTypes.c_py_ssize_t_type))
target = target.args[1]
sequence = sequence.args[0]
if isinstance(sequence, ExprNodes.SimpleCallNode):
function = sequence.function
if sequence.self is None and function.is_name:
entry = self.env.lookup(function.name)
if not entry or entry.is_builtin:
if function.name in ('range', 'xrange'):
is_special = True
for arg in sequence.args[:2]:
self.mark_assignment(target, arg)
if len(sequence.args) > 2:
self.mark_assignment(target, self.constant_folder(
ExprNodes.binop_node(node.pos,
'+',
sequence.args[0],
sequence.args[2])))
if not is_special:
# A for-loop basically translates to subsequent calls to
# __getitem__(), so using an IndexNode here allows us to
# naturally infer the base type of pointers, C arrays,
# Python strings, etc., while correctly falling back to an
# object type when the base type cannot be handled.
self.mark_assignment(target, node.item)
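# Hedged sketch of the assignments marked above for
# "for i in range(start, stop, step)":
#
#     mark_assignment(i, start)
#     mark_assignment(i, stop)
#     mark_assignment(i, start + step)   # constant-folded when possible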
def visit_AsyncForStatNode(self, node):
return self.visit_ForInStatNode(node)
def visit_ForInStatNode(self, node):
condition_block = self.flow.nextblock()
next_block = self.flow.newblock()
# Condition with iterator
self.flow.loops.append(LoopDescr(next_block, condition_block))
self._visit(node.iterator)
# Target assignment
self.flow.nextblock()
if isinstance(node, Nodes.ForInStatNode):
self.mark_forloop_target(node)
elif isinstance(node, Nodes.AsyncForStatNode):
# not entirely correct, but good enough for now
self.mark_assignment(node.target, node.item)
else: # Parallel
self.mark_assignment(node.target)
# Body block
if isinstance(node, Nodes.ParallelRangeNode):
# In case of an invalid loop, the privates may never be assigned,
# so mark them as deleted up front (except the loop target)
self._delete_privates(node, exclude=node.target.entry)
self.flow.nextblock()
self._visit(node.body)
self.flow.loops.pop()
# Loop it
if self.flow.block:
self.flow.block.add_child(condition_block)
# Else clause
if node.else_clause:
self.flow.nextblock(parent=condition_block)
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
else:
condition_block.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def _delete_privates(self, node, exclude=None):
for private_node in node.assigned_nodes:
if not exclude or private_node.entry is not exclude:
self.flow.mark_deletion(private_node, private_node.entry)
def visit_ParallelRangeNode(self, node):
reductions = self.reductions
# if node.target is None or not a NameNode, an error will have
# been previously issued
if hasattr(node.target, 'entry'):
self.reductions = set(reductions)
for private_node in node.assigned_nodes:
private_node.entry.error_on_uninitialized = True
pos, reduction = node.assignments[private_node.entry]
if reduction:
self.reductions.add(private_node.entry)
node = self.visit_ForInStatNode(node)
self.reductions = reductions
return node
def visit_ParallelWithBlockNode(self, node):
for private_node in node.assigned_nodes:
private_node.entry.error_on_uninitialized = True
self._delete_privates(node)
self.visitchildren(node)
self._delete_privates(node)
return node
def visit_ForFromStatNode(self, node):
condition_block = self.flow.nextblock()
next_block = self.flow.newblock()
# Condition with iterator
self.flow.loops.append(LoopDescr(next_block, condition_block))
self._visit(node.bound1)
self._visit(node.bound2)
if node.step is not None:
self._visit(node.step)
# Target assignment
self.flow.nextblock()
self.mark_assignment(node.target, node.bound1)
if node.step is not None:
self.mark_assignment(node.target, self.constant_folder(
ExprNodes.binop_node(node.pos, '+', node.bound1, node.step)))
# Body block
self.flow.nextblock()
self._visit(node.body)
self.flow.loops.pop()
# Loop it
if self.flow.block:
self.flow.block.add_child(condition_block)
# Else clause
if node.else_clause:
self.flow.nextblock(parent=condition_block)
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
else:
condition_block.add_child(next_block)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def visit_LoopNode(self, node):
raise InternalError("Generic loops are not supported")
def visit_WithTargetAssignmentStatNode(self, node):
self.mark_assignment(node.lhs, node.with_node.enter_call)
return node
def visit_WithStatNode(self, node):
self._visit(node.manager)
self._visit(node.enter_call)
self._visit(node.body)
return node
def visit_TryExceptStatNode(self, node):
# After exception handling
next_block = self.flow.newblock()
# Body block
self.flow.newblock()
# Exception entry point
entry_point = self.flow.newblock()
self.flow.exceptions.append(ExceptionDescr(entry_point))
self.flow.nextblock()
## XXX: links to exception handling point should be added by
## XXX: children nodes
self.flow.block.add_child(entry_point)
self.flow.nextblock()
self._visit(node.body)
self.flow.exceptions.pop()
# After exception
if self.flow.block:
if node.else_clause:
self.flow.nextblock()
self._visit(node.else_clause)
if self.flow.block:
self.flow.block.add_child(next_block)
for clause in node.except_clauses:
self.flow.block = entry_point
if clause.pattern:
for pattern in clause.pattern:
self._visit(pattern)
else:
# TODO: handle * pattern
pass
entry_point = self.flow.newblock(parent=self.flow.block)
self.flow.nextblock()
if clause.target:
self.mark_assignment(clause.target)
self._visit(clause.body)
if self.flow.block:
self.flow.block.add_child(next_block)
if self.flow.exceptions:
entry_point.add_child(self.flow.exceptions[-1].entry_point)
if next_block.parents:
self.flow.block = next_block
else:
self.flow.block = None
return node
def visit_TryFinallyStatNode(self, node):
body_block = self.flow.nextblock()
# Exception entry point
entry_point = self.flow.newblock()
self.flow.block = entry_point
self._visit(node.finally_except_clause)
if self.flow.block and self.flow.exceptions:
self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
# Normal execution
finally_enter = self.flow.newblock()
self.flow.block = finally_enter
self._visit(node.finally_clause)
finally_exit = self.flow.block
descr = ExceptionDescr(entry_point, finally_enter, finally_exit)
self.flow.exceptions.append(descr)
if self.flow.loops:
self.flow.loops[-1].exceptions.append(descr)
self.flow.block = body_block
body_block.add_child(entry_point)
self.flow.nextblock()
self._visit(node.body)
self.flow.exceptions.pop()
if self.flow.loops:
self.flow.loops[-1].exceptions.pop()
if self.flow.block:
self.flow.block.add_child(finally_enter)
if finally_exit:
self.flow.block = self.flow.nextblock(parent=finally_exit)
else:
self.flow.block = None
return node
def visit_RaiseStatNode(self, node):
self.mark_position(node)
self.visitchildren(node)
if self.flow.exceptions:
self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
self.flow.block = None
return node
def visit_ReraiseStatNode(self, node):
self.mark_position(node)
if self.flow.exceptions:
self.flow.block.add_child(self.flow.exceptions[-1].entry_point)
self.flow.block = None
return node
def visit_ReturnStatNode(self, node):
self.mark_position(node)
self.visitchildren(node)
outer_exception_handlers = iter(self.flow.exceptions[::-1])
for handler in outer_exception_handlers:
if handler.finally_enter:
self.flow.block.add_child(handler.finally_enter)
if handler.finally_exit:
# 'return' goes to function exit, or to the next outer 'finally' clause
exit_point = self.flow.exit_point
for next_handler in outer_exception_handlers:
if next_handler.finally_enter:
exit_point = next_handler.finally_enter
break
handler.finally_exit.add_child(exit_point)
break
else:
if self.flow.block:
self.flow.block.add_child(self.flow.exit_point)
self.flow.block = None
return node
def visit_BreakStatNode(self, node):
if not self.flow.loops:
#error(node.pos, "break statement not inside loop")
return node
loop = self.flow.loops[-1]
self.mark_position(node)
for exception in loop.exceptions[::-1]:
if exception.finally_enter:
self.flow.block.add_child(exception.finally_enter)
if exception.finally_exit:
exception.finally_exit.add_child(loop.next_block)
break
else:
self.flow.block.add_child(loop.next_block)
self.flow.block = None
return node
def visit_ContinueStatNode(self, node):
if not self.flow.loops:
#error(node.pos, "continue statement not inside loop")
return node
loop = self.flow.loops[-1]
self.mark_position(node)
for exception in loop.exceptions[::-1]:
if exception.finally_enter:
self.flow.block.add_child(exception.finally_enter)
if exception.finally_exit:
exception.finally_exit.add_child(loop.loop_block)
break
else:
self.flow.block.add_child(loop.loop_block)
self.flow.block = None
return node
def visit_ComprehensionNode(self, node):
if node.expr_scope:
self.env_stack.append(self.env)
self.env = node.expr_scope
# Skip append node here
self._visit(node.loop)
if node.expr_scope:
self.env = self.env_stack.pop()
return node
def visit_ScopedExprNode(self, node):
if node.expr_scope:
self.env_stack.append(self.env)
self.env = node.expr_scope
self.visitchildren(node)
if node.expr_scope:
self.env = self.env_stack.pop()
return node
def visit_PyClassDefNode(self, node):
self.visitchildren(node, ('dict', 'metaclass',
'mkw', 'bases', 'class_result'))
self.flow.mark_assignment(node.target, node.classobj,
self.env.lookup(node.name))
self.env_stack.append(self.env)
self.env = node.scope
self.flow.nextblock()
self.visitchildren(node, ('body',))
self.flow.nextblock()
self.env = self.env_stack.pop()
return node
def visit_AmpersandNode(self, node):
if node.operand.is_name:
# Fake assignment to silence warning
self.mark_assignment(node.operand, fake_rhs_expr)
self.visitchildren(node)
return node
# File: Cython-0.29.28/Cython/Compiler/FusedNode.py
from __future__ import absolute_import
import copy
from . import (ExprNodes, PyrexTypes, MemoryView,
ParseTreeTransforms, StringEncoding, Errors)
from .ExprNodes import CloneNode, ProxyNode, TupleNode
from .Nodes import FuncDefNode, CFuncDefNode, StatListNode, DefNode
from ..Utils import OrderedSet
class FusedCFuncDefNode(StatListNode):
"""
This node replaces a function with fused arguments. It deep-copies the
function for every permutation of fused types, and allocates a new local
scope for it. It keeps track of the original function in self.node, and
the entry of the original function in the symbol table is given the
'fused_cfunction' attribute which points back to us.
Then when a function lookup occurs (e.g. to call it), the call can be
dispatched to the right function.
node FuncDefNode the original function
nodes [FuncDefNode] list of copies of node with different specific types
py_func DefNode the fused python function subscriptable from
Python space
__signatures__ A DictNode mapping signature specialization strings
to PyCFunction nodes
resulting_fused_function PyCFunction for the fused DefNode that delegates
to specializations
fused_func_assignment Assignment of the fused function to the function name
defaults_tuple TupleNode of defaults (letting PyCFunctionNode build
defaults would result in many different tuples)
specialized_pycfuncs List of synthesized pycfunction nodes for the
specializations
code_object CodeObjectNode shared by all specializations and the
fused function
fused_compound_types All fused (compound) types (e.g. floating[:])
"""
__signatures__ = None
resulting_fused_function = None
fused_func_assignment = None
defaults_tuple = None
decorators = None
child_attrs = StatListNode.child_attrs + [
'__signatures__', 'resulting_fused_function', 'fused_func_assignment']
def __init__(self, node, env):
super(FusedCFuncDefNode, self).__init__(node.pos)
self.nodes = []
self.node = node
is_def = isinstance(self.node, DefNode)
if is_def:
# self.node.decorators = []
self.copy_def(env)
else:
self.copy_cdef(env)
# Perform some sanity checks. If anything fails, it's a bug
for n in self.nodes:
assert not n.entry.type.is_fused
assert not n.local_scope.return_type.is_fused
if node.return_type.is_fused:
assert not n.return_type.is_fused
if not is_def and n.cfunc_declarator.optional_arg_count:
assert n.type.op_arg_struct
node.entry.fused_cfunction = self
# Copy the nodes as AnalyseDeclarationsTransform will prepend
# self.py_func to self.stats, as we only want specialized
# CFuncDefNodes in self.nodes
self.stats = self.nodes[:]
def copy_def(self, env):
"""
Create a copy of the original def or lambda function for specialized
versions.
"""
fused_compound_types = PyrexTypes.unique(
[arg.type for arg in self.node.args if arg.type.is_fused])
fused_types = self._get_fused_base_types(fused_compound_types)
permutations = PyrexTypes.get_all_specialized_permutations(fused_types)
self.fused_compound_types = fused_compound_types
if self.node.entry in env.pyfunc_entries:
env.pyfunc_entries.remove(self.node.entry)
for cname, fused_to_specific in permutations:
copied_node = copy.deepcopy(self.node)
# keep signature object identity for special casing in DefNode.analyse_declarations()
copied_node.entry.signature = self.node.entry.signature
self._specialize_function_args(copied_node.args, fused_to_specific)
copied_node.return_type = self.node.return_type.specialize(
fused_to_specific)
copied_node.analyse_declarations(env)
# copied_node.is_staticmethod = self.node.is_staticmethod
# copied_node.is_classmethod = self.node.is_classmethod
self.create_new_local_scope(copied_node, env, fused_to_specific)
self.specialize_copied_def(copied_node, cname, self.node.entry,
fused_to_specific, fused_compound_types)
PyrexTypes.specialize_entry(copied_node.entry, cname)
copied_node.entry.used = True
env.entries[copied_node.entry.name] = copied_node.entry
if not self.replace_fused_typechecks(copied_node):
break
self.orig_py_func = self.node
self.py_func = self.make_fused_cpdef(self.node, env, is_def=True)
def copy_cdef(self, env):
"""
Create a copy of the original c(p)def function for all specialized
versions.
"""
permutations = self.node.type.get_all_specialized_permutations()
# print 'Node %s has %d specializations:' % (self.node.entry.name,
# len(permutations))
# import pprint; pprint.pprint([d for cname, d in permutations])
# Prevent copying of the python function
self.orig_py_func = orig_py_func = self.node.py_func
self.node.py_func = None
if orig_py_func:
env.pyfunc_entries.remove(orig_py_func.entry)
fused_types = self.node.type.get_fused_types()
self.fused_compound_types = fused_types
new_cfunc_entries = []
for cname, fused_to_specific in permutations:
copied_node = copy.deepcopy(self.node)
# Make the types in our CFuncType specific.
type = copied_node.type.specialize(fused_to_specific)
entry = copied_node.entry
type.specialize_entry(entry, cname)
# Reuse existing Entries (e.g. from .pxd files).
for i, orig_entry in enumerate(env.cfunc_entries):
if entry.cname == orig_entry.cname and type.same_as_resolved_type(orig_entry.type):
copied_node.entry = env.cfunc_entries[i]
if not copied_node.entry.func_cname:
copied_node.entry.func_cname = entry.func_cname
entry = copied_node.entry
type = entry.type
break
else:
new_cfunc_entries.append(entry)
copied_node.type = type
entry.type, type.entry = type, entry
entry.used = (entry.used or
self.node.entry.defined_in_pxd or
env.is_c_class_scope or
entry.is_cmethod)
if self.node.cfunc_declarator.optional_arg_count:
self.node.cfunc_declarator.declare_optional_arg_struct(
type, env, fused_cname=cname)
copied_node.return_type = type.return_type
self.create_new_local_scope(copied_node, env, fused_to_specific)
# Make the argument types in the CFuncDeclarator specific
self._specialize_function_args(copied_node.cfunc_declarator.args,
fused_to_specific)
# If a cpdef, declare all specialized cpdefs (this
# also calls analyse_declarations)
copied_node.declare_cpdef_wrapper(env)
if copied_node.py_func:
env.pyfunc_entries.remove(copied_node.py_func.entry)
self.specialize_copied_def(
copied_node.py_func, cname, self.node.entry.as_variable,
fused_to_specific, fused_types)
if not self.replace_fused_typechecks(copied_node):
break
# replace old entry with new entries
try:
cindex = env.cfunc_entries.index(self.node.entry)
except ValueError:
env.cfunc_entries.extend(new_cfunc_entries)
else:
env.cfunc_entries[cindex:cindex+1] = new_cfunc_entries
if orig_py_func:
self.py_func = self.make_fused_cpdef(orig_py_func, env,
is_def=False)
else:
self.py_func = orig_py_func
def _get_fused_base_types(self, fused_compound_types):
"""
Get a list of unique basic fused types, from a list of
(possibly) compound fused types.
"""
base_types = []
seen = set()
for fused_type in fused_compound_types:
fused_type.get_fused_types(result=base_types, seen=seen)
return base_types
def _specialize_function_args(self, args, fused_to_specific):
for arg in args:
if arg.type.is_fused:
arg.type = arg.type.specialize(fused_to_specific)
if arg.type.is_memoryviewslice:
arg.type.validate_memslice_dtype(arg.pos)
def create_new_local_scope(self, node, env, f2s):
"""
Create a new local scope for the copied node and append it to
self.nodes. A new local scope is needed because the arguments with the
fused types are already in the local scope, and we need the specialized
entries created after analyse_declarations on each specialized version
of the (CFunc)DefNode.
f2s is a dict mapping each fused type to its specialized version
"""
node.create_local_scope(env)
node.local_scope.fused_to_specific = f2s
# This is copied from the original function, set it to false to
# stop recursion
node.has_fused_arguments = False
self.nodes.append(node)
def specialize_copied_def(self, node, cname, py_entry, f2s, fused_compound_types):
"""Specialize the copy of a DefNode given the copied node,
the specialization cname and the original DefNode entry"""
fused_types = self._get_fused_base_types(fused_compound_types)
type_strings = [
PyrexTypes.specialization_signature_string(fused_type, f2s)
for fused_type in fused_types
]
node.specialized_signature_string = '|'.join(type_strings)
node.entry.pymethdef_cname = PyrexTypes.get_fused_cname(
cname, node.entry.pymethdef_cname)
node.entry.doc = py_entry.doc
node.entry.doc_cname = py_entry.doc_cname
def replace_fused_typechecks(self, copied_node):
"""
Branch-prune fused type checks like
if fused_t is int:
...
Returns whether an error was issued and whether we should stop
in order to prevent a flood of errors.
"""
num_errors = Errors.num_errors
transform = ParseTreeTransforms.ReplaceFusedTypeChecks(
copied_node.local_scope)
transform(copied_node)
if Errors.num_errors > num_errors:
return False
return True
def _fused_instance_checks(self, normal_types, pyx_code, env):
"""
Generate Cython code for instance checks, matching an object to
specialized types.
"""
for specialized_type in normal_types:
# all_numeric = all_numeric and specialized_type.is_numeric
pyx_code.context.update(
py_type_name=specialized_type.py_type_name(),
specialized_type_name=specialized_type.specialization_string,
)
pyx_code.put_chunk(
u"""
if isinstance(arg, {{py_type_name}}):
dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'; break
""")
def _dtype_name(self, dtype):
if dtype.is_typedef:
return '___pyx_%s' % dtype
return str(dtype).replace(' ', '_')
def _dtype_type(self, dtype):
if dtype.is_typedef:
return self._dtype_name(dtype)
return str(dtype)
def _sizeof_dtype(self, dtype):
if dtype.is_pyobject:
return 'sizeof(void *)'
else:
return "sizeof(%s)" % self._dtype_type(dtype)
def _buffer_check_numpy_dtype_setup_cases(self, pyx_code):
"Setup some common cases to match dtypes against specializations"
if pyx_code.indenter("if kind in b'iu':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_int")
pyx_code.dedent()
if pyx_code.indenter("elif kind == b'f':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_float")
pyx_code.dedent()
if pyx_code.indenter("elif kind == b'c':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_complex")
pyx_code.dedent()
if pyx_code.indenter("elif kind == b'O':"):
pyx_code.putln("pass")
pyx_code.named_insertion_point("dtype_object")
pyx_code.dedent()
match = "dest_sig[{{dest_sig_idx}}] = '{{specialized_type_name}}'"
no_match = "dest_sig[{{dest_sig_idx}}] = None"
def _buffer_check_numpy_dtype(self, pyx_code, specialized_buffer_types, pythran_types):
"""
Match a numpy dtype object to the individual specializations.
"""
self._buffer_check_numpy_dtype_setup_cases(pyx_code)
for specialized_type in pythran_types+specialized_buffer_types:
final_type = specialized_type
if specialized_type.is_pythran_expr:
specialized_type = specialized_type.org_buffer
dtype = specialized_type.dtype
pyx_code.context.update(
itemsize_match=self._sizeof_dtype(dtype) + " == itemsize",
signed_match="not (%s_is_signed ^ dtype_signed)" % self._dtype_name(dtype),
dtype=dtype,
specialized_type_name=final_type.specialization_string)
dtypes = [
(dtype.is_int, pyx_code.dtype_int),
(dtype.is_float, pyx_code.dtype_float),
(dtype.is_complex, pyx_code.dtype_complex)
]
for dtype_category, codewriter in dtypes:
if dtype_category:
cond = '{{itemsize_match}} and (<Py_ssize_t>arg.ndim) == %d' % (
specialized_type.ndim,)
if dtype.is_int:
cond += ' and {{signed_match}}'
if final_type.is_pythran_expr:
cond += ' and arg_is_pythran_compatible'
if codewriter.indenter("if %s:" % cond):
#codewriter.putln("print 'buffer match found based on numpy dtype'")
codewriter.putln(self.match)
codewriter.putln("break")
codewriter.dedent()
def _buffer_parse_format_string_check(self, pyx_code, decl_code,
specialized_type, env):
"""
For each specialized type, try to coerce the object to a memoryview
slice of that type. This means obtaining a buffer and parsing the
format string.
TODO: separate buffer acquisition from format parsing
"""
dtype = specialized_type.dtype
if specialized_type.is_buffer:
axes = [('direct', 'strided')] * specialized_type.ndim
else:
axes = specialized_type.axes
memslice_type = PyrexTypes.MemoryViewSliceType(dtype, axes)
memslice_type.create_from_py_utility_code(env)
pyx_code.context.update(
coerce_from_py_func=memslice_type.from_py_function,
dtype=dtype)
decl_code.putln(
"{{memviewslice_cname}} {{coerce_from_py_func}}(object, int)")
pyx_code.context.update(
specialized_type_name=specialized_type.specialization_string,
sizeof_dtype=self._sizeof_dtype(dtype))
pyx_code.put_chunk(
u"""
# try {{dtype}}
if itemsize == -1 or itemsize == {{sizeof_dtype}}:
memslice = {{coerce_from_py_func}}(arg, 0)
if memslice.memview:
__PYX_XDEC_MEMVIEW(&memslice, 1)
# print 'found a match for the buffer through format parsing'
%s
break
else:
__pyx_PyErr_Clear()
""" % self.match)
def _buffer_checks(self, buffer_types, pythran_types, pyx_code, decl_code, env):
"""
Generate Cython code to match objects to buffer specializations.
First try to get a numpy dtype object and match it against the individual
specializations. If that fails, try naively to coerce the object
to each specialization, which obtains the buffer each time and tries
to match the format string.
"""
# The first check that finds a match breaks out of the loop below
pyx_code.put_chunk(
u"""
""" + (u"arg_is_pythran_compatible = False" if pythran_types else u"") + u"""
if ndarray is not None:
if isinstance(arg, ndarray):
dtype = arg.dtype
""" + (u"arg_is_pythran_compatible = True" if pythran_types else u"") + u"""
elif __pyx_memoryview_check(arg):
arg_base = arg.base
if isinstance(arg_base, ndarray):
dtype = arg_base.dtype
else:
dtype = None
else:
dtype = None
itemsize = -1
if dtype is not None:
itemsize = dtype.itemsize
kind = ord(dtype.kind)
dtype_signed = kind == 'i'
""")
pyx_code.indent(2)
if pythran_types:
pyx_code.put_chunk(
u"""
# Pythran only supports the endianness of the current compiler
byteorder = dtype.byteorder
if byteorder == "<" and not __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
elif byteorder == ">" and __Pyx_Is_Little_Endian():
arg_is_pythran_compatible = False
if arg_is_pythran_compatible:
cur_stride = itemsize
shape = arg.shape
strides = arg.strides
for i in range(arg.ndim-1, -1, -1):
if (<Py_ssize_t>strides[i]) != cur_stride:
arg_is_pythran_compatible = False
break
cur_stride *= <Py_ssize_t>shape[i]
else:
arg_is_pythran_compatible = not (arg.flags.f_contiguous and (<Py_ssize_t>arg.ndim) > 1)
""")
pyx_code.named_insertion_point("numpy_dtype_checks")
self._buffer_check_numpy_dtype(pyx_code, buffer_types, pythran_types)
pyx_code.dedent(2)
for specialized_type in buffer_types:
self._buffer_parse_format_string_check(
pyx_code, decl_code, specialized_type, env)
def _buffer_declarations(self, pyx_code, decl_code, all_buffer_types, pythran_types):
"""
If we have any buffer specializations, write out some variable
declarations and imports.
"""
decl_code.put_chunk(
u"""
ctypedef struct {{memviewslice_cname}}:
void *memview
void __PYX_XDEC_MEMVIEW({{memviewslice_cname}} *, int have_gil)
bint __pyx_memoryview_check(object)
""")
pyx_code.local_variable_declarations.put_chunk(
u"""
cdef {{memviewslice_cname}} memslice
cdef Py_ssize_t itemsize
cdef bint dtype_signed
cdef char kind
itemsize = -1
""")
if pythran_types:
pyx_code.local_variable_declarations.put_chunk(u"""
cdef bint arg_is_pythran_compatible
cdef Py_ssize_t cur_stride
""")
pyx_code.imports.put_chunk(
u"""
cdef type ndarray
ndarray = __Pyx_ImportNumPyArrayTypeIfAvailable()
""")
seen_typedefs = set()
seen_int_dtypes = set()
for buffer_type in all_buffer_types:
dtype = buffer_type.dtype
dtype_name = self._dtype_name(dtype)
if dtype.is_typedef:
if dtype_name not in seen_typedefs:
seen_typedefs.add(dtype_name)
decl_code.putln(
'ctypedef %s %s "%s"' % (dtype.resolve(), dtype_name,
dtype.empty_declaration_code()))
if buffer_type.dtype.is_int:
if str(dtype) not in seen_int_dtypes:
seen_int_dtypes.add(str(dtype))
pyx_code.context.update(dtype_name=dtype_name,
dtype_type=self._dtype_type(dtype))
pyx_code.local_variable_declarations.put_chunk(
u"""
cdef bint {{dtype_name}}_is_signed
{{dtype_name}}_is_signed = not (<{{dtype_type}}> -1 > 0)
""")
def _split_fused_types(self, arg):
"""
Specialize fused types and split into normal types and buffer types.
"""
specialized_types = PyrexTypes.get_specialized_types(arg.type)
# Prefer long over int, etc by sorting (see type classes in PyrexTypes.py)
specialized_types.sort()
seen_py_type_names = set()
normal_types, buffer_types, pythran_types = [], [], []
has_object_fallback = False
for specialized_type in specialized_types:
py_type_name = specialized_type.py_type_name()
if py_type_name:
if py_type_name in seen_py_type_names:
continue
seen_py_type_names.add(py_type_name)
if py_type_name == 'object':
has_object_fallback = True
else:
normal_types.append(specialized_type)
elif specialized_type.is_pythran_expr:
pythran_types.append(specialized_type)
elif specialized_type.is_buffer or specialized_type.is_memoryviewslice:
buffer_types.append(specialized_type)
return normal_types, buffer_types, pythran_types, has_object_fallback
def _unpack_argument(self, pyx_code):
pyx_code.put_chunk(
u"""
# PROCESSING ARGUMENT {{arg_tuple_idx}}
if {{arg_tuple_idx}} < len(<tuple>args):
arg = (<tuple>args)[{{arg_tuple_idx}}]
elif kwargs is not None and '{{arg.name}}' in <dict>kwargs:
arg = (<dict>kwargs)['{{arg.name}}']
else:
{{if arg.default}}
arg = (<tuple>defaults)[{{default_idx}}]
{{else}}
{{if arg_tuple_idx < min_positional_args}}
raise TypeError("Expected at least %d argument%s, got %d" % (
{{min_positional_args}}, {{'"s"' if min_positional_args != 1 else '""'}}, len(<tuple>args)))
{{else}}
raise TypeError("Missing keyword-only argument: '%s'" % "{{arg.name}}")
{{endif}}
{{endif}}
""")
def make_fused_cpdef(self, orig_py_func, env, is_def):
"""
This creates the function that is indexable from Python and does
runtime dispatch based on the argument types. The function gets the
arg tuple and kwargs dict (or None) and the defaults tuple
as arguments from the Binding Fused Function's tp_call.
"""
from . import TreeFragment, Code, UtilityCode
fused_types = self._get_fused_base_types([
arg.type for arg in self.node.args if arg.type.is_fused])
context = {
'memviewslice_cname': MemoryView.memviewslice_cname,
'func_args': self.node.args,
'n_fused': len(fused_types),
'min_positional_args':
self.node.num_required_args - self.node.num_required_kw_args
if is_def else
sum(1 for arg in self.node.args if arg.default is None),
'name': orig_py_func.entry.name,
}
pyx_code = Code.PyxCodeWriter(context=context)
decl_code = Code.PyxCodeWriter(context=context)
decl_code.put_chunk(
u"""
cdef extern from *:
void __pyx_PyErr_Clear "PyErr_Clear" ()
type __Pyx_ImportNumPyArrayTypeIfAvailable()
int __Pyx_Is_Little_Endian()
""")
decl_code.indent()
pyx_code.put_chunk(
u"""
def __pyx_fused_cpdef(signatures, args, kwargs, defaults):
# FIXME: use a typed signature - currently fails badly because
# default arguments inherit the types we specify here!
dest_sig = [None] * {{n_fused}}
if kwargs is not None and not kwargs:
kwargs = None
cdef Py_ssize_t i
# instance check body
""")
pyx_code.indent() # indent following code to function body
pyx_code.named_insertion_point("imports")
pyx_code.named_insertion_point("func_defs")
pyx_code.named_insertion_point("local_variable_declarations")
fused_index = 0
default_idx = 0
all_buffer_types = OrderedSet()
seen_fused_types = set()
for i, arg in enumerate(self.node.args):
if arg.type.is_fused:
arg_fused_types = arg.type.get_fused_types()
if len(arg_fused_types) > 1:
raise NotImplementedError("Determination of more than one fused base "
"type per argument is not implemented.")
fused_type = arg_fused_types[0]
if arg.type.is_fused and fused_type not in seen_fused_types:
seen_fused_types.add(fused_type)
context.update(
arg_tuple_idx=i,
arg=arg,
dest_sig_idx=fused_index,
default_idx=default_idx,
)
normal_types, buffer_types, pythran_types, has_object_fallback = self._split_fused_types(arg)
self._unpack_argument(pyx_code)
# 'unrolled' loop, first match breaks out of it
if pyx_code.indenter("while 1:"):
if normal_types:
self._fused_instance_checks(normal_types, pyx_code, env)
if buffer_types or pythran_types:
env.use_utility_code(Code.UtilityCode.load_cached("IsLittleEndian", "ModuleSetupCode.c"))
self._buffer_checks(buffer_types, pythran_types, pyx_code, decl_code, env)
if has_object_fallback:
pyx_code.context.update(specialized_type_name='object')
pyx_code.putln(self.match)
else:
pyx_code.putln(self.no_match)
pyx_code.putln("break")
pyx_code.dedent()
fused_index += 1
all_buffer_types.update(buffer_types)
all_buffer_types.update(ty.org_buffer for ty in pythran_types)
if arg.default:
default_idx += 1
if all_buffer_types:
self._buffer_declarations(pyx_code, decl_code, all_buffer_types, pythran_types)
env.use_utility_code(Code.UtilityCode.load_cached("Import", "ImportExport.c"))
env.use_utility_code(Code.UtilityCode.load_cached("ImportNumPyArray", "ImportExport.c"))
pyx_code.put_chunk(
u"""
candidates = []
for sig in signatures:
match_found = False
src_sig = sig.strip('()').split('|')
for i in range(len(dest_sig)):
dst_type = dest_sig[i]
if dst_type is not None:
if src_sig[i] == dst_type:
match_found = True
else:
match_found = False
break
if match_found:
candidates.append(sig)
if not candidates:
raise TypeError("No matching signature found")
elif len(candidates) > 1:
raise TypeError("Function call with ambiguous argument types")
else:
return (<dict>signatures)[candidates[0]]
""")
fragment_code = pyx_code.getvalue()
# print decl_code.getvalue()
# print fragment_code
from .Optimize import ConstantFolding
fragment = TreeFragment.TreeFragment(
fragment_code, level='module', pipeline=[ConstantFolding()])
ast = TreeFragment.SetPosTransform(self.node.pos)(fragment.root)
UtilityCode.declare_declarations_in_scope(
decl_code.getvalue(), env.global_scope())
ast.scope = env
# FIXME: for static methods of cdef classes, we build the wrong signature here: first arg becomes 'self'
ast.analyse_declarations(env)
py_func = ast.stats[-1] # the DefNode
self.fragment_scope = ast.scope
if isinstance(self.node, DefNode):
py_func.specialized_cpdefs = self.nodes[:]
else:
py_func.specialized_cpdefs = [n.py_func for n in self.nodes]
return py_func
def update_fused_defnode_entry(self, env):
copy_attributes = (
'name', 'pos', 'cname', 'func_cname', 'pyfunc_cname',
'pymethdef_cname', 'doc', 'doc_cname', 'is_member',
'scope'
)
entry = self.py_func.entry
for attr in copy_attributes:
setattr(entry, attr,
getattr(self.orig_py_func.entry, attr))
self.py_func.name = self.orig_py_func.name
self.py_func.doc = self.orig_py_func.doc
env.entries.pop('__pyx_fused_cpdef', None)
if isinstance(self.node, DefNode):
env.entries[entry.name] = entry
else:
env.entries[entry.name].as_variable = entry
env.pyfunc_entries.append(entry)
self.py_func.entry.fused_cfunction = self
for node in self.nodes:
if isinstance(self.node, DefNode):
node.fused_py_func = self.py_func
else:
node.py_func.fused_py_func = self.py_func
node.entry.as_variable = entry
self.synthesize_defnodes()
self.stats.append(self.__signatures__)
def analyse_expressions(self, env):
"""
Analyse the expressions. Take care to only evaluate default arguments
once and clone the result for all specializations
"""
for fused_compound_type in self.fused_compound_types:
for fused_type in fused_compound_type.get_fused_types():
for specialization_type in fused_type.types:
if specialization_type.is_complex:
specialization_type.create_declaration_utility_code(env)
if self.py_func:
self.__signatures__ = self.__signatures__.analyse_expressions(env)
self.py_func = self.py_func.analyse_expressions(env)
self.resulting_fused_function = self.resulting_fused_function.analyse_expressions(env)
self.fused_func_assignment = self.fused_func_assignment.analyse_expressions(env)
self.defaults = defaults = []
for arg in self.node.args:
if arg.default:
arg.default = arg.default.analyse_expressions(env)
defaults.append(ProxyNode(arg.default))
else:
defaults.append(None)
for i, stat in enumerate(self.stats):
stat = self.stats[i] = stat.analyse_expressions(env)
if isinstance(stat, FuncDefNode):
for arg, default in zip(stat.args, defaults):
if default is not None:
arg.default = CloneNode(default).coerce_to(arg.type, env)
if self.py_func:
args = [CloneNode(default) for default in defaults if default]
self.defaults_tuple = TupleNode(self.pos, args=args)
self.defaults_tuple = self.defaults_tuple.analyse_types(env, skip_children=True).coerce_to_pyobject(env)
self.defaults_tuple = ProxyNode(self.defaults_tuple)
self.code_object = ProxyNode(self.specialized_pycfuncs[0].code_object)
fused_func = self.resulting_fused_function.arg
fused_func.defaults_tuple = CloneNode(self.defaults_tuple)
fused_func.code_object = CloneNode(self.code_object)
for i, pycfunc in enumerate(self.specialized_pycfuncs):
pycfunc.code_object = CloneNode(self.code_object)
pycfunc = self.specialized_pycfuncs[i] = pycfunc.analyse_types(env)
pycfunc.defaults_tuple = CloneNode(self.defaults_tuple)
return self
def synthesize_defnodes(self):
"""
Create the __signatures__ dict of PyCFunctionNode specializations.
"""
if isinstance(self.nodes[0], CFuncDefNode):
nodes = [node.py_func for node in self.nodes]
else:
nodes = self.nodes
signatures = [StringEncoding.EncodedString(node.specialized_signature_string)
for node in nodes]
keys = [ExprNodes.StringNode(node.pos, value=sig)
for node, sig in zip(nodes, signatures)]
values = [ExprNodes.PyCFunctionNode.from_defnode(node, binding=True)
for node in nodes]
self.__signatures__ = ExprNodes.DictNode.from_pairs(self.pos, zip(keys, values))
self.specialized_pycfuncs = values
for pycfuncnode in values:
pycfuncnode.is_specialization = True
def generate_function_definitions(self, env, code):
if self.py_func:
self.py_func.pymethdef_required = True
self.fused_func_assignment.generate_function_definitions(env, code)
for stat in self.stats:
if isinstance(stat, FuncDefNode) and stat.entry.used:
code.mark_pos(stat.pos)
stat.generate_function_definitions(env, code)
def generate_execution_code(self, code):
# Note: all def function specializations are wrapped in PyCFunction
# nodes in the self.__signatures__ dictnode.
for default in self.defaults:
if default is not None:
default.generate_evaluation_code(code)
if self.py_func:
self.defaults_tuple.generate_evaluation_code(code)
self.code_object.generate_evaluation_code(code)
for stat in self.stats:
code.mark_pos(stat.pos)
if isinstance(stat, ExprNodes.ExprNode):
stat.generate_evaluation_code(code)
else:
stat.generate_execution_code(code)
if self.__signatures__:
self.resulting_fused_function.generate_evaluation_code(code)
code.putln(
"((__pyx_FusedFunctionObject *) %s)->__signatures__ = %s;" %
(self.resulting_fused_function.result(),
self.__signatures__.result()))
code.put_giveref(self.__signatures__.result())
self.__signatures__.generate_post_assignment_code(code)
self.__signatures__.free_temps(code)
self.fused_func_assignment.generate_execution_code(code)
# Dispose of results
self.resulting_fused_function.generate_disposal_code(code)
self.resulting_fused_function.free_temps(code)
self.defaults_tuple.generate_disposal_code(code)
self.defaults_tuple.free_temps(code)
self.code_object.generate_disposal_code(code)
self.code_object.free_temps(code)
for default in self.defaults:
if default is not None:
default.generate_disposal_code(code)
default.free_temps(code)
def annotate(self, code):
for stat in self.stats:
stat.annotate(code)
# Cython-0.29.28/Cython/Compiler/Future.py
def _get_feature(name):
import __future__
# fall back to a unique fake object for earlier Python versions or Python 3
return getattr(__future__, name, object())
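# e.g. _get_feature("generator_stop"): where __future__ lacks the
# feature, a fresh object() sentinel is returned that never compares
# equal to any real feature object.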
unicode_literals = _get_feature("unicode_literals")
with_statement = _get_feature("with_statement") # dummy
division = _get_feature("division")
print_function = _get_feature("print_function")
absolute_import = _get_feature("absolute_import")
nested_scopes = _get_feature("nested_scopes") # dummy
generators = _get_feature("generators") # dummy
generator_stop = _get_feature("generator_stop")
del _get_feature
# Cython-0.29.28/Cython/Compiler/Interpreter.py
"""
This module deals with interpreting the parse tree as Python
would have done, in the compiler.
For now this only covers parse tree to value conversion of
compile-time values.
"""
from __future__ import absolute_import
import sys  # used for the version check in interpret() below
from .Nodes import *
from .ExprNodes import *
from .Errors import CompileError
class EmptyScope(object):
def lookup(self, name):
return None
empty_scope = EmptyScope()
def interpret_compiletime_options(optlist, optdict, type_env=None, type_args=()):
"""
Tries to interpret a list of compile time option nodes.
The result will be a tuple (optlist, optdict) where all
expression nodes have been interpreted. Each result is
in the form of a tuple (value, pos).
optlist is a list of nodes, while optdict is a DictNode (the
resulting optdict is a plain dict).
If type_env is set, all type nodes will be analysed and the resulting
types set. Otherwise only interpretable ExprNodes
are allowed; other nodes raise errors.
A CompileError will be raised if there are problems.
"""
def interpret(node, ix):
if ix in type_args:
if type_env:
type = node.analyse_as_type(type_env)
if not type:
raise CompileError(node.pos, "Invalid type.")
return (type, node.pos)
else:
raise CompileError(node.pos, "Type not allowed here.")
else:
if (sys.version_info[0] >= 3 and
isinstance(node, StringNode) and
node.unicode_value is not None):
return (node.unicode_value, node.pos)
return (node.compile_time_value(empty_scope), node.pos)
if optlist:
optlist = [interpret(x, ix) for ix, x in enumerate(optlist)]
if optdict:
assert isinstance(optdict, DictNode)
new_optdict = {}
for item in optdict.key_value_pairs:
new_key, dummy = interpret(item.key, None)
new_optdict[new_key] = interpret(item.value, item.key.value)
optdict = new_optdict
return (optlist, optdict)
# Cython-0.29.28/Cython/Compiler/Lexicon.py
# cython: language_level=3, py2_import=True
#
# Cython Scanner - Lexical Definitions
#
from __future__ import absolute_import, unicode_literals
raw_prefixes = "rR"
bytes_prefixes = "bB"
string_prefixes = "fFuU" + bytes_prefixes
char_prefixes = "cC"
any_string_prefix = raw_prefixes + string_prefixes + char_prefixes
IDENT = 'IDENT'
def make_lexicon():
from ..Plex import \
Str, Any, AnyBut, AnyChar, Rep, Rep1, Opt, Bol, Eol, Eof, \
TEXT, IGNORE, State, Lexicon
from .Scanning import Method
letter = Any("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
digit = Any("0123456789")
bindigit = Any("01")
octdigit = Any("01234567")
hexdigit = Any("0123456789ABCDEFabcdef")
indentation = Bol + Rep(Any(" \t"))
def underscore_digits(d):
return Rep1(d) + Rep(Str("_") + Rep1(d))
decimal = underscore_digits(digit)
dot = Str(".")
exponent = Any("Ee") + Opt(Any("+-")) + decimal
decimal_fract = (decimal + dot + Opt(decimal)) | (dot + decimal)
name = letter + Rep(letter | digit)
intconst = decimal | (Str("0") + ((Any("Xx") + underscore_digits(hexdigit)) |
(Any("Oo") + underscore_digits(octdigit)) |
(Any("Bb") + underscore_digits(bindigit)) ))
intsuffix = (Opt(Any("Uu")) + Opt(Any("Ll")) + Opt(Any("Ll"))) | (Opt(Any("Ll")) + Opt(Any("Ll")) + Opt(Any("Uu")))
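# i.e. C-style integer suffixes in either order: 42u, 42L, 42UL, 42LL,
# 0x1fULL etc., with at most one 'U' and at most two 'L's.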
intliteral = intconst + intsuffix
fltconst = (decimal_fract + Opt(exponent)) | (decimal + exponent)
imagconst = (intconst | fltconst) + Any("jJ")
# invalid combinations of prefixes are caught in p_string_literal
beginstring = Opt(Rep(Any(string_prefixes + raw_prefixes)) |
Any(char_prefixes)
) + (Str("'") | Str('"') | Str("'''") | Str('"""'))
two_oct = octdigit + octdigit
three_oct = octdigit + octdigit + octdigit
two_hex = hexdigit + hexdigit
four_hex = two_hex + two_hex
escapeseq = Str("\\") + (two_oct | three_oct |
Str('N{') + Rep(AnyBut('}')) + Str('}') |
Str('u') + four_hex | Str('x') + two_hex |
Str('U') + four_hex + four_hex | AnyChar)
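# e.g. \12 and \123 (octal), \x41 (hex), \uABCD, \U0001F600, \N{BULLET},
# plus the catch-all single-character escapes such as \n or \\ .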
bra = Any("([{")
ket = Any(")]}")
punct = Any(":,;+-*/|&<>=.%`~^?!@")
diphthong = Str("==", "<>", "!=", "<=", ">=", "<<", ">>", "**", "//",
"+=", "-=", "*=", "/=", "%=", "|=", "^=", "&=",
"<<=", ">>=", "**=", "//=", "->", "@=")
spaces = Rep1(Any(" \t\f"))
escaped_newline = Str("\\\n")
lineterm = Eol + Opt(Str("\n"))
comment = Str("#") + Rep(AnyBut("\n"))
return Lexicon([
(name, IDENT),
(intliteral, Method('strip_underscores', symbol='INT')),
(fltconst, Method('strip_underscores', symbol='FLOAT')),
(imagconst, Method('strip_underscores', symbol='IMAG')),
(punct | diphthong, TEXT),
(bra, Method('open_bracket_action')),
(ket, Method('close_bracket_action')),
(lineterm, Method('newline_action')),
(beginstring, Method('begin_string_action')),
(comment, IGNORE),
(spaces, IGNORE),
(escaped_newline, IGNORE),
State('INDENT', [
(comment + lineterm, Method('commentline')),
(Opt(spaces) + Opt(comment) + lineterm, IGNORE),
(indentation, Method('indentation_action')),
(Eof, Method('eof_action'))
]),
State('SQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut("'\"\n\\")), 'CHARS'),
(Str('"'), 'CHARS'),
(Str("\n"), Method('unclosed_string_action')),
(Str("'"), Method('end_string_action')),
(Eof, 'EOF')
]),
State('DQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut('"\n\\')), 'CHARS'),
(Str("'"), 'CHARS'),
(Str("\n"), Method('unclosed_string_action')),
(Str('"'), Method('end_string_action')),
(Eof, 'EOF')
]),
State('TSQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut("'\"\n\\")), 'CHARS'),
(Any("'\""), 'CHARS'),
(Str("\n"), 'NEWLINE'),
(Str("'''"), Method('end_string_action')),
(Eof, 'EOF')
]),
State('TDQ_STRING', [
(escapeseq, 'ESCAPE'),
(Rep1(AnyBut('"\'\n\\')), 'CHARS'),
(Any("'\""), 'CHARS'),
(Str("\n"), 'NEWLINE'),
(Str('"""'), Method('end_string_action')),
(Eof, 'EOF')
]),
(Eof, Method('eof_action'))
],
# FIXME: Plex 1.9 needs different args here from Plex 1.1.4
#debug_flags = scanner_debug_flags,
#debug_file = scanner_dump_file
)
# Cython-0.29.28/Cython/Compiler/Main.py
#
# Cython Top Level
#
from __future__ import absolute_import
import os
import re
import sys
import io
if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[:2] < (3, 3):
sys.stderr.write("Sorry, Cython requires Python 2.6+ or 3.3+, found %d.%d\n" % tuple(sys.version_info[:2]))
sys.exit(1)
try:
from __builtin__ import basestring
except ImportError:
basestring = str
# Do not import Parsing here, import it when needed, because Parsing imports
# Nodes, which globally needs debug command line options initialized to set a
# conditional metaclass. These options are processed by CmdLine called from
# main() in this file.
# import Parsing
from . import Errors
from .StringEncoding import EncodedString
from .Scanning import PyrexScanner, FileSourceDescriptor
from .Errors import PyrexError, CompileError, error, warning
from .Symtab import ModuleScope
from .. import Utils
from . import Options
from . import Version # legacy import needed by old PyTables versions
version = Version.version # legacy attribute - use "Cython.__version__" instead
module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)*$")
verbose = 0
standard_include_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.path.pardir, 'Includes'))
class CompilationData(object):
# Bundles the information that is passed from transform to transform.
# While Context contains every pxd ever loaded, path information etc.,
# this only contains the data related to a single compilation pass
#
# pyx ModuleNode Main code tree of this compilation.
# pxds {string : ModuleNode} Trees for the pxds used in the pyx.
# codewriter CCodeWriter Where to output final code.
# options CompilationOptions
# result CompilationResult
pass
class Context(object):
# This class encapsulates the context needed for compiling
# one or more Cython implementation files along with their
# associated and imported declaration files. It includes
# the root of the module import namespace and the list
# of directories to search for include files.
#
# modules {string : ModuleScope}
# include_directories [string]
# future_directives [object]
# language_level int currently 2 or 3 for Python 2/3
cython_scope = None
language_level = None # warn when not set but default to Py2
def __init__(self, include_directories, compiler_directives, cpp=False,
language_level=None, options=None):
# cython_scope is a hack, set to False by subclasses, in order to break
# an infinite loop.
# Better code organization would fix it.
from . import Builtin, CythonScope
self.modules = {"__builtin__" : Builtin.builtin_scope}
self.cython_scope = CythonScope.create_cython_scope(self)
self.modules["cython"] = self.cython_scope
self.include_directories = include_directories
self.future_directives = set()
self.compiler_directives = compiler_directives
self.cpp = cpp
self.options = options
self.pxds = {} # full name -> node tree
self._interned = {} # (type(value), value, *key_args) -> interned_value
if language_level is not None:
self.set_language_level(language_level)
self.gdb_debug_outputwriter = None
def set_language_level(self, level):
from .Future import print_function, unicode_literals, absolute_import, division
future_directives = set()
if level == '3str':
level = 3
else:
level = int(level)
if level >= 3:
future_directives.add(unicode_literals)
if level >= 3:
future_directives.update([print_function, absolute_import, division])
self.language_level = level
self.future_directives = future_directives
if level >= 3:
self.modules['builtins'] = self.modules['__builtin__']
def intern_ustring(self, value, encoding=None):
key = (EncodedString, value, encoding)
try:
return self._interned[key]
except KeyError:
pass
value = EncodedString(value)
if encoding:
value.encoding = encoding
self._interned[key] = value
return value
def intern_value(self, value, *key):
key = (type(value), value) + key
try:
return self._interned[key]
except KeyError:
pass
self._interned[key] = value
return value
# pipeline creation functions can now be found in Pipeline.py
def process_pxd(self, source_desc, scope, module_name):
from . import Pipeline
if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':
source = CompilationSource(source_desc, module_name, os.getcwd())
result_sink = create_default_resultobj(source, self.options)
pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)
result = Pipeline.run_pipeline(pipeline, source)
else:
pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)
result = Pipeline.run_pipeline(pipeline, source_desc)
return result
def nonfatal_error(self, exc):
return Errors.report_error(exc)
def find_module(self, module_name, relative_to=None, pos=None, need_pxd=1,
absolute_fallback=True):
# Finds and returns the module scope corresponding to
# the given relative or absolute module name. If this
# is the first time the module has been requested, finds
# the corresponding .pxd file and process it.
# If relative_to is not None, it must be a module scope,
# and the module will first be searched for relative to
# that module, provided its name is not a dotted name.
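# Sketch: find_module('pkg.mod') with relative_to=None walks
# find_submodule('pkg') and then its submodule 'mod', triggering a
# search for the matching .pxd on the include path the first time.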
debug_find_module = 0
if debug_find_module:
print("Context.find_module: module_name = %s, relative_to = %s, pos = %s, need_pxd = %s" % (
module_name, relative_to, pos, need_pxd))
scope = None
pxd_pathname = None
if relative_to:
if module_name:
# from .module import ...
qualified_name = relative_to.qualify_name(module_name)
else:
# from . import ...
qualified_name = relative_to.qualified_name
scope = relative_to
relative_to = None
else:
qualified_name = module_name
if not module_name_pattern.match(qualified_name):
raise CompileError(pos or (module_name, 0, 0),
"'%s' is not a valid module name" % module_name)
if relative_to:
if debug_find_module:
print("...trying relative import")
scope = relative_to.lookup_submodule(module_name)
if not scope:
pxd_pathname = self.find_pxd_file(qualified_name, pos)
if pxd_pathname:
scope = relative_to.find_submodule(module_name)
if not scope:
if debug_find_module:
print("...trying absolute import")
if absolute_fallback:
qualified_name = module_name
scope = self
for name in qualified_name.split("."):
scope = scope.find_submodule(name)
if debug_find_module:
print("...scope = %s" % scope)
if not scope.pxd_file_loaded:
if debug_find_module:
print("...pxd not loaded")
if not pxd_pathname:
if debug_find_module:
print("...looking for pxd file")
# Only look in sys.path if we are explicitly looking
# for a .pxd file.
pxd_pathname = self.find_pxd_file(qualified_name, pos, sys_path=need_pxd)
if debug_find_module:
print("......found %s" % pxd_pathname)
if not pxd_pathname and need_pxd:
# Set pxd_file_loaded such that we don't need to
# look for the non-existing pxd file next time.
scope.pxd_file_loaded = True
package_pathname = self.search_include_directories(qualified_name, ".py", pos)
if package_pathname and package_pathname.endswith('__init__.py'):
pass
else:
error(pos, "'%s.pxd' not found" % qualified_name.replace('.', os.sep))
if pxd_pathname:
scope.pxd_file_loaded = True
try:
if debug_find_module:
print("Context.find_module: Parsing %s" % pxd_pathname)
rel_path = module_name.replace('.', os.sep) + os.path.splitext(pxd_pathname)[1]
if not pxd_pathname.endswith(rel_path):
rel_path = pxd_pathname # safety measure to prevent printing incorrect paths
source_desc = FileSourceDescriptor(pxd_pathname, rel_path)
err, result = self.process_pxd(source_desc, scope, qualified_name)
if err:
raise err
(pxd_codenodes, pxd_scope) = result
self.pxds[module_name] = (pxd_codenodes, pxd_scope)
except CompileError:
pass
return scope
def find_pxd_file(self, qualified_name, pos, sys_path=True):
# Search include path (and sys.path if sys_path is True) for
# the .pxd file corresponding to the given fully-qualified
# module name.
# Will find either a dotted filename or a file in a
# package directory. If a source file position is given,
# the directory containing the source file is searched first
# for a dotted filename, and its containing package root
# directory is searched first for a non-dotted filename.
pxd = self.search_include_directories(qualified_name, ".pxd", pos, sys_path=sys_path)
if pxd is None: # XXX Keep this until Includes/Deprecated is removed
if (qualified_name.startswith('python') or
qualified_name in ('stdlib', 'stdio', 'stl')):
standard_include_path = os.path.abspath(os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
deprecated_include_path = os.path.join(standard_include_path, 'Deprecated')
self.include_directories.append(deprecated_include_path)
try:
pxd = self.search_include_directories(qualified_name, ".pxd", pos)
finally:
self.include_directories.pop()
if pxd:
name = qualified_name
if name.startswith('python'):
warning(pos, "'%s' is deprecated, use 'cpython'" % name, 1)
elif name in ('stdlib', 'stdio'):
warning(pos, "'%s' is deprecated, use 'libc.%s'" % (name, name), 1)
elif name == 'stl':
warning(pos, "'%s' is deprecated, use 'libcpp.*.*'" % name, 1)
if pxd is None and Options.cimport_from_pyx:
return self.find_pyx_file(qualified_name, pos)
return pxd
def find_pyx_file(self, qualified_name, pos):
# Search include path for the .pyx file corresponding to the
# given fully-qualified module name, as for find_pxd_file().
return self.search_include_directories(qualified_name, ".pyx", pos)
def find_include_file(self, filename, pos):
# Search list of include directories for filename.
# Reports an error and returns None if not found.
path = self.search_include_directories(filename, "", pos,
include=True)
if not path:
error(pos, "'%s' not found" % filename)
return path
def search_include_directories(self, qualified_name, suffix, pos,
include=False, sys_path=False):
include_dirs = self.include_directories
if sys_path:
include_dirs = include_dirs + sys.path
# include_dirs must be hashable for caching in @cached_function
include_dirs = tuple(include_dirs + [standard_include_path])
return search_include_directories(include_dirs, qualified_name,
suffix, pos, include)
def find_root_package_dir(self, file_path):
return Utils.find_root_package_dir(file_path)
def check_package_dir(self, dir, package_names):
return Utils.check_package_dir(dir, tuple(package_names))
def c_file_out_of_date(self, source_path, output_path):
if not os.path.exists(output_path):
return 1
c_time = Utils.modification_time(output_path)
if Utils.file_newer_than(source_path, c_time):
return 1
pos = [source_path]
pxd_path = Utils.replace_suffix(source_path, ".pxd")
if os.path.exists(pxd_path) and Utils.file_newer_than(pxd_path, c_time):
return 1
for kind, name in self.read_dependency_file(source_path):
if kind == "cimport":
dep_path = self.find_pxd_file(name, pos)
elif kind == "include":
dep_path = self.search_include_directories(name, "", pos, include=True)
else:
continue
if dep_path and Utils.file_newer_than(dep_path, c_time):
return 1
return 0
def find_cimported_module_names(self, source_path):
return [ name for kind, name in self.read_dependency_file(source_path)
if kind == "cimport" ]
def is_package_dir(self, dir_path):
return Utils.is_package_dir(dir_path)
def read_dependency_file(self, source_path):
dep_path = Utils.replace_suffix(source_path, ".dep")
if os.path.exists(dep_path):
f = open(dep_path, "rU")
chunks = [ line.strip().split(" ", 1)
for line in f.readlines()
if " " in line.strip() ]
f.close()
return chunks
else:
return ()
def lookup_submodule(self, name):
# Look up a top-level module. Returns None if not found.
return self.modules.get(name, None)
def find_submodule(self, name):
# Find a top-level module, creating a new one if needed.
scope = self.lookup_submodule(name)
if not scope:
scope = ModuleScope(name,
parent_module = None, context = self)
self.modules[name] = scope
return scope
def parse(self, source_desc, scope, pxd, full_module_name):
if not isinstance(source_desc, FileSourceDescriptor):
raise RuntimeError("Only file sources for code supported")
source_filename = source_desc.filename
scope.cpp = self.cpp
# Parse the given source file and return a parse tree.
num_errors = Errors.num_errors
try:
with Utils.open_source_file(source_filename) as f:
from . import Parsing
s = PyrexScanner(f, source_desc, source_encoding = f.encoding,
scope = scope, context = self)
tree = Parsing.p_module(s, pxd, full_module_name)
if self.options.formal_grammar:
try:
from ..Parser import ConcreteSyntaxTree
except ImportError:
raise RuntimeError(
"Formal grammar can only be used with compiled Cython with an available pgen.")
ConcreteSyntaxTree.p_module(source_filename)
except UnicodeDecodeError as e:
#import traceback
#traceback.print_exc()
raise self._report_decode_error(source_desc, e)
if Errors.num_errors > num_errors:
raise CompileError()
return tree
def _report_decode_error(self, source_desc, exc):
msg = exc.args[-1]
position = exc.args[2]
encoding = exc.args[0]
line = 1
column = idx = 0
with io.open(source_desc.filename, "r", encoding='iso8859-1', newline='') as f:
for line, data in enumerate(f, 1):
idx += len(data)
if idx >= position:
column = position - (idx - len(data)) + 1
break
return error((source_desc, line, column),
"Decoding error, missing or incorrect coding= "
"at top of source (cannot decode with encoding %r: %s)" % (encoding, msg))
def extract_module_name(self, path, options):
# Find fully_qualified module name from the full pathname
# of a source file.
dir, filename = os.path.split(path)
module_name, _ = os.path.splitext(filename)
if "." in module_name:
return module_name
names = [module_name]
while self.is_package_dir(dir):
parent, package_name = os.path.split(dir)
if parent == dir:
break
names.append(package_name)
dir = parent
names.reverse()
return ".".join(names)
def setup_errors(self, options, result):
Errors.reset() # clear any remaining error state
if options.use_listing_file:
path = result.listing_file = Utils.replace_suffix(result.main_source_file, ".lis")
else:
path = None
Errors.open_listing_file(path=path,
echo_to_stderr=options.errors_to_stderr)
def teardown_errors(self, err, options, result):
source_desc = result.compilation_source.source_desc
if not isinstance(source_desc, FileSourceDescriptor):
raise RuntimeError("Only file sources for code supported")
Errors.close_listing_file()
result.num_errors = Errors.num_errors
if result.num_errors > 0:
err = True
if err and result.c_file:
try:
Utils.castrate_file(result.c_file, os.stat(source_desc.filename))
except EnvironmentError:
pass
result.c_file = None
def get_output_filename(source_filename, cwd, options):
if options.cplus:
c_suffix = ".cpp"
else:
c_suffix = ".c"
suggested_file_name = Utils.replace_suffix(source_filename, c_suffix)
if options.output_file:
out_path = os.path.join(cwd, options.output_file)
if os.path.isdir(out_path):
return os.path.join(out_path, os.path.basename(suggested_file_name))
else:
return out_path
else:
return suggested_file_name
def create_default_resultobj(compilation_source, options):
result = CompilationResult()
result.main_source_file = compilation_source.source_desc.filename
result.compilation_source = compilation_source
source_desc = compilation_source.source_desc
result.c_file = get_output_filename(source_desc.filename,
compilation_source.cwd, options)
result.embedded_metadata = options.embedded_metadata
return result
def run_pipeline(source, options, full_module_name=None, context=None):
from . import Pipeline
source_ext = os.path.splitext(source)[1]
options.configure_language_defaults(source_ext[1:]) # py/pyx
if context is None:
context = options.create_context()
# Set up source object
cwd = os.getcwd()
abs_path = os.path.abspath(source)
full_module_name = full_module_name or context.extract_module_name(source, options)
Utils.raise_error_if_module_name_forbidden(full_module_name)
if options.relative_path_in_code_position_comments:
rel_path = full_module_name.replace('.', os.sep) + source_ext
if not abs_path.endswith(rel_path):
rel_path = source # safety measure to prevent printing incorrect paths
else:
rel_path = abs_path
source_desc = FileSourceDescriptor(abs_path, rel_path)
source = CompilationSource(source_desc, full_module_name, cwd)
# Set up result object
result = create_default_resultobj(source, options)
if options.annotate is None:
# By default, decide based on whether an html file already exists.
html_filename = os.path.splitext(result.c_file)[0] + ".html"
if os.path.exists(html_filename):
with io.open(html_filename, "r", encoding="UTF-8") as html_file:
if u'<!-- Generated by Cython' in html_file.read(100):
options.annotate = True
# Cython-0.29.28/Cython/Plex/Machines.py (fragment)
for key in ('bol', 'eol', 'eof', 'else'):
state = special_to_state.get(key, None)
if state:
file.write(" %s --> State %d\n" % (key, state['number']))
def chars_to_ranges(self, char_list):
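# e.g. ['a', 'b', 'c', 'x'] -> (('a', 'c'), ('x', 'x')): runs of
# consecutive character codes are merged into inclusive (first, last)
# pairs.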
char_list.sort()
i = 0
n = len(char_list)
result = []
while i < n:
c1 = ord(char_list[i])
c2 = c1
i += 1
while i < n and ord(char_list[i]) == c2 + 1:
i += 1
c2 += 1
result.append((chr(c1), chr(c2)))
return tuple(result)
def ranges_to_string(self, range_list):
return ','.join(map(self.range_to_string, range_list))
def range_to_string(self, range_tuple):
(c1, c2) = range_tuple
if c1 == c2:
return repr(c1)
else:
return "%s..%s" % (repr(c1), repr(c2))
# Cython-0.29.28/Cython/Plex/Regexps.py
#=======================================================================
#
# Python Lexical Analyser
#
# Regular Expressions
#
#=======================================================================
from __future__ import absolute_import
import types
try:
from sys import maxsize as maxint
except ImportError:
from sys import maxint
from . import Errors
#
# Constants
#
BOL = 'bol'
EOL = 'eol'
EOF = 'eof'
nl_code = ord('\n')
#
# Helper functions
#
def chars_to_ranges(s):
"""
Return a list of character codes consisting of pairs
[code1a, code1b, code2a, code2b,...] which cover all
the characters in |s|.
"""
char_list = list(s)
char_list.sort()
i = 0
n = len(char_list)
result = []
while i < n:
code1 = ord(char_list[i])
code2 = code1 + 1
i += 1
while i < n and code2 >= ord(char_list[i]):
code2 += 1
i += 1
result.append(code1)
result.append(code2)
return result
def uppercase_range(code1, code2):
"""
If the range of characters from code1 to code2-1 includes any
lower case letters, return the corresponding upper case range.
"""
code3 = max(code1, ord('a'))
code4 = min(code2, ord('z') + 1)
if code3 < code4:
d = ord('A') - ord('a')
return (code3 + d, code4 + d)
else:
return None
def lowercase_range(code1, code2):
"""
If the range of characters from code1 to code2-1 includes any
upper case letters, return the corresponding lower case range.
"""
code3 = max(code1, ord('A'))
code4 = min(code2, ord('Z') + 1)
if code3 < code4:
d = ord('a') - ord('A')
return (code3 + d, code4 + d)
else:
return None
def CodeRanges(code_list):
"""
Given a list of codes as returned by chars_to_ranges, return
an RE which will match a character in any of the ranges.
"""
re_list = [CodeRange(code_list[i], code_list[i + 1]) for i in range(0, len(code_list), 2)]
return Alt(*re_list)
def CodeRange(code1, code2):
"""
CodeRange(code1, code2) is an RE which matches any character
with a code |c| in the range |code1| <= |c| < |code2|.
"""
if code1 <= nl_code < code2:
return Alt(RawCodeRange(code1, nl_code),
RawNewline,
RawCodeRange(nl_code + 1, code2))
else:
return RawCodeRange(code1, code2)
#
# Abstract classes
#
class RE(object):
"""RE is the base class for regular expression constructors.
The following operators are defined on REs:
re1 + re2 is an RE which matches |re1| followed by |re2|
re1 | re2 is an RE which matches either |re1| or |re2|
"""
nullable = 1 # True if this RE can match 0 input symbols
match_nl = 1 # True if this RE can match a string ending with '\n'
str = None # Set to a string to override the class's __str__ result
def build_machine(self, machine, initial_state, final_state,
match_bol, nocase):
"""
This method should add states to |machine| to implement this
RE, starting at |initial_state| and ending at |final_state|.
If |match_bol| is true, the RE must be able to match at the
beginning of a line. If nocase is true, upper and lower case
letters should be treated as equivalent.
"""
raise NotImplementedError("%s.build_machine not implemented" %
self.__class__.__name__)
def build_opt(self, m, initial_state, c):
"""
Given a state |s| of machine |m|, return a new state
reachable from |s| on character |c| or epsilon.
"""
s = m.new_state()
initial_state.link_to(s)
initial_state.add_transition(c, s)
return s
def __add__(self, other):
return Seq(self, other)
def __or__(self, other):
return Alt(self, other)
def __str__(self):
if self.str:
return self.str
else:
return self.calc_str()
def check_re(self, num, value):
if not isinstance(value, RE):
self.wrong_type(num, value, "Plex.RE instance")
def check_string(self, num, value):
if type(value) != type(''):
self.wrong_type(num, value, "string")
def check_char(self, num, value):
self.check_string(num, value)
if len(value) != 1:
raise Errors.PlexValueError("Invalid value for argument %d of Plex.%s. "
"Expected a string of length 1, got: %s" % (
num, self.__class__.__name__, repr(value)))
def wrong_type(self, num, value, expected):
if type(value) == types.InstanceType:
got = "%s.%s instance" % (
value.__class__.__module__, value.__class__.__name__)
else:
got = type(value).__name__
raise Errors.PlexTypeError("Invalid type for argument %d of Plex.%s "
"(expected %s, got %s" % (
num, self.__class__.__name__, expected, got))
#
# Primitive RE constructors
# -------------------------
#
# These are the basic REs from which all others are built.
#
## class Char(RE):
## """
## Char(c) is an RE which matches the character |c|.
## """
## nullable = 0
## def __init__(self, char):
## self.char = char
## self.match_nl = char == '\n'
## def build_machine(self, m, initial_state, final_state, match_bol, nocase):
## c = self.char
## if match_bol and c != BOL:
## s1 = self.build_opt(m, initial_state, BOL)
## else:
## s1 = initial_state
## if c == '\n' or c == EOF:
## s1 = self.build_opt(m, s1, EOL)
## if len(c) == 1:
## code = ord(self.char)
## s1.add_transition((code, code+1), final_state)
## if nocase and is_letter_code(code):
## code2 = other_case_code(code)
## s1.add_transition((code2, code2+1), final_state)
## else:
## s1.add_transition(c, final_state)
## def calc_str(self):
## return "Char(%s)" % repr(self.char)
def Char(c):
"""
Char(c) is an RE which matches the character |c|.
"""
if len(c) == 1:
result = CodeRange(ord(c), ord(c) + 1)
else:
result = SpecialSymbol(c)
result.str = "Char(%s)" % repr(c)
return result
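# Example (illustrative): Char() accepts a one-character string or one of
# the special symbols BOL/EOL/EOF:
#
#     Char('a')   # matches exactly the character 'a'
#     Char(BOL)   # matches the beginning-of-line pseudo-symbol (same as Bol)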
class RawCodeRange(RE):
"""
RawCodeRange(code1, code2) is a low-level RE which matches any character
with a code |c| in the range |code1| <= |c| < |code2|, where the range
does not include newline. For internal use only.
"""
nullable = 0
match_nl = 0
range = None # (code, code)
uppercase_range = None # (code, code) or None
lowercase_range = None # (code, code) or None
def __init__(self, code1, code2):
self.range = (code1, code2)
self.uppercase_range = uppercase_range(code1, code2)
self.lowercase_range = lowercase_range(code1, code2)
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
if match_bol:
initial_state = self.build_opt(m, initial_state, BOL)
initial_state.add_transition(self.range, final_state)
if nocase:
if self.uppercase_range:
initial_state.add_transition(self.uppercase_range, final_state)
if self.lowercase_range:
initial_state.add_transition(self.lowercase_range, final_state)
def calc_str(self):
return "CodeRange(%d,%d)" % (self.code1, self.code2)
class _RawNewline(RE):
"""
RawNewline is a low-level RE which matches a newline character.
For internal use only.
"""
nullable = 0
match_nl = 1
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
if match_bol:
initial_state = self.build_opt(m, initial_state, BOL)
s = self.build_opt(m, initial_state, EOL)
s.add_transition((nl_code, nl_code + 1), final_state)
RawNewline = _RawNewline()
class SpecialSymbol(RE):
"""
SpecialSymbol(sym) is an RE which matches the special input
symbol |sym|, which is one of BOL, EOL or EOF.
"""
nullable = 0
match_nl = 0
sym = None
def __init__(self, sym):
self.sym = sym
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
# Sequences 'bol bol' and 'bol eof' are impossible, so only need
# to allow for bol if sym is eol
if match_bol and self.sym == EOL:
initial_state = self.build_opt(m, initial_state, BOL)
initial_state.add_transition(self.sym, final_state)
class Seq(RE):
"""Seq(re1, re2, re3...) is an RE which matches |re1| followed by
|re2| followed by |re3|..."""
def __init__(self, *re_list):
nullable = 1
for i, re in enumerate(re_list, 1):  # 1-based, to match the error messages
self.check_re(i, re)
nullable = nullable and re.nullable
self.re_list = re_list
self.nullable = nullable
i = len(re_list)
match_nl = 0
while i:
i -= 1
re = re_list[i]
if re.match_nl:
match_nl = 1
break
if not re.nullable:
break
self.match_nl = match_nl
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
re_list = self.re_list
if len(re_list) == 0:
initial_state.link_to(final_state)
else:
s1 = initial_state
n = len(re_list)
for i, re in enumerate(re_list):
if i < n - 1:
s2 = m.new_state()
else:
s2 = final_state
re.build_machine(m, s1, s2, match_bol, nocase)
s1 = s2
match_bol = re.match_nl or (match_bol and re.nullable)
def calc_str(self):
return "Seq(%s)" % ','.join(map(str, self.re_list))
class Alt(RE):
"""Alt(re1, re2, re3...) is an RE which matches either |re1| or
|re2| or |re3|..."""
def __init__(self, *re_list):
self.re_list = re_list
nullable = 0
match_nl = 0
nullable_res = []
non_nullable_res = []
for i, re in enumerate(re_list, 1):
self.check_re(i, re)
if re.nullable:
nullable_res.append(re)
nullable = 1
else:
non_nullable_res.append(re)
if re.match_nl:
match_nl = 1
self.nullable_res = nullable_res
self.non_nullable_res = non_nullable_res
self.nullable = nullable
self.match_nl = match_nl
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
for re in self.nullable_res:
re.build_machine(m, initial_state, final_state, match_bol, nocase)
if self.non_nullable_res:
if match_bol:
initial_state = self.build_opt(m, initial_state, BOL)
for re in self.non_nullable_res:
re.build_machine(m, initial_state, final_state, 0, nocase)
def calc_str(self):
return "Alt(%s)" % ','.join(map(str, self.re_list))
class Rep1(RE):
"""Rep1(re) is an RE which matches one or more repetitions of |re|."""
def __init__(self, re):
self.check_re(1, re)
self.re = re
self.nullable = re.nullable
self.match_nl = re.match_nl
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
s1 = m.new_state()
s2 = m.new_state()
initial_state.link_to(s1)
self.re.build_machine(m, s1, s2, match_bol or self.re.match_nl, nocase)
s2.link_to(s1)
s2.link_to(final_state)
def calc_str(self):
return "Rep1(%s)" % self.re
class SwitchCase(RE):
"""
SwitchCase(re, nocase) is an RE which matches the same strings as RE,
but treating upper and lower case letters according to |nocase|. If
|nocase| is true, case is ignored, otherwise it is not.
"""
re = None
nocase = None
def __init__(self, re, nocase):
self.re = re
self.nocase = nocase
self.nullable = re.nullable
self.match_nl = re.match_nl
def build_machine(self, m, initial_state, final_state, match_bol, nocase):
self.re.build_machine(m, initial_state, final_state, match_bol,
self.nocase)
def calc_str(self):
if self.nocase:
name = "NoCase"
else:
name = "Case"
return "%s(%s)" % (name, self.re)
#
# Composite RE constructors
# -------------------------
#
# These REs are defined in terms of the primitive REs.
#
Empty = Seq()
Empty.__doc__ = \
"""
Empty is an RE which matches the empty string.
"""
Empty.str = "Empty"
def Str1(s):
"""
Str1(s) is an RE which matches the literal string |s|.
"""
result = Seq(*tuple(map(Char, s)))
result.str = "Str(%s)" % repr(s)
return result
def Str(*strs):
"""
Str(s) is an RE which matches the literal string |s|.
Str(s1, s2, s3, ...) is an RE which matches any of |s1| or |s2| or |s3|...
"""
if len(strs) == 1:
return Str1(strs[0])
else:
result = Alt(*tuple(map(Str1, strs)))
result.str = "Str(%s)" % ','.join(map(repr, strs))
return result
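# Example (illustrative):
#
#     Str("if")                  # matches exactly "if"
#     Str("if", "elif", "else")  # matches any one of the three keywords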
def Any(s):
"""
Any(s) is an RE which matches any character in the string |s|.
"""
#result = apply(Alt, tuple(map(Char, s)))
result = CodeRanges(chars_to_ranges(s))
result.str = "Any(%s)" % repr(s)
return result
def AnyBut(s):
"""
AnyBut(s) is an RE which matches any character (including
newline) which is not in the string |s|.
"""
ranges = chars_to_ranges(s)
ranges.insert(0, -maxint)
ranges.append(maxint)
result = CodeRanges(ranges)
result.str = "AnyBut(%s)" % repr(s)
return result
AnyChar = AnyBut("")
AnyChar.__doc__ = \
"""
AnyChar is an RE which matches any single character (including a newline).
"""
AnyChar.str = "AnyChar"
def Range(s1, s2=None):
"""
Range(c1, c2) is an RE which matches any single character in the range
|c1| to |c2| inclusive.
Range(s) where |s| is a string of even length is an RE which matches
any single character in the ranges |s[0]| to |s[1]|, |s[2]| to |s[3]|,...
"""
if s2:
result = CodeRange(ord(s1), ord(s2) + 1)
result.str = "Range(%s,%s)" % (s1, s2)
else:
ranges = []
for i in range(0, len(s1), 2):
ranges.append(CodeRange(ord(s1[i]), ord(s1[i + 1]) + 1))
result = Alt(*ranges)
result.str = "Range(%s)" % repr(s1)
return result
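# Example (illustrative):
#
#     Range('a', 'z')  # any lower-case ASCII letter
#     Range("azAZ09")  # the ranges a-z, A-Z and 0-9 in a single RE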
def Opt(re):
"""
Opt(re) is an RE which matches either |re| or the empty string.
"""
result = Alt(re, Empty)
result.str = "Opt(%s)" % re
return result
def Rep(re):
"""
Rep(re) is an RE which matches zero or more repetitions of |re|.
"""
result = Opt(Rep1(re))
result.str = "Rep(%s)" % re
return result
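# Example (illustrative): the constructors compose via the + (Seq) and
# | (Alt) operators, so a C-like identifier can be written as:
#
#     letter = Range("azAZ") | Char('_')
#     digit = Range("09")
#     ident = letter + Rep(letter | digit)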
def NoCase(re):
"""
NoCase(re) is an RE which matches the same strings as RE, but treating
upper and lower case letters as equivalent.
"""
return SwitchCase(re, nocase=1)
def Case(re):
"""
Case(re) is an RE which matches the same strings as RE, but treating
upper and lower case letters as distinct, i.e. it cancels the effect
of any enclosing NoCase().
"""
return SwitchCase(re, nocase=0)
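# Example (illustrative): an inner Case() cancels an enclosing NoCase():
#
#     NoCase(Str("select"))                # SELECT, Select, select, ...
#     NoCase(Str("go") + Case(Str("TO")))  # the "TO" part stays case-sensitive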
#
# RE Constants
#
Bol = Char(BOL)
Bol.__doc__ = \
"""
Bol is an RE which matches the beginning of a line.
"""
Bol.str = "Bol"
Eol = Char(EOL)
Eol.__doc__ = \
"""
Eol is an RE which matches the end of a line.
"""
Eol.str = "Eol"
Eof = Char(EOF)
Eof.__doc__ = \
"""
Eof is an RE which matches the end of the file.
"""
Eof.str = "Eof"
# ===== File: Cython-0.29.28/Cython/Plex/Scanners.pxd =====
from __future__ import absolute_import
import cython
from Cython.Plex.Actions cimport Action
cdef class Scanner:
cdef public lexicon
cdef public stream
cdef public name
cdef public unicode buffer
cdef public Py_ssize_t buf_start_pos
cdef public Py_ssize_t next_pos
cdef public Py_ssize_t cur_pos
cdef public Py_ssize_t cur_line
cdef public Py_ssize_t cur_line_start
cdef public Py_ssize_t start_pos
cdef public Py_ssize_t start_line
cdef public Py_ssize_t start_col
cdef public text
cdef public initial_state # int?
cdef public state_name
cdef public list queue
cdef public bint trace
cdef public cur_char
cdef public long input_state
cdef public level
@cython.final
@cython.locals(input_state=long)
cdef next_char(self)
@cython.locals(action=Action)
cpdef tuple read(self)
@cython.final
cdef tuple scan_a_token(self)
##cdef tuple position(self) # used frequently by Parsing.py
@cython.final
@cython.locals(cur_pos=Py_ssize_t, cur_line=Py_ssize_t, cur_line_start=Py_ssize_t,
input_state=long, next_pos=Py_ssize_t, state=dict,
buf_start_pos=Py_ssize_t, buf_len=Py_ssize_t, buf_index=Py_ssize_t,
trace=bint, discard=Py_ssize_t, data=unicode, buffer=unicode)
cdef run_machine_inlined(self)
@cython.final
cdef begin(self, state)
@cython.final
cdef produce(self, value, text = *)
# ===== File: Cython-0.29.28/Cython/Plex/Scanners.py =====
# cython: auto_pickle=False
#=======================================================================
#
# Python Lexical Analyser
#
#
# Scanning an input stream
#
#=======================================================================
from __future__ import absolute_import
import cython
cython.declare(BOL=object, EOL=object, EOF=object, NOT_FOUND=object)
from . import Errors
from .Regexps import BOL, EOL, EOF
NOT_FOUND = object()
class Scanner(object):
"""
A Scanner is used to read tokens from a stream of characters
using the token set specified by a Plex.Lexicon.
Constructor:
Scanner(lexicon, stream, name = '')
See the docstring of the __init__ method for details.
Methods:
See the docstrings of the individual methods for more
information.
read() --> (value, text)
Reads the next lexical token from the stream.
position() --> (name, line, col)
Returns the position of the last token read using the
read() method.
begin(state_name)
Causes scanner to change state.
produce(value [, text])
Causes return of a token value to the caller of the
Scanner.
"""
# lexicon = None # Lexicon
# stream = None # file-like object
# name = ''
# buffer = ''
# buf_start_pos = 0 # position in input of start of buffer
# next_pos = 0 # position in input of next char to read
# cur_pos = 0 # position in input of current char
# cur_line = 1 # line number of current char
# cur_line_start = 0 # position in input of start of current line
# start_pos = 0 # position in input of start of token
# start_line = 0 # line number of start of token
# start_col = 0 # position in line of start of token
# text = None # text of last token read
# initial_state = None # Node
# state_name = '' # Name of initial state
# queue = None # list of tokens to be returned
# trace = 0
def __init__(self, lexicon, stream, name='', initial_pos=None):
"""
Scanner(lexicon, stream, name = '')
|lexicon| is a Plex.Lexicon instance specifying the lexical tokens
to be recognised.
|stream| can be a file object or anything which implements a
compatible read() method.
|name| is optional, and may be the name of the file being
scanned or any other identifying string.
"""
self.trace = 0
self.buffer = u''
self.buf_start_pos = 0
self.next_pos = 0
self.cur_pos = 0
self.cur_line = 1
self.start_pos = 0
self.start_line = 0
self.start_col = 0
self.text = None
self.state_name = None
self.lexicon = lexicon
self.stream = stream
self.name = name
self.queue = []
self.initial_state = None
self.begin('')
self.next_pos = 0
self.cur_pos = 0
self.cur_line_start = 0
self.cur_char = BOL
self.input_state = 1
if initial_pos is not None:
# store the column negated so that (cur_pos - cur_line_start)
# still yields the correct start column for the first token
self.cur_line, self.cur_line_start = initial_pos[1], -initial_pos[2]
def read(self):
"""
Read the next lexical token from the stream and return a
tuple (value, text), where |value| is the value associated with
the token as specified by the Lexicon, and |text| is the actual
string read from the stream. Returns (None, '') on end of file.
"""
queue = self.queue
while not queue:
self.text, action = self.scan_a_token()
if action is None:
self.produce(None)
self.eof()
else:
value = action.perform(self, self.text)
if value is not None:
self.produce(value)
result = queue[0]
del queue[0]
return result
def scan_a_token(self):
"""
Read the next input sequence recognised by the machine
and return (text, action). Returns ('', None) on end of
file.
"""
self.start_pos = self.cur_pos
self.start_line = self.cur_line
self.start_col = self.cur_pos - self.cur_line_start
action = self.run_machine_inlined()
if action is not None:
if self.trace:
print("Scanner: read: Performing %s %d:%d" % (
action, self.start_pos, self.cur_pos))
text = self.buffer[
self.start_pos - self.buf_start_pos:
self.cur_pos - self.buf_start_pos]
return (text, action)
else:
if self.cur_pos == self.start_pos:
if self.cur_char is EOL:
self.next_char()
if self.cur_char is None or self.cur_char is EOF:
return (u'', None)
raise Errors.UnrecognizedInput(self, self.state_name)
def run_machine_inlined(self):
"""
Inlined version of run_machine for speed.
"""
state = self.initial_state
cur_pos = self.cur_pos
cur_line = self.cur_line
cur_line_start = self.cur_line_start
cur_char = self.cur_char
input_state = self.input_state
next_pos = self.next_pos
buffer = self.buffer
buf_start_pos = self.buf_start_pos
buf_len = len(buffer)
b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \
None, 0, 0, 0, u'', 0, 0
trace = self.trace
while 1:
if trace: #TRACE#
print("State %d, %d/%d:%s -->" % ( #TRACE#
state['number'], input_state, cur_pos, repr(cur_char))) #TRACE#
# Begin inlined self.save_for_backup()
#action = state.action #@slow
action = state['action'] #@fast
if action is not None:
b_action, b_cur_pos, b_cur_line, b_cur_line_start, b_cur_char, b_input_state, b_next_pos = \
action, cur_pos, cur_line, cur_line_start, cur_char, input_state, next_pos
# End inlined self.save_for_backup()
c = cur_char
#new_state = state.new_state(c) #@slow
new_state = state.get(c, NOT_FOUND) #@fast
if new_state is NOT_FOUND: #@fast
new_state = c and state.get('else') #@fast
if new_state:
if trace: #TRACE#
print("State %d" % new_state['number']) #TRACE#
state = new_state
# Begin inlined: self.next_char()
if input_state == 1:
cur_pos = next_pos
# Begin inlined: c = self.read_char()
buf_index = next_pos - buf_start_pos
if buf_index < buf_len:
c = buffer[buf_index]
next_pos += 1
else:
discard = self.start_pos - buf_start_pos
data = self.stream.read(0x1000)
buffer = self.buffer[discard:] + data
self.buffer = buffer
buf_start_pos += discard
self.buf_start_pos = buf_start_pos
buf_len = len(buffer)
buf_index -= discard
if data:
c = buffer[buf_index]
next_pos += 1
else:
c = u''
# End inlined: c = self.read_char()
if c == u'\n':
cur_char = EOL
input_state = 2
elif not c:
cur_char = EOL
input_state = 4
else:
cur_char = c
elif input_state == 2:
cur_char = u'\n'
input_state = 3
elif input_state == 3:
cur_line += 1
cur_line_start = cur_pos = next_pos
cur_char = BOL
input_state = 1
elif input_state == 4:
cur_char = EOF
input_state = 5
else: # input_state = 5
cur_char = u''
# End inlined self.next_char()
else: # not new_state
if trace: #TRACE#
print("blocked") #TRACE#
# Begin inlined: action = self.back_up()
if b_action is not None:
(action, cur_pos, cur_line, cur_line_start,
cur_char, input_state, next_pos) = \
(b_action, b_cur_pos, b_cur_line, b_cur_line_start,
b_cur_char, b_input_state, b_next_pos)
else:
action = None
break # while 1
# End inlined: action = self.back_up()
self.cur_pos = cur_pos
self.cur_line = cur_line
self.cur_line_start = cur_line_start
self.cur_char = cur_char
self.input_state = input_state
self.next_pos = next_pos
if trace: #TRACE#
if action is not None: #TRACE#
print("Doing %s" % action) #TRACE#
return action
def next_char(self):
input_state = self.input_state
if self.trace:
print("Scanner: next: %s [%d] %d" % (" " * 20, input_state, self.cur_pos))
if input_state == 1:
self.cur_pos = self.next_pos
c = self.read_char()
if c == u'\n':
self.cur_char = EOL
self.input_state = 2
elif not c:
self.cur_char = EOL
self.input_state = 4
else:
self.cur_char = c
elif input_state == 2:
self.cur_char = u'\n'
self.input_state = 3
elif input_state == 3:
self.cur_line += 1
self.cur_line_start = self.cur_pos = self.next_pos
self.cur_char = BOL
self.input_state = 1
elif input_state == 4:
self.cur_char = EOF
self.input_state = 5
else: # input_state = 5
self.cur_char = u''
if self.trace:
print("--> [%d] %d %r" % (input_state, self.cur_pos, self.cur_char))
def position(self):
"""
Return a tuple (name, line, col) representing the location of
the last token read using the read() method. |name| is the
name that was provided to the Scanner constructor; |line|
is the line number in the stream (1-based); |col| is the
position within the line of the first character of the token
(0-based).
"""
return (self.name, self.start_line, self.start_col)
def get_position(self):
"""Python accessible wrapper around position(), only for error reporting.
"""
return self.position()
def begin(self, state_name):
"""Set the current state of the scanner to the named state."""
self.initial_state = (
self.lexicon.get_initial_state(state_name))
self.state_name = state_name
def produce(self, value, text=None):
"""
Called from an action procedure, causes |value| to be returned
as the token value from read(). If |text| is supplied, it is
returned in place of the scanned text.
produce() can be called more than once during a single call to an action
procedure, in which case the tokens are queued up and returned one
at a time by subsequent calls to read(), until the queue is empty,
whereupon scanning resumes.
"""
if text is None:
text = self.text
self.queue.append((value, text))
def eof(self):
"""
Override this method if you want something to be done at
end of file.
"""
# ===== File: Cython-0.29.28/Cython/Plex/Timing.py =====
#
# Get time in platform-dependent way
#
from __future__ import absolute_import
import os
from sys import platform, exit, stderr
if platform == 'mac':
import MacOS
def time():
return MacOS.GetTicks() / 60.0
timekind = "real"
elif hasattr(os, 'times'):
def time():
t = os.times()
return t[0] + t[1]
timekind = "cpu"
else:
stderr.write(
"Don't know how to get time on platform %s\n" % repr(platform))
exit(1)
# ===== File: Cython-0.29.28/Cython/Plex/Traditional.py =====
#=======================================================================
#
# Python Lexical Analyser
#
# Traditional Regular Expression Syntax
#
#=======================================================================
from __future__ import absolute_import
from .Regexps import Alt, Seq, Rep, Rep1, Opt, Any, AnyBut, Bol, Eol, Char
from .Errors import PlexError
class RegexpSyntaxError(PlexError):
pass
def re(s):
"""
Convert traditional string representation of regular expression |s|
into Plex representation.
"""
return REParser(s).parse_re()
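# Example (illustrative, showing roughly equivalent constructor forms):
#
#     re("a|b")     # ~ Alt(Char('a'), Char('b'))
#     re("ab*c?")   # ~ Seq(Char('a'), Rep(Char('b')), Opt(Char('c')))
#     re("[^0-9]")  # ~ AnyBut("0123456789")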
class REParser(object):
def __init__(self, s):
self.s = s
self.i = -1
self.end = 0
self.next()
def parse_re(self):
re = self.parse_alt()
if not self.end:
self.error("Unexpected %s" % repr(self.c))
return re
def parse_alt(self):
"""Parse a set of alternative regexps."""
re = self.parse_seq()
if self.c == '|':
re_list = [re]
while self.c == '|':
self.next()
re_list.append(self.parse_seq())
re = Alt(*re_list)
return re
def parse_seq(self):
"""Parse a sequence of regexps."""
re_list = []
while not self.end and not self.c in "|)":
re_list.append(self.parse_mod())
return Seq(*re_list)
def parse_mod(self):
"""Parse a primitive regexp followed by *, +, ? modifiers."""
re = self.parse_prim()
while not self.end and self.c in "*+?":
if self.c == '*':
re = Rep(re)
elif self.c == '+':
re = Rep1(re)
else: # self.c == '?'
re = Opt(re)
self.next()
return re
def parse_prim(self):
"""Parse a primitive regexp."""
c = self.get()
if c == '.':
re = AnyBut("\n")
elif c == '^':
re = Bol
elif c == '$':
re = Eol
elif c == '(':
re = self.parse_alt()
self.expect(')')
elif c == '[':
re = self.parse_charset()
self.expect(']')
else:
if c == '\\':
c = self.get()
re = Char(c)
return re
def parse_charset(self):
"""Parse a charset. Does not include the surrounding []."""
char_list = []
invert = 0
if self.c == '^':
invert = 1
self.next()
if self.c == ']':
char_list.append(']')
self.next()
while not self.end and self.c != ']':
c1 = self.get()
if self.c == '-' and self.lookahead(1) != ']':
self.next()
c2 = self.get()
for a in range(ord(c1), ord(c2) + 1):
char_list.append(chr(a))
else:
char_list.append(c1)
chars = ''.join(char_list)
if invert:
return AnyBut(chars)
else:
return Any(chars)
def next(self):
"""Advance to the next char."""
s = self.s
i = self.i = self.i + 1
if i < len(s):
self.c = s[i]
else:
self.c = ''
self.end = 1
def get(self):
if self.end:
self.error("Premature end of string")
c = self.c
self.next()
return c
def lookahead(self, n):
"""Look ahead n chars."""
j = self.i + n
if j < len(self.s):
return self.s[j]
else:
return ''
def expect(self, c):
"""
Expect to find character |c| at current position.
Raises an exception otherwise.
"""
if self.c == c:
self.next()
else:
self.error("Missing %s" % repr(c))
def error(self, mess):
"""Raise exception to signal syntax error in regexp."""
raise RegexpSyntaxError("Syntax error in regexp %s at position %d: %s" % (
repr(self.s), self.i, mess))
# ===== File: Cython-0.29.28/Cython/Plex/Transitions.py =====
#
# Plex - Transition Maps
#
# This version represents state sets directly as dicts for speed.
#
from __future__ import absolute_import
try:
from sys import maxsize as maxint
except ImportError:
from sys import maxint
class TransitionMap(object):
"""
A TransitionMap maps an input event to a set of states.
An input event is one of: a range of character codes,
the empty string (representing an epsilon move), or one
of the special symbols BOL, EOL, EOF.
For characters, this implementation compactly represents
the map by means of a list:
[code_0, states_0, code_1, states_1, code_2, states_2,
..., code_n-1, states_n-1, code_n]
where |code_i| is a character code, and |states_i| is a
set of states corresponding to characters with codes |c|
in the range |code_i| <= |c| < |code_i+1| (half-open, like CodeRange).
The following invariants hold:
n >= 1
code_0 == -maxint
code_n == maxint
code_i < code_i+1 for i in 0..n-1
states_0 == states_n-1
Mappings for the special events '', BOL, EOL, EOF are
kept separately in a dictionary.
"""
map = None # The list of codes and states
special = None # Mapping for special events
def __init__(self, map=None, special=None):
if not map:
map = [-maxint, {}, maxint]
if not special:
special = {}
self.map = map
self.special = special
#self.check() ###
def add(self, event, new_state,
TupleType=tuple):
"""
Add transition to |new_state| on |event|.
"""
if type(event) is TupleType:
code0, code1 = event
i = self.split(code0)
j = self.split(code1)
map = self.map
while i < j:
map[i + 1][new_state] = 1
i += 2
else:
self.get_special(event)[new_state] = 1
def add_set(self, event, new_set,
TupleType=tuple):
"""
Add transitions to the states in |new_set| on |event|.
"""
if type(event) is TupleType:
code0, code1 = event
i = self.split(code0)
j = self.split(code1)
map = self.map
while i < j:
map[i + 1].update(new_set)
i += 2
else:
self.get_special(event).update(new_set)
def get_epsilon(self,
none=None):
"""
Return the mapping for epsilon, or None.
"""
return self.special.get('', none)
def iteritems(self,
len=len):
"""
Return the mapping as an iterable of ((code1, code2), state_set) and
(special_event, state_set) pairs.
"""
result = []
map = self.map
else_set = map[1]
i = 0
n = len(map) - 1
code0 = map[0]
while i < n:
set = map[i + 1]
code1 = map[i + 2]
if set or else_set:
result.append(((code0, code1), set))
code0 = code1
i += 2
for event, set in self.special.items():
if set:
result.append((event, set))
return iter(result)
items = iteritems
# ------------------- Private methods --------------------
def split(self, code,
len=len, maxint=maxint):
"""
Search the list for the position of the split point for |code|,
inserting a new split point if necessary. Returns index |i| such
that |code| == |map[i]|.
"""
# We use a funky variation on binary search.
map = self.map
hi = len(map) - 1
# Special case: code == map[-1]
if code == maxint:
return hi
# General case
lo = 0
# loop invariant: map[lo] <= code < map[hi] and hi - lo >= 2
while hi - lo >= 4:
# Find midpoint truncated to even index
mid = ((lo + hi) // 2) & ~1
if code < map[mid]:
hi = mid
else:
lo = mid
# map[lo] <= code < map[hi] and hi - lo == 2
if map[lo] == code:
return lo
else:
map[hi:hi] = [code, map[hi - 1].copy()]
#self.check() ###
return hi
def get_special(self, event):
"""
Get state set for special event, adding a new entry if necessary.
"""
special = self.special
set = special.get(event, None)
if not set:
set = {}
special[event] = set
return set
# --------------------- Conversion methods -----------------------
def __str__(self):
map_strs = []
map = self.map
n = len(map)
i = 0
while i < n:
code = map[i]
if code == -maxint:
code_str = "-inf"
elif code == maxint:
code_str = "inf"
else:
code_str = str(code)
map_strs.append(code_str)
i += 1
if i < n:
map_strs.append(state_set_str(map[i]))
i += 1
special_strs = {}
for event, set in self.special.items():
special_strs[event] = state_set_str(set)
return "[%s]+%s" % (
','.join(map_strs),
special_strs
)
# --------------------- Debugging methods -----------------------
def check(self):
"""Check data structure integrity."""
if not self.map[-3] < self.map[-1]:
print(self)
assert 0
def dump(self, file):
map = self.map
i = 0
n = len(map) - 1
while i < n:
self.dump_range(map[i], map[i + 2], map[i + 1], file)
i += 2
for event, set in self.special.items():
if set:
if not event:
event = 'empty'
self.dump_trans(event, set, file)
def dump_range(self, code0, code1, set, file):
if set:
if code0 == -maxint:
if code1 == maxint:
k = "any"
else:
k = "< %s" % self.dump_char(code1)
elif code1 == maxint:
k = "> %s" % self.dump_char(code0 - 1)
elif code0 == code1 - 1:
k = self.dump_char(code0)
else:
k = "%s..%s" % (self.dump_char(code0),
self.dump_char(code1 - 1))
self.dump_trans(k, set, file)
def dump_char(self, code):
if 0 <= code <= 255:
return repr(chr(code))
else:
return "chr(%d)" % code
def dump_trans(self, key, set, file):
file.write(" %s --> %s\n" % (key, self.dump_set(set)))
def dump_set(self, set):
return state_set_str(set)
#
# State set manipulation functions
#
#def merge_state_sets(set1, set2):
# for state in set2.keys():
# set1[state] = 1
def state_set_str(set):
return "[%s]" % ','.join(["S%d" % state.number for state in set])
# ===== File: Cython-0.29.28/Cython/Plex/__init__.py =====
#=======================================================================
#
# Python Lexical Analyser
#
#=======================================================================
"""
The Plex module provides lexical analysers with similar capabilities
to GNU Flex. The following classes and functions are exported;
see the attached docstrings for more information.
Scanner For scanning a character stream under the
direction of a Lexicon.
Lexicon For constructing a lexical definition
to be used by a Scanner.
Str, Any, AnyBut, AnyChar, Seq, Alt, Opt, Rep, Rep1,
Bol, Eol, Eof, Empty
Regular expression constructors, for building pattern
definitions for a Lexicon.
State For defining scanner states when creating a
Lexicon.
TEXT, IGNORE, Begin
Actions for associating with patterns when
creating a Lexicon.
"""
from __future__ import absolute_import
from .Actions import TEXT, IGNORE, Begin
from .Lexicons import Lexicon, State
from .Regexps import RE, Seq, Alt, Rep1, Empty, Str, Any, AnyBut, AnyChar, Range
from .Regexps import Opt, Rep, Bol, Eol, Eof, Case, NoCase
from .Scanners import Scanner
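# Example (illustrative usage sketch; the plain values 'int' and 'name'
# stand in as token values returned by Scanner.read()):
#
#     from io import StringIO
#     from Cython.Plex import (Lexicon, Scanner, Str, Any, Rep1, Range,
#                              TEXT, IGNORE)
#     lexicon = Lexicon([
#         (Rep1(Range("09")), 'int'),
#         (Rep1(Range("azAZ")), 'name'),
#         (Any(" \t\n"), IGNORE),
#     ])
#     scanner = Scanner(lexicon, StringIO(u"abc 123"), "<demo>")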
# ===== File: Cython-0.29.28/Cython/Runtime/__init__.py =====
# empty file
# ===== File: Cython-0.29.28/Cython/Runtime/refnanny.pyx =====
# cython: language_level=3, auto_pickle=False
from cpython.ref cimport PyObject, Py_INCREF, Py_DECREF, Py_XDECREF, Py_XINCREF
from cpython.exc cimport PyErr_Fetch, PyErr_Restore
from cpython.pystate cimport PyThreadState_Get
cimport cython
loglevel = 0
reflog = []
cdef log(level, action, obj, lineno):
if loglevel >= level:
reflog.append((lineno, action, id(obj)))
LOG_NONE, LOG_ALL = range(2)
@cython.final
cdef class Context(object):
cdef readonly object name, filename
cdef readonly dict refs
cdef readonly list errors
cdef readonly Py_ssize_t start
def __cinit__(self, name, line=0, filename=None):
self.name = name
self.start = line
self.filename = filename
self.refs = {} # id -> (count, [lineno])
self.errors = []
cdef regref(self, obj, lineno, bint is_null):
log(LOG_ALL, u'regref', u"" if is_null else obj, lineno)
if is_null:
self.errors.append(f"NULL argument on line {lineno}")
return
id_ = id(obj)
count, linenumbers = self.refs.get(id_, (0, []))
self.refs[id_] = (count + 1, linenumbers)
linenumbers.append(lineno)
cdef bint delref(self, obj, lineno, bint is_null) except -1:
# returns whether it is ok to do the decref operation
log(LOG_ALL, u'delref', u"" if is_null else obj, lineno)
if is_null:
self.errors.append(f"NULL argument on line {lineno}")
return False
id_ = id(obj)
count, linenumbers = self.refs.get(id_, (0, []))
if count == 0:
self.errors.append(f"Too many decrefs on line {lineno}, reference acquired on lines {linenumbers!r}")
return False
elif count == 1:
del self.refs[id_]
return True
else:
self.refs[id_] = (count - 1, linenumbers)
return True
cdef end(self):
if self.refs:
msg = u"References leaked:"
for count, linenos in self.refs.itervalues():
msg += f"\n ({count}) acquired on lines: {u', '.join([f'{x}' for x in linenos])}"
self.errors.append(msg)
if self.errors:
return u"\n".join([u'REFNANNY: '+error for error in self.errors])
else:
return None
cdef void report_unraisable(object e=None):
try:
if e is None:
import sys
e = sys.exc_info()[1]
print(f"refnanny raised an exception: {e}")
except:
pass # We absolutely cannot exit with an exception
# All Python operations must happen after any existing
# exception has been fetched, in case we are called from
# exception-handling code.
cdef PyObject* SetupContext(char* funcname, int lineno, char* filename) except NULL:
if Context is None:
# Context may be None during finalize phase.
# In that case, we don't want to be doing anything fancy
# like caching and resetting exceptions.
return NULL
cdef (PyObject*) type = NULL, value = NULL, tb = NULL, result = NULL
PyThreadState_Get()
PyErr_Fetch(&type, &value, &tb)
try:
ctx = Context(funcname, lineno, filename)
Py_INCREF(ctx)
result = <PyObject*>ctx
except Exception as e:
report_unraisable(e)
PyErr_Restore(type, value, tb)
return result
cdef void GOTREF(PyObject* ctx, PyObject* p_obj, int lineno):
if ctx == NULL: return
cdef (PyObject*) type = NULL, value = NULL, tb = NULL
PyErr_Fetch(&type, &value, &tb)
try:
try:
if p_obj is NULL:
(<Context>ctx).regref(None, lineno, True)
else:
(<Context>ctx).regref(<object>p_obj, lineno, False)
except:
report_unraisable()
except:
# __Pyx_GetException may itself raise errors
pass
PyErr_Restore(type, value, tb)
cdef int GIVEREF_and_report(PyObject* ctx, PyObject* p_obj, int lineno):
if ctx == NULL: return 1
cdef (PyObject*) type = NULL, value = NULL, tb = NULL
cdef bint decref_ok = False
PyErr_Fetch(&type, &value, &tb)
try:
try:
if p_obj is NULL:
decref_ok = (<Context>ctx).delref(None, lineno, True)
else:
decref_ok = (<Context>ctx).delref(<object>p_obj, lineno, False)
except:
report_unraisable()
except:
# __Pyx_GetException may itself raise errors
pass
PyErr_Restore(type, value, tb)
return decref_ok
cdef void GIVEREF(PyObject* ctx, PyObject* p_obj, int lineno):
GIVEREF_and_report(ctx, p_obj, lineno)
cdef void INCREF(PyObject* ctx, PyObject* obj, int lineno):
Py_XINCREF(obj)
PyThreadState_Get()
GOTREF(ctx, obj, lineno)
cdef void DECREF(PyObject* ctx, PyObject* obj, int lineno):
if GIVEREF_and_report(ctx, obj, lineno):
Py_XDECREF(obj)
PyThreadState_Get()
cdef void FinishContext(PyObject** ctx):
if ctx == NULL or ctx[0] == NULL: return
cdef (PyObject*) type = NULL, value = NULL, tb = NULL
cdef object errors = None
cdef Context context
PyThreadState_Get()
PyErr_Fetch(&type, &value, &tb)
try:
try:
context = <Context>ctx[0]
errors = context.end()
if errors:
print(f"{context.filename.decode('latin1')}: {context.name.decode('latin1')}()")
print(errors)
context = None
except:
report_unraisable()
except:
# __Pyx_GetException may itself raise errors
pass
Py_XDECREF(ctx[0])
ctx[0] = NULL
PyErr_Restore(type, value, tb)
ctypedef struct RefNannyAPIStruct:
void (*INCREF)(PyObject*, PyObject*, int)
void (*DECREF)(PyObject*, PyObject*, int)
void (*GOTREF)(PyObject*, PyObject*, int)
void (*GIVEREF)(PyObject*, PyObject*, int)
PyObject* (*SetupContext)(char*, int, char*) except NULL
void (*FinishContext)(PyObject**)
cdef RefNannyAPIStruct api
api.INCREF = INCREF
api.DECREF = DECREF
api.GOTREF = GOTREF
api.GIVEREF = GIVEREF
api.SetupContext = SetupContext
api.FinishContext = FinishContext
cdef extern from "Python.h":
object PyLong_FromVoidPtr(void*)
RefNannyAPI = PyLong_FromVoidPtr(&api)
# ===== File: Cython-0.29.28/Cython/Shadow.py =====
# cython.* namespace for pure mode.
from __future__ import absolute_import
__version__ = "0.29.28"
try:
from __builtin__ import basestring
except ImportError:
basestring = str
# BEGIN shameless copy from Cython/minivect/minitypes.py
class _ArrayType(object):
is_array = True
subtypes = ['dtype']
def __init__(self, dtype, ndim, is_c_contig=False, is_f_contig=False,
inner_contig=False, broadcasting=None):
self.dtype = dtype
self.ndim = ndim
self.is_c_contig = is_c_contig
self.is_f_contig = is_f_contig
self.inner_contig = inner_contig or is_c_contig or is_f_contig
self.broadcasting = broadcasting
def __repr__(self):
axes = [":"] * self.ndim
if self.is_c_contig:
axes[-1] = "::1"
elif self.is_f_contig:
axes[0] = "::1"
return "%s[%s]" % (self.dtype, ", ".join(axes))
def index_type(base_type, item):
"""
Support array type creation by slicing, e.g. double[:, :] specifies
a 2D strided array of doubles. The syntax is the same as for
Cython memoryviews.
"""
class InvalidTypeSpecification(Exception):
pass
def verify_slice(s):
if s.start or s.stop or s.step not in (None, 1):
raise InvalidTypeSpecification(
"Only a step of 1 may be provided to indicate C or "
"Fortran contiguity")
if isinstance(item, tuple):
step_idx = None
for idx, s in enumerate(item):
verify_slice(s)
if s.step and (step_idx or idx not in (0, len(item) - 1)):
raise InvalidTypeSpecification(
"Step may only be provided once, and only in the "
"first or last dimension.")
if s.step == 1:
step_idx = idx
return _ArrayType(base_type, len(item),
is_c_contig=step_idx == len(item) - 1,
is_f_contig=step_idx == 0)
elif isinstance(item, slice):
verify_slice(item)
return _ArrayType(base_type, 1, is_c_contig=bool(item.step))
else:
# int[8] etc.
assert int(item) == item # array size must be a plain integer
return array(base_type, item)
# END shameless copy
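# Example (illustrative): in pure mode, slicing a type spells out the same
# memoryview layouts as in Cython syntax:
#
#     double[:, :]    # 2D strided array of doubles
#     double[:, ::1]  # 2D C-contiguous array
#     double[::1, :]  # 2D Fortran-contiguous array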
compiled = False
_Unspecified = object()
# Function decorators
def _empty_decorator(x):
return x
def locals(**arg_types):
return _empty_decorator
def test_assert_path_exists(*paths):
return _empty_decorator
def test_fail_if_path_exists(*paths):
return _empty_decorator
class _EmptyDecoratorAndManager(object):
def __call__(self, x):
return x
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
pass
class _Optimization(object):
pass
cclass = ccall = cfunc = _EmptyDecoratorAndManager()
returns = wraparound = boundscheck = initializedcheck = nonecheck = \
embedsignature = cdivision = cdivision_warnings = \
always_allows_keywords = profile = linetrace = infer_types = \
unraisable_tracebacks = freelist = \
lambda _: _EmptyDecoratorAndManager()
exceptval = lambda _=None, check=True: _EmptyDecoratorAndManager()
overflowcheck = lambda _: _EmptyDecoratorAndManager()
optimization = _Optimization()
overflowcheck.fold = optimization.use_switch = \
optimization.unpack_method_calls = lambda arg: _EmptyDecoratorAndManager()
final = internal = type_version_tag = no_gc_clear = no_gc = _empty_decorator
binding = lambda _: _empty_decorator
_cython_inline = None
def inline(f, *args, **kwds):
if isinstance(f, basestring):
global _cython_inline
if _cython_inline is None:
from Cython.Build.Inline import cython_inline as _cython_inline
return _cython_inline(f, *args, **kwds)
else:
assert len(args) == len(kwds) == 0
return f
def compile(f):
from Cython.Build.Inline import RuntimeCompiledFunction
return RuntimeCompiledFunction(f)
# Special functions
def cdiv(a, b):
# C semantics: truncate the quotient toward zero
# (Python's // floors it instead).
q = a // b
if q < 0 and q * b != a:
q += 1
return q
def cmod(a, b):
# C semantics: the remainder takes the sign of the dividend.
r = a % b
if r != 0 and (r < 0) != (a < 0):
r -= b
return r
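# Worked example (illustrative): C division truncates toward zero, while
# Python floors, so the two disagree exactly when the operand signs differ:
#
#     cdiv(-7, 2)  # -> -3   (Python: -7 // 2 == -4)
#     cmod(-7, 2)  # -> -1   (Python: -7 %  2 ==  1)
#     cdiv(7, 2)   # ->  3   (same as Python here)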
# Emulated language constructs
def cast(type, *args, **kwargs):
kwargs.pop('typecheck', None)
assert not kwargs
if hasattr(type, '__call__'):
return type(*args)
else:
return args[0]
def sizeof(arg):
return 1
def typeof(arg):
return arg.__class__.__name__
# return type(arg)
def address(arg):
return pointer(type(arg))([arg])
def declare(type=None, value=_Unspecified, **kwds):
if type not in (None, object) and hasattr(type, '__call__'):
if value is not _Unspecified:
return type(value)
else:
return type()
else:
return value
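# Example (illustrative, as used from pure-Python code):
#
#     import cython
#     x = cython.declare(cython.int, 5)  # like 'cdef int x = 5' when compiled
#     y = cython.declare(cython.double)  # like 'cdef double y'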
class _nogil(object):
"""Support for 'with nogil' statement and @nogil decorator.
"""
def __call__(self, x):
if callable(x):
# Used as function decorator => return the function unchanged.
return x
# Used as conditional context manager or to create an "@nogil(True/False)" decorator => keep going.
return self
def __enter__(self):
pass
def __exit__(self, exc_class, exc, tb):
return exc_class is None
nogil = _nogil()
gil = _nogil()
del _nogil
# Emulated types
class CythonMetaType(type):
def __getitem__(type, ix):
return array(type, ix)
CythonTypeObject = CythonMetaType('CythonTypeObject', (object,), {})
class CythonType(CythonTypeObject):
def _pointer(self, n=1):
for i in range(n):
self = pointer(self)
return self
class PointerType(CythonType):
def __init__(self, value=None):
if isinstance(value, (ArrayType, PointerType)):
self._items = [cast(self._basetype, a) for a in value._items]
elif isinstance(value, list):
self._items = [cast(self._basetype, a) for a in value]
elif value is None or value == 0:
self._items = []
else:
raise ValueError
def __getitem__(self, ix):
if ix < 0:
raise IndexError("negative indexing not allowed in C")
return self._items[ix]
def __setitem__(self, ix, value):
if ix < 0:
raise IndexError("negative indexing not allowed in C")
self._items[ix] = cast(self._basetype, value)
def __eq__(self, value):
if value is None and not self._items:
return True
elif type(self) != type(value):
return False
else:
return not self._items and not value._items
def __repr__(self):
return "%s *" % (self._basetype,)
class ArrayType(PointerType):
def __init__(self):
self._items = [None] * self._n
class StructType(CythonType):
def __init__(self, cast_from=_Unspecified, **data):
if cast_from is not _Unspecified:
# do cast
if len(data) > 0:
raise ValueError('Cannot accept keyword arguments when casting.')
if type(cast_from) is not type(self):
raise ValueError('Cannot cast from %s'%cast_from)
for key, value in cast_from.__dict__.items():
setattr(self, key, value)
else:
for key, value in data.items():
setattr(self, key, value)
def __setattr__(self, key, value):
if key in self._members:
self.__dict__[key] = cast(self._members[key], value)
else:
raise AttributeError("Struct has no member '%s'" % key)
class UnionType(CythonType):
def __init__(self, cast_from=_Unspecified, **data):
if cast_from is not _Unspecified:
# do type cast
if len(data) > 0:
raise ValueError('Cannot accept keyword arguments when casting.')
if isinstance(cast_from, dict):
datadict = cast_from
elif type(cast_from) is type(self):
datadict = cast_from.__dict__
else:
raise ValueError('Cannot cast from %s'%cast_from)
else:
datadict = data
if len(datadict) > 1:
raise AttributeError("Union can only store one field at a time.")
for key, value in datadict.items():
setattr(self, key, value)
def __setattr__(self, key, value):
if key == '__dict__':
CythonType.__setattr__(self, key, value)
elif key in self._members:
self.__dict__ = {key: cast(self._members[key], value)}
else:
raise AttributeError("Union has no member '%s'" % key)
def pointer(basetype):
class PointerInstance(PointerType):
_basetype = basetype
return PointerInstance
def array(basetype, n):
class ArrayInstance(ArrayType):
_basetype = basetype
_n = n
return ArrayInstance
def struct(**members):
class StructInstance(StructType):
_members = members
for key in members:
setattr(StructInstance, key, None)
return StructInstance
def union(**members):
class UnionInstance(UnionType):
_members = members
for key in members:
setattr(UnionInstance, key, None)
return UnionInstance
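# Example (illustrative, as used from pure-Python code):
#
#     import cython
#     Point = cython.struct(x=cython.double, y=cython.double)
#     p = Point(x=1.0, y=2.0)
#     p.x = 3        # coerced through the member type, giving 3.0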
class typedef(CythonType):
def __init__(self, type, name=None):
self._basetype = type
self.name = name
def __call__(self, *arg):
value = cast(self._basetype, *arg)
return value
def __repr__(self):
return self.name or str(self._basetype)
__getitem__ = index_type
class _FusedType(CythonType):
pass
def fused_type(*args):
if not args:
raise TypeError("Expected at least one type as argument")
# Find the numeric type with biggest rank if all types are numeric
rank = -1
for type in args:
if type not in (py_int, py_long, py_float, py_complex):
break
index = type_ordering.index(type)
if index > rank:
result_type = type
rank = index
else:
return result_type
# Not a simple numeric type, return a fused type instance. The result
# isn't really meant to be used, as we can't keep track of the context in
# pure-mode. Casting won't do anything in this case.
return _FusedType()
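# Example (illustrative; note this emulation ranks only the plain numeric
# typedefs py_int/py_long/py_float/py_complex):
#
#     fused_type(py_int, py_float)  # -> py_float, the highest-ranked type
#     fused_type(py_int, unicode)   # -> an opaque _FusedType() instance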
def _specialized_from_args(signatures, args, kwargs):
"Perhaps this should be implemented in a TreeFragment in Cython code"
raise Exception("yet to be implemented")
py_int = typedef(int, "int")
try:
py_long = typedef(long, "long")
except NameError: # Py3
py_long = typedef(int, "long")
py_float = typedef(float, "float")
py_complex = typedef(complex, "double complex")
# Predefined types
int_types = ['char', 'short', 'Py_UNICODE', 'int', 'Py_UCS4', 'long', 'longlong', 'Py_ssize_t', 'size_t']
float_types = ['longdouble', 'double', 'float']
complex_types = ['longdoublecomplex', 'doublecomplex', 'floatcomplex', 'complex']
other_types = ['bint', 'void', 'Py_tss_t']
to_repr = {
'longlong': 'long long',
'longdouble': 'long double',
'longdoublecomplex': 'long double complex',
'doublecomplex': 'double complex',
'floatcomplex': 'float complex',
}.get
gs = globals()
# note: cannot simply name the unicode type here as 2to3 gets in the way and replaces it by str
try:
import __builtin__ as builtins
except ImportError: # Py3
import builtins
gs['unicode'] = typedef(getattr(builtins, 'unicode', str), 'unicode')
del builtins
for name in int_types:
reprname = to_repr(name, name)
gs[name] = typedef(py_int, reprname)
if name not in ('Py_UNICODE', 'Py_UCS4') and not name.endswith('size_t'):
gs['u'+name] = typedef(py_int, "unsigned " + reprname)
gs['s'+name] = typedef(py_int, "signed " + reprname)
for name in float_types:
gs[name] = typedef(py_float, to_repr(name, name))
for name in complex_types:
gs[name] = typedef(py_complex, to_repr(name, name))
bint = typedef(bool, "bint")
void = typedef(None, "void")
Py_tss_t = typedef(None, "Py_tss_t")
for t in int_types + float_types + complex_types + other_types:
for i in range(1, 4):
gs["%s_%s" % ('p'*i, t)] = gs[t]._pointer(i)
NULL = gs['p_void'](0)
# looks like 'gs' has some users out there by now...
#del gs
integral = floating = numeric = _FusedType()
type_ordering = [py_int, py_long, py_float, py_complex]
class CythonDotParallel(object):
"""
The cython.parallel module.
"""
__all__ = ['parallel', 'prange', 'threadid']
def parallel(self, num_threads=None):
return nogil
def prange(self, start=0, stop=None, step=1, nogil=False, schedule=None, chunksize=None, num_threads=None):
if stop is None:
stop = start
start = 0
return range(start, stop, step)
def threadid(self):
return 0
# def threadsavailable(self):
# return 1
import sys
sys.modules['cython.parallel'] = CythonDotParallel()
del sys
# ===== File: Cython-0.29.28/Cython/StringIOTree.pxd =====
cimport cython
cdef class StringIOTree:
cdef public list prepended_children
cdef public object stream
cdef public object write
cdef public list markers
@cython.locals(x=StringIOTree)
cpdef getvalue(self)
@cython.locals(child=StringIOTree)
cpdef copyto(self, target)
cpdef commit(self)
#def insert(self, iotree)
#def insertion_point(self)
@cython.locals(c=StringIOTree)
cpdef allmarkers(self)
# ===== File: Cython-0.29.28/Cython/StringIOTree.py =====
# cython: auto_pickle=False
r"""
Implements a buffer with insertion points. When you know you need to
"get back" to a place and write more later, simply call insertion_point()
at that spot and get a new StringIOTree object that is "left behind".
EXAMPLE:
>>> a = StringIOTree()
>>> _= a.write('first\n')
>>> b = a.insertion_point()
>>> _= a.write('third\n')
>>> _= b.write('second\n')
>>> a.getvalue().split()
['first', 'second', 'third']
>>> c = b.insertion_point()
>>> d = c.insertion_point()
>>> _= d.write('alpha\n')
>>> _= b.write('gamma\n')
>>> _= c.write('beta\n')
>>> b.getvalue().split()
['second', 'alpha', 'beta', 'gamma']
>>> i = StringIOTree()
>>> d.insert(i)
>>> _= i.write('inserted\n')
>>> out = StringIO()
>>> a.copyto(out)
>>> out.getvalue().split()
['first', 'second', 'alpha', 'inserted', 'beta', 'gamma', 'third']
"""
from __future__ import absolute_import #, unicode_literals
try:
# Prefer cStringIO since io.StringIO() does not support writing 'str' in Py2.
from cStringIO import StringIO
except ImportError:
from io import StringIO
class StringIOTree(object):
"""
See module docs.
"""
def __init__(self, stream=None):
self.prepended_children = []
if stream is None:
stream = StringIO()
self.stream = stream
self.write = stream.write
self.markers = []
def getvalue(self):
content = [x.getvalue() for x in self.prepended_children]
content.append(self.stream.getvalue())
return "".join(content)
def copyto(self, target):
"""Potentially cheaper than getvalue as no string concatenation
needs to happen."""
for child in self.prepended_children:
child.copyto(target)
stream_content = self.stream.getvalue()
if stream_content:
target.write(stream_content)
def commit(self):
# Save what we have written until now so that the buffer
# itself is empty -- this makes it ready for insertion
if self.stream.tell():
self.prepended_children.append(StringIOTree(self.stream))
self.prepended_children[-1].markers = self.markers
self.markers = []
self.stream = StringIO()
self.write = self.stream.write
def insert(self, iotree):
"""
Insert a StringIOTree (and all of its contents) at this location.
Further writing to self appears after what is inserted.
"""
self.commit()
self.prepended_children.append(iotree)
def insertion_point(self):
"""
Returns a new StringIOTree, which is left behind at the current position
(i.e. what is written to the result will appear right before whatever is
next written to self).
Calling getvalue() or copyto() on the result will only return the
contents written to it.
"""
# Save what we have written until now
# This is so that getvalue on the result doesn't include it.
self.commit()
# Construct the new forked object to return
other = StringIOTree()
self.prepended_children.append(other)
return other
def allmarkers(self):
children = self.prepended_children
return [m for c in children for m in c.allmarkers()] + self.markers
# ===== File: Cython-0.29.28/Cython/Tempita/__init__.py =====
# The original Tempita implements all of its templating code here.
# Moved it to _tempita.py to make the compilation portable.
from ._tempita import *
# ===== File: Cython-0.29.28/Cython/Tempita/_looper.py =====
"""
Helper for looping over sequences, particular in templates.
Often in a loop in a template it's handy to know what's next up,
previously up, if this is the first or last item in the sequence, etc.
These can be awkward to manage in a normal Python loop, but using the
looper you can get a better sense of the context. Use like::
>>> for loop, item in looper(['a', 'b', 'c']):
... print loop.number, item
... if not loop.last:
... print '---'
1 a
---
2 b
---
3 c
"""
import sys
from Cython.Tempita.compat3 import basestring_
__all__ = ['looper']
class looper(object):
"""
Helper for looping (particularly in templates)
Use this like::
for loop, item in looper(seq):
if loop.first:
...
"""
def __init__(self, seq):
self.seq = seq
def __iter__(self):
return looper_iter(self.seq)
def __repr__(self):
return '<%s for %r>' % (
self.__class__.__name__, self.seq)
class looper_iter(object):
def __init__(self, seq):
self.seq = list(seq)
self.pos = 0
def __iter__(self):
return self
def __next__(self):
if self.pos >= len(self.seq):
raise StopIteration
result = loop_pos(self.seq, self.pos), self.seq[self.pos]
self.pos += 1
return result
if sys.version < "3":
next = __next__
class loop_pos(object):
def __init__(self, seq, pos):
self.seq = seq
self.pos = pos
def __repr__(self):
return '<loop pos=%r at %r>' % (
self.seq[self.pos], self.pos)
def index(self):
return self.pos
index = property(index)
def number(self):
return self.pos + 1
number = property(number)
def item(self):
return self.seq[self.pos]
item = property(item)
def __next__(self):
try:
return self.seq[self.pos + 1]
except IndexError:
return None
__next__ = property(__next__)
if sys.version < "3":
next = __next__
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos - 1]
previous = property(previous)
def odd(self):
return not self.pos % 2
odd = property(odd)
def even(self):
return self.pos % 2
even = property(even)
def first(self):
return self.pos == 0
first = property(first)
def last(self):
return self.pos == len(self.seq) - 1
last = property(last)
def length(self):
return len(self.seq)
length = property(length)
def first_group(self, getter=None):
"""
Returns true if this item is the start of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.first:
return True
return self._compare_group(self.item, self.previous, getter)
def last_group(self, getter=None):
"""
Returns true if this item is the end of a new group,
where groups mean that some attribute has changed. The getter
can be None (the item itself changes), an attribute name like
``'.attr'``, a function, or a dict key or list index.
"""
if self.last:
return True
return self._compare_group(self.item, self.__next__, getter)
def _compare_group(self, item, other, getter):
if getter is None:
return item != other
elif (isinstance(getter, basestring_)
and getter.startswith('.')):
getter = getter[1:]
if getter.endswith('()'):
getter = getter[:-2]
return getattr(item, getter)() != getattr(other, getter)()
else:
return getattr(item, getter) != getattr(other, getter)
elif hasattr(getter, '__call__'):
return getter(item) != getter(other)
else:
return item[getter] != other[getter]
# ===== File: Cython-0.29.28/Cython/Tempita/_tempita.py =====
"""
A small templating language
This implements a small templating language. This language implements
if/elif/else, for/continue/break, expressions, and blocks of Python
code. The syntax is::
{{any expression (function calls etc)}}
{{any expression | filter}}
{{for x in y}}...{{endfor}}
{{if x}}x{{elif y}}y{{else}}z{{endif}}
{{py:x=1}}
{{py:
def foo(bar):
return 'baz'
}}
{{default var = default_value}}
{{# comment}}
You use this with the ``Template`` class or the ``sub`` shortcut.
The ``Template`` class takes the template string and the name of
the template (for errors) and a default namespace. Then (like
``string.Template``) you can call the ``tmpl.substitute(**kw)``
method to make a substitution (or ``tmpl.substitute(a_dict)``).
``sub(content, **kw)`` substitutes the template immediately. You
can use ``__name='tmpl.html'`` to set the name of the template.
If there are syntax errors ``TemplateError`` will be raised.
"""
from __future__ import absolute_import
import re
import sys
import cgi
try:
from urllib import quote as url_quote
except ImportError: # Py3
from urllib.parse import quote as url_quote
import os
import tokenize
from io import StringIO
from ._looper import looper
from .compat3 import bytes, unicode_, basestring_, next, is_unicode, coerce_text
__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
'sub_html', 'html', 'bunch']
in_re = re.compile(r'\s+in\s+')
var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
class TemplateError(Exception):
"""Exception raised while parsing a template
"""
def __init__(self, message, position, name=None):
Exception.__init__(self, message)
self.position = position
self.name = name
def __str__(self):
msg = ' '.join(self.args)
if self.position:
msg = '%s at line %s column %s' % (
msg, self.position[0], self.position[1])
if self.name:
msg += ' in %s' % self.name
return msg
class _TemplateContinue(Exception):
pass
class _TemplateBreak(Exception):
pass
def get_file_template(name, from_template):
path = os.path.join(os.path.dirname(from_template.name), name)
return from_template.__class__.from_filename(
path, namespace=from_template.namespace,
get_template=from_template.get_template)
class Template(object):
default_namespace = {
'start_braces': '{{',
'end_braces': '}}',
'looper': looper,
}
default_encoding = 'utf8'
default_inherit = None
def __init__(self, content, name=None, namespace=None, stacklevel=None,
get_template=None, default_inherit=None, line_offset=0,
delimeters=None):
self.content = content
# set delimeters
if delimeters is None:
delimeters = (self.default_namespace['start_braces'],
self.default_namespace['end_braces'])
else:
#assert len(delimeters) == 2 and all([isinstance(delimeter, basestring)
# for delimeter in delimeters])
self.default_namespace = self.__class__.default_namespace.copy()
self.default_namespace['start_braces'] = delimeters[0]
self.default_namespace['end_braces'] = delimeters[1]
self.delimeters = delimeters
self._unicode = is_unicode(content)
if name is None and stacklevel is not None:
try:
caller = sys._getframe(stacklevel)
except ValueError:
pass
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__file__' in globals:
name = globals['__file__']
if name.endswith('.pyc') or name.endswith('.pyo'):
name = name[:-1]
elif '__name__' in globals:
name = globals['__name__']
else:
name = ''
if lineno:
name += ':%s' % lineno
self.name = name
self._parsed = parse(content, name=name, line_offset=line_offset, delimeters=self.delimeters)
if namespace is None:
namespace = {}
self.namespace = namespace
self.get_template = get_template
if default_inherit is not None:
self.default_inherit = default_inherit
def from_filename(cls, filename, namespace=None, encoding=None,
default_inherit=None, get_template=get_file_template):
f = open(filename, 'rb')
c = f.read()
f.close()
if encoding:
c = c.decode(encoding)
return cls(content=c, name=filename, namespace=namespace,
default_inherit=default_inherit, get_template=get_template)
from_filename = classmethod(from_filename)
def __repr__(self):
return '<%s %s name=%r>' % (
self.__class__.__name__,
hex(id(self))[2:], self.name)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError(
"You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError(
"You can only give one positional argument")
if not hasattr(args[0], 'items'):
raise TypeError(
"If you pass in a single argument, you must pass in a dictionary-like object (with a .items() method); you gave %r"
% (args[0],))
kw = args[0]
ns = kw
ns['__template_name__'] = self.name
if self.namespace:
ns.update(self.namespace)
result, defs, inherit = self._interpret(ns)
if not inherit:
inherit = self.default_inherit
if inherit:
result = self._interpret_inherit(result, defs, inherit, ns)
return result
def _interpret(self, ns):
__traceback_hide__ = True
parts = []
defs = {}
self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
if '__inherit__' in defs:
inherit = defs.pop('__inherit__')
else:
inherit = None
return ''.join(parts), defs, inherit
def _interpret_inherit(self, body, defs, inherit_template, ns):
__traceback_hide__ = True
if not self.get_template:
raise TemplateError(
'You cannot use inheritance without passing in get_template',
position=None, name=self.name)
templ = self.get_template(inherit_template, self)
self_ = TemplateObject(self.name)
for name, value in defs.items():
setattr(self_, name, value)
self_.body = body
ns = ns.copy()
ns['self'] = self_
return templ.substitute(ns)
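# Sketch of the inheritance hook above (the file name and the get_template
# wiring are hypothetical; normally get_file_template resolves names on
# disk next to the inheriting template):
#
#     base = Template('header {{self.body}} footer', name='base.txt')
#     child = Template('{{inherit "base.txt"}}hello',
#                      get_template=lambda name, frm: base)
#     print(child.substitute())   # expected: 'header hello footer'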
def _interpret_codes(self, codes, ns, out, defs):
__traceback_hide__ = True
for item in codes:
if isinstance(item, basestring_):
out.append(item)
else:
self._interpret_code(item, ns, out, defs)
def _interpret_code(self, code, ns, out, defs):
__traceback_hide__ = True
name, pos = code[0], code[1]
if name == 'py':
self._exec(code[2], ns, pos)
elif name == 'continue':
raise _TemplateContinue()
elif name == 'break':
raise _TemplateBreak()
elif name == 'for':
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out, defs)
elif name == 'cond':
parts = code[2:]
self._interpret_if(parts, ns, out, defs)
elif name == 'expr':
parts = code[2].split('|')
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == 'default':
var, expr = code[2], code[3]
if var not in ns:
result = self._eval(expr, ns, pos)
ns[var] = result
elif name == 'inherit':
expr = code[2]
value = self._eval(expr, ns, pos)
defs['__inherit__'] = value
elif name == 'def':
name = code[2]
signature = code[3]
parts = code[4]
ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns,
pos=pos)
elif name == 'comment':
return
else:
assert 0, "Unknown code: %r" % name
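# The 'expr' branch above implements the ``{{value | filter}}`` pipe: each
# '|'-separated part is evaluated in the namespace and applied left to
# right. Illustrative sketch (``shout`` is a caller-supplied name):
#
#     from Cython.Tempita import sub
#     print(sub('{{msg | shout}}', msg='hi', shout=lambda s: s.upper() + '!'))
#     # expected: 'HI!'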
def _interpret_for(self, vars, expr, content, ns, out, defs):
__traceback_hide__ = True
for item in expr:
if len(vars) == 1:
ns[vars[0]] = item
else:
if len(vars) != len(item):
raise ValueError(
'Need %i items to unpack (got %i items)'
% (len(vars), len(item)))
for name, value in zip(vars, item):
ns[name] = value
try:
self._interpret_codes(content, ns, out, defs)
except _TemplateContinue:
continue
except _TemplateBreak:
break
def _interpret_if(self, parts, ns, out, defs):
__traceback_hide__ = True
# @@: if/else/else gets through
for part in parts:
assert not isinstance(part, basestring_)
name, pos = part[0], part[1]
if name == 'else':
result = True
else:
result = self._eval(part[2], ns, pos)
if result:
self._interpret_codes(part[3], ns, out, defs)
break
def _eval(self, code, ns, pos):
__traceback_hide__ = True
try:
try:
value = eval(code, self.default_namespace, ns)
except SyntaxError as e:
raise SyntaxError(
'invalid syntax in expression: %s' % code)
return value
except Exception as e:
if getattr(e, 'args', None):
arg0 = e.args[0]
else:
arg0 = coerce_text(e)
e.args = (self._add_line_info(arg0, pos),)
raise
def _exec(self, code, ns, pos):
__traceback_hide__ = True
try:
exec(code, self.default_namespace, ns)
except Exception as e:
if e.args:
e.args = (self._add_line_info(e.args[0], pos),)
else:
e.args = (self._add_line_info(None, pos),)
raise
def _repr(self, value, pos):
__traceback_hide__ = True
try:
if value is None:
return ''
if self._unicode:
try:
value = unicode_(value)
except UnicodeDecodeError:
value = bytes(value)
else:
if not isinstance(value, basestring_):
value = coerce_text(value)
if (is_unicode(value)
and self.default_encoding):
value = value.encode(self.default_encoding)
except Exception as e:
e.args = (self._add_line_info(e.args[0], pos),)
raise
else:
if self._unicode and isinstance(value, bytes):
if not self.default_encoding:
raise UnicodeDecodeError(
'Cannot decode bytes value %r into unicode '
'(no default_encoding provided)' % value)
try:
value = value.decode(self.default_encoding)
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
e.encoding,
e.object,
e.start,
e.end,
e.reason + ' in string %r' % value)
elif not self._unicode and is_unicode(value):
if not self.default_encoding:
raise UnicodeEncodeError(
'Cannot encode unicode value %r into bytes '
'(no default_encoding provided)' % value)
value = value.encode(self.default_encoding)
return value
def _add_line_info(self, msg, pos):
msg = "%s at line %s column %s" % (
msg, pos[0], pos[1])
if self.name:
msg += " in file %s" % self.name
return msg
def sub(content, delimeters=None, **kw):
name = kw.get('__name')
tmpl = Template(content, name=name, delimeters=delimeters)
return tmpl.substitute(kw)
def paste_script_template_renderer(content, vars, filename=None):
tmpl = Template(content, name=filename)
return tmpl.substitute(vars)
class bunch(dict):
def __init__(self, **kw):
for name, value in kw.items():
setattr(self, name, value)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, key):
if 'default' in self:
try:
return dict.__getitem__(self, key)
except KeyError:
return dict.__getitem__(self, 'default')
else:
return dict.__getitem__(self, key)
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
' '.join(['%s=%r' % (k, v) for k, v in sorted(self.items())]))
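# Illustrative behaviour of ``bunch``: attribute access falls through to
# the dict, and a 'default' key (if present) catches missing lookups:
#
#     b = bunch(default=0, x=1)
#     b.x            # -> 1
#     b['missing']   # -> 0, falls back to b['default']
#     b.y = 2        # stored as b['y']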
############################################################
## HTML Templating
############################################################
class html(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __html__(self):
return self.value
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__, self.value)
def html_quote(value, force=True):
if not force and hasattr(value, '__html__'):
return value.__html__()
if value is None:
return ''
if not isinstance(value, basestring_):
value = coerce_text(value)
if sys.version >= "3" and isinstance(value, bytes):
value = cgi.escape(value.decode('latin1'), 1)
value = value.encode('latin1')
else:
value = cgi.escape(value, 1)
if sys.version < "3":
if is_unicode(value):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
def url(v):
v = coerce_text(v)
if is_unicode(v):
v = v.encode('utf8')
return url_quote(v)
def attr(**kw):
parts = []
for name, value in sorted(kw.items()):
if value is None:
continue
if name.endswith('_'):
name = name[:-1]
parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
return html(' '.join(parts))
class HTMLTemplate(Template):
default_namespace = Template.default_namespace.copy()
default_namespace.update(dict(
html=html,
attr=attr,
url=url,
html_quote=html_quote,
))
def _repr(self, value, pos):
if hasattr(value, '__html__'):
value = value.__html__()
quote = False
else:
quote = True
plain = Template._repr(self, value, pos)
if quote:
return html_quote(plain)
else:
return plain
def sub_html(content, **kw):
name = kw.get('__name')
tmpl = HTMLTemplate(content, name=name)
return tmpl.substitute(kw)
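# HTML templating sketch (assuming the re-export from Cython.Tempita):
# substituted values are escaped with html_quote unless they are wrapped
# in ``html()`` (or define __html__), which marks them as trusted markup:
#
#     from Cython.Tempita import sub_html, html
#     sub_html('<p>{{x}}</p>', x='<b>&</b>')
#     # expected: '<p>&lt;b&gt;&amp;&lt;/b&gt;</p>'
#     sub_html('<p>{{x}}</p>', x=html('<b>ok</b>'))
#     # expected: '<p><b>ok</b></p>'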
class TemplateDef(object):
def __init__(self, template, func_name, func_signature,
body, ns, pos, bound_self=None):
self._template = template
self._func_name = func_name
self._func_signature = func_signature
self._body = body
self._ns = ns
self._pos = pos
self._bound_self = bound_self
def __repr__(self):
return '<tempita function %s(%s) at %s:%s>' % (
self._func_name, self._func_signature,
self._template.name, self._pos)
def __str__(self):
return self()
def __call__(self, *args, **kw):
values = self._parse_signature(args, kw)
ns = self._ns.copy()
ns.update(values)
if self._bound_self is not None:
ns['self'] = self._bound_self
out = []
subdefs = {}
self._template._interpret_codes(self._body, ns, out, subdefs)
return ''.join(out)
def __get__(self, obj, type=None):
if obj is None:
return self
return self.__class__(
self._template, self._func_name, self._func_signature,
self._body, self._ns, self._pos, bound_self=obj)
def _parse_signature(self, args, kw):
values = {}
sig_args, var_args, var_kw, defaults = self._func_signature
extra_kw = {}
for name, value in kw.items():
if not var_kw and name not in sig_args:
raise TypeError(
'Unexpected argument %s' % name)
if name in sig_args:
values[name] = value
else:
extra_kw[name] = value
args = list(args)
sig_args = list(sig_args)
while args:
while sig_args and sig_args[0] in values:
sig_args.pop(0)
if sig_args:
name = sig_args.pop(0)
values[name] = args.pop(0)
elif var_args:
values[var_args] = tuple(args)
break
else:
raise TypeError(
'Extra positional arguments: %s'
% ', '.join([repr(v) for v in args]))
for name, value_expr in defaults.items():
if name not in values:
values[name] = self._template._eval(
value_expr, self._ns, self._pos)
for name in sig_args:
if name not in values:
raise TypeError(
'Missing argument: %s' % name)
if var_kw:
values[var_kw] = extra_kw
return values
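# Sketch of ``{{def}}`` in action, which TemplateDef implements: the def
# body becomes a callable in the namespace, with defaults evaluated at
# call time:
#
#     from Cython.Tempita import sub
#     t = ('{{def greet(name, punct="!")}}'
#          'Hello {{name}}{{punct}}'
#          '{{enddef}}'
#          '{{greet("world")}}')
#     print(sub(t))   # expected: 'Hello world!'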
class TemplateObject(object):
def __init__(self, name):
self.__name = name
self.get = TemplateObjectGetter(self)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.__name)
class TemplateObjectGetter(object):
def __init__(self, template_obj):
self.__template_obj = template_obj
def __getattr__(self, attr):
return getattr(self.__template_obj, attr, Empty)
def __repr__(self):
return '<%s around %r>' % (self.__class__.__name__, self.__template_obj)
class _Empty(object):
def __call__(self, *args, **kw):
return self
def __str__(self):
return ''
def __repr__(self):
return 'Empty'
def __unicode__(self):
return u''
def __iter__(self):
return iter(())
def __bool__(self):
return False
if sys.version < "3":
__nonzero__ = __bool__
Empty = _Empty()
del _Empty
############################################################
## Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
"""
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
TemplateError: {{ inside expression at line 1 column 10
"""
if delimeters is None:
delimeters = ( Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'] )
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
token_re = re.compile(r'%s|%s' % (re.escape(delimeters[0]),
re.escape(delimeters[1])))
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), last, last_pos)
if expr == delimeters[0] and in_expr:
raise TemplateError('%s inside expression' % delimeters[0],
position=pos,
name=name)
elif expr == delimeters[1] and not in_expr:
raise TemplateError('%s outside expression' % delimeters[1],
position=pos,
name=name)
if expr == delimeters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
in_expr = True
else:
chunks.append((s[last:match.start()], last_pos))
in_expr = False
last = match.end()
last_pos = pos
if in_expr:
raise TemplateError('No %s to finish last expression' % delimeters[1],
name=name, position=last_pos)
part = s[last:]
if part:
chunks.append(part)
if trim_whitespace:
chunks = trim_lex(chunks)
return chunks
statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
lead_whitespace_re = re.compile(r'^[\t ]*\n')
def trim_lex(tokens):
r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
"""
last_trim = None
for i, current in enumerate(tokens):
if isinstance(current, basestring_):
# we don't trim this
continue
item = current[0]
if not statement_re.search(item) and item not in single_statements:
continue
if not i:
prev = ''
else:
prev = tokens[i - 1]
if i + 1 >= len(tokens):
next_chunk = ''
else:
next_chunk = tokens[i + 1]
if (not isinstance(next_chunk, basestring_)
or not isinstance(prev, basestring_)):
continue
prev_ok = not prev or trail_whitespace_re.search(prev)
if i == 1 and not prev.strip():
prev_ok = True
if last_trim is not None and last_trim + 2 == i and not prev.strip():
prev_ok = 'last'
if (prev_ok
and (not next_chunk or lead_whitespace_re.search(next_chunk)
or (i == len(tokens) - 2 and not next_chunk.strip()))):
if prev:
if ((i == 1 and not prev.strip())
or prev_ok == 'last'):
tokens[i - 1] = ''
else:
m = trail_whitespace_re.search(prev)
# +1 to leave the leading \n on:
prev = prev[:m.start() + 1]
tokens[i - 1] = prev
if next_chunk:
last_trim = i
if i == len(tokens) - 2 and not next_chunk.strip():
tokens[i + 1] = ''
else:
m = lead_whitespace_re.search(next_chunk)
next_chunk = next_chunk[m.end():]
tokens[i + 1] = next_chunk
return tokens
def find_position(string, index, last_index, last_pos):
"""Given a string and index, return (line, column)"""
lines = string.count('\n', last_index, index)
if lines > 0:
column = index - string.rfind('\n', last_index, index)
else:
column = last_pos[1] + (index - last_index)
return (last_pos[0] + lines, column)
def parse(s, name=None, line_offset=0, delimeters=None):
r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}')
Traceback (most recent call last):
...
TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
"""
if delimeters is None:
delimeters = ( Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'] )
tokens = lex(s, name=name, line_offset=line_offset, delimeters=delimeters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
result.append(next_chunk)
return result
def parse_expr(tokens, name, context=()):
if isinstance(tokens[0], basestring_):
return tokens[0], tokens[1:]
expr, pos = tokens[0]
expr = expr.strip()
if expr.startswith('py:'):
expr = expr[3:].lstrip(' \t')
if expr.startswith('\n') or expr.startswith('\r'):
expr = expr.lstrip('\r\n')
if '\r' in expr:
expr = expr.replace('\r\n', '\n')
expr = expr.replace('\r', '')
expr += '\n'
else:
if '\n' in expr:
raise TemplateError(
'Multi-line py blocks must start with a newline',
position=pos, name=name)
return ('py', pos, expr), tokens[1:]
elif expr in ('continue', 'break'):
if 'for' not in context:
raise TemplateError(
'%s outside of for loop' % expr,
position=pos, name=name)
return (expr, pos), tokens[1:]
elif expr.startswith('if '):
return parse_cond(tokens, name, context)
elif (expr.startswith('elif ')
or expr == 'else'):
raise TemplateError(
'%s outside of an if block' % expr.split()[0],
position=pos, name=name)
elif expr in ('if', 'elif', 'for'):
raise TemplateError(
'%s with no expression' % expr,
position=pos, name=name)
elif expr in ('endif', 'endfor', 'enddef'):
raise TemplateError(
'Unexpected %s' % expr,
position=pos, name=name)
elif expr.startswith('for '):
return parse_for(tokens, name, context)
elif expr.startswith('default '):
return parse_default(tokens, name, context)
elif expr.startswith('inherit '):
return parse_inherit(tokens, name, context)
elif expr.startswith('def '):
return parse_def(tokens, name, context)
elif expr.startswith('#'):
return ('comment', pos, tokens[0][0]), tokens[1:]
return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
start = tokens[0][1]
pieces = []
context = context + ('if',)
while 1:
if not tokens:
raise TemplateError(
'Missing {{endif}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endif'):
return ('cond', start) + tuple(pieces), tokens[1:]
next_chunk, tokens = parse_one_cond(tokens, name, context)
pieces.append(next_chunk)
def parse_one_cond(tokens, name, context):
(first, pos), tokens = tokens[0], tokens[1:]
content = []
if first.endswith(':'):
first = first[:-1]
if first.startswith('if '):
part = ('if', pos, first[3:].lstrip(), content)
elif first.startswith('elif '):
part = ('elif', pos, first[5:].lstrip(), content)
elif first == 'else':
part = ('else', pos, None, content)
else:
assert 0, "Unexpected token %r at %s" % (first, pos)
while 1:
if not tokens:
raise TemplateError(
'No {{endif}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and (tokens[0][0] == 'endif'
or tokens[0][0].startswith('elif ')
or tokens[0][0] == 'else')):
return part, tokens
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_for(tokens, name, context):
first, pos = tokens[0]
tokens = tokens[1:]
context = ('for',) + context
content = []
assert first.startswith('for ')
if first.endswith(':'):
first = first[:-1]
first = first[3:].strip()
match = in_re.search(first)
if not match:
raise TemplateError(
'Bad for (no "in") in %r' % first,
position=pos, name=name)
vars = first[:match.start()]
if '(' in vars:
raise TemplateError(
'You cannot have () in the variable section of a for loop (%r)'
% vars, position=pos, name=name)
vars = tuple([
v.strip() for v in first[:match.start()].split(',')
if v.strip()])
expr = first[match.end():]
while 1:
if not tokens:
raise TemplateError(
'No {{endfor}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endfor'):
return ('for', pos, vars, expr, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_default(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('default ')
first = first.split(None, 1)[1]
parts = first.split('=', 1)
if len(parts) == 1:
raise TemplateError(
"Expression must be {{default var=value}}; no = found in %r" % first,
position=pos, name=name)
var = parts[0].strip()
if ',' in var:
raise TemplateError(
"{{default x, y = ...}} is not supported",
position=pos, name=name)
if not var_re.search(var):
raise TemplateError(
"Not a valid variable name for {{default}}: %r"
% var, position=pos, name=name)
expr = parts[1].strip()
return ('default', pos, var, expr), tokens[1:]
def parse_inherit(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('inherit ')
expr = first.split(None, 1)[1]
return ('inherit', pos, expr), tokens[1:]
def parse_def(tokens, name, context):
first, start = tokens[0]
tokens = tokens[1:]
assert first.startswith('def ')
first = first.split(None, 1)[1]
if first.endswith(':'):
first = first[:-1]
if '(' not in first:
func_name = first
sig = ((), None, None, {})
elif not first.endswith(')'):
raise TemplateError("Function definition doesn't end with ): %s" % first,
position=start, name=name)
else:
first = first[:-1]
func_name, sig_text = first.split('(', 1)
sig = parse_signature(sig_text, name, start)
context = context + ('def',)
content = []
while 1:
if not tokens:
raise TemplateError(
'Missing {{enddef}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'enddef'):
return ('def', start, func_name, sig, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_signature(sig_text, name, pos):
tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
sig_args = []
var_arg = None
var_kw = None
defaults = {}
def get_token(pos=False):
try:
tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens)
except StopIteration:
return tokenize.ENDMARKER, ''
if pos:
return tok_type, tok_string, (srow, scol), (erow, ecol)
else:
return tok_type, tok_string
while 1:
var_arg_type = None
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER:
break
if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'):
var_arg_type = tok_string
tok_type, tok_string = get_token()
if tok_type != tokenize.NAME:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
var_name = tok_string
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','):
if var_arg_type == '*':
var_arg = var_name
elif var_arg_type == '**':
var_kw = var_name
else:
sig_args.append(var_name)
if tok_type == tokenize.ENDMARKER:
break
continue
if var_arg_type is not None:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if tok_type == tokenize.OP and tok_string == '=':
nest_type = None
unnest_type = None
nest_count = 0
start_pos = end_pos = None
parts = []
while 1:
tok_type, tok_string, s, e = get_token(True)
if start_pos is None:
start_pos = s
end_pos = e
if tok_type == tokenize.ENDMARKER and nest_count:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if (not nest_count and
(tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))):
default_expr = isolate_expression(sig_text, start_pos, end_pos)
defaults[var_name] = default_expr
sig_args.append(var_name)
break
parts.append((tok_type, tok_string))
if nest_count and tok_type == tokenize.OP and tok_string == nest_type:
nest_count += 1
elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type:
nest_count -= 1
if not nest_count:
nest_type = unnest_type = None
elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'):
nest_type = tok_string
nest_count = 1
unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
return sig_args, var_arg, var_kw, defaults
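# For orientation, the tuple returned above is (positional argument names,
# *args name or None, **kw name or None, {arg: raw default expression}).
# For a signature like ``name, punct='!'`` this comes out roughly as
# (['name', 'punct'], None, None, {'punct': "'!'"}); the default
# expressions stay unevaluated until the def is actually called.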
def isolate_expression(string, start_pos, end_pos):
srow, scol = start_pos
srow -= 1
erow, ecol = end_pos
erow -= 1
lines = string.splitlines(True)
if srow == erow:
return lines[srow][scol:ecol]
parts = [lines[srow][scol:]]
parts.extend(lines[srow+1:erow])
if erow < len(lines):
# It'll sometimes give (end_row_past_finish, 0)
parts.append(lines[erow][:ecol])
return ''.join(parts)
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
import sys
import optparse
import pkg_resources
import os
if args is None:
args = sys.argv[1:]
dist = pkg_resources.get_distribution('Paste')
parser = optparse.OptionParser(
version=coerce_text(dist),
usage=_fill_command_usage)
parser.add_option(
'-o', '--output',
dest='output',
metavar="FILENAME",
help="File to write output to (default stdout)")
parser.add_option(
'--html',
dest='use_html',
action='store_true',
help="Use HTML style filling (including automatic HTML quoting)")
parser.add_option(
'--env',
dest='use_env',
action='store_true',
help="Put the environment in as top-level variables")
options, args = parser.parse_args(args)
if len(args) < 1:
print('You must give a template filename')
sys.exit(2)
template_name = args[0]
args = args[1:]
vars = {}
if options.use_env:
vars.update(os.environ)
for value in args:
if '=' not in value:
print('Bad argument: %r' % value)
sys.exit(2)
name, value = value.split('=', 1)
if name.startswith('py:'):
name = name[3:]  # strip the 'py:' prefix
value = eval(value)
vars[name] = value
if template_name == '-':
template_content = sys.stdin.read()
template_name = ''
else:
f = open(template_name, 'rb')
template_content = f.read()
f.close()
if options.use_html:
TemplateClass = HTMLTemplate
else:
TemplateClass = Template
template = TemplateClass(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
f = open(options.output, 'wb')
f.write(result)
f.close()
else:
sys.stdout.write(result)
if __name__ == '__main__':
fill_command()
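# Illustrative command lines for fill_command above (file names are
# hypothetical; note that it looks up the 'Paste' distribution via
# pkg_resources for its version string, so Paste must be installed):
#
#     python -m Cython.Tempita._tempita greeting.txt name=world py:count=3
#     python -m Cython.Tempita._tempita --html page.html title='A & B' -o out.html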
# ===== File: Cython-0.29.28/Cython/Tempita/compat3.py =====
import sys
__all__ = ['b', 'basestring_', 'bytes', 'unicode_', 'next', 'is_unicode']
if sys.version < "3":
b = bytes = str
basestring_ = basestring
unicode_ = unicode
else:
def b(s):
if isinstance(s, str):
return s.encode('latin1')
return bytes(s)
basestring_ = (bytes, str)
bytes = bytes
unicode_ = str
text = str
if sys.version < "3":
def next(obj):
return obj.next()
else:
next = next
if sys.version < "3":
def is_unicode(obj):
return isinstance(obj, unicode)
else:
def is_unicode(obj):
return isinstance(obj, str)
def coerce_text(v):
if not isinstance(v, basestring_):
if sys.version < "3":
attr = '__unicode__'
else:
attr = '__str__'
if hasattr(v, attr):
return unicode_(v)
else:
return bytes(v)
return v
# ===== File: Cython-0.29.28/Cython/TestUtils.py =====
from __future__ import absolute_import
import os
import unittest
import tempfile
from .Compiler import Errors
from .CodeWriter import CodeWriter
from .Compiler.TreeFragment import TreeFragment, strip_common_indent
from .Compiler.Visitor import TreeVisitor, VisitorTransform
from .Compiler import TreePath
class NodeTypeWriter(TreeVisitor):
def __init__(self):
super(NodeTypeWriter, self).__init__()
self._indents = 0
self.result = []
def visit_Node(self, node):
if not self.access_path:
name = u"(root)"
else:
tip = self.access_path[-1]
if tip[2] is not None:
name = u"%s[%d]" % tip[1:3]
else:
name = tip[1]
self.result.append(u" " * self._indents +
u"%s: %s" % (name, node.__class__.__name__))
self._indents += 1
self.visitchildren(node)
self._indents -= 1
def treetypes(root):
"""Returns a string representing the tree by class names.
There is leading and trailing whitespace so that it can be
compared by simple string comparison while still making test
cases look ok."""
w = NodeTypeWriter()
w.visit(root)
return u"\n".join([u""] + w.result + [u""])
class CythonTest(unittest.TestCase):
def setUp(self):
self.listing_file = Errors.listing_file
self.echo_file = Errors.echo_file
Errors.listing_file = Errors.echo_file = None
def tearDown(self):
Errors.listing_file = self.listing_file
Errors.echo_file = self.echo_file
def assertLines(self, expected, result):
"Checks that the given strings or lists of strings are equal line by line"
if not isinstance(expected, list):
expected = expected.split(u"\n")
if not isinstance(result, list):
result = result.split(u"\n")
for idx, (expected_line, result_line) in enumerate(zip(expected, result)):
self.assertEqual(expected_line, result_line,
"Line %d:\nExp: %s\nGot: %s" % (idx, expected_line, result_line))
self.assertEqual(len(expected), len(result),
"Unmatched lines. Got:\n%s\nExpected:\n%s" % (u"\n".join(result), u"\n".join(expected)))
def codeToLines(self, tree):
writer = CodeWriter()
writer.write(tree)
return writer.result.lines
def codeToString(self, tree):
return "\n".join(self.codeToLines(tree))
def assertCode(self, expected, result_tree):
result_lines = self.codeToLines(result_tree)
expected_lines = strip_common_indent(expected.split("\n"))
for idx, (line, expected_line) in enumerate(zip(result_lines, expected_lines)):
self.assertEqual(expected_line, line,
"Line %d:\nGot: %s\nExp: %s" % (idx, line, expected_line))
self.assertEqual(len(result_lines), len(expected_lines),
"Unmatched lines. Got:\n%s\nExpected:\n%s" % ("\n".join(result_lines), expected))
def assertNodeExists(self, path, result_tree):
self.assertNotEqual(TreePath.find_first(result_tree, path), None,
"Path '%s' not found in result tree" % path)
def fragment(self, code, pxds=None, pipeline=None):
"Simply create a tree fragment using the name of the test-case in parse errors."
if pxds is None:
pxds = {}
if pipeline is None:
pipeline = []
name = self.id()
if name.startswith("__main__."):
name = name[len("__main__."):]
name = name.replace(".", "_")
return TreeFragment(code, name, pxds, pipeline=pipeline)
def treetypes(self, root):
return treetypes(root)
def should_fail(self, func, exc_type=Exception):
"""Calls "func" and fails if it doesn't raise the right exception
(any exception by default). Also returns the exception in question.
"""
try:
func()
self.fail("Expected an exception of type %r" % exc_type)
except exc_type as e:
self.assertTrue(isinstance(e, exc_type))
return e
def should_not_fail(self, func):
"""Calls func and succeeds if and only if no exception is raised
(i.e. converts exception raising into a failed testcase). Returns
the return value of func."""
try:
return func()
except Exception as exc:
self.fail(str(exc))
class TransformTest(CythonTest):
"""
Utility base class for transform unit tests. It is based around constructing
test trees (either explicitly or by parsing a Cython code string); running
the transform, serializing the result using a customized Cython serializer (with
special markup for nodes that cannot be represented in Cython),
and doing a line-by-line string comparison of the result.
To create a test case:
- Call run_pipeline. The pipeline should at least contain the transform you
are testing; pyx should be either a string (passed to the parser to
create a post-parse tree) or a node representing input to the pipeline.
The result is the transformed tree.
- Check that the tree is correct. If wanted, assertCode can be used, which
takes a code string as expected, and a ModuleNode in result_tree
(it serializes the ModuleNode to a string and compares line-by-line).
All code strings are first stripped of blank lines and then of common
indentation.
Plans: One could have a pxd dictionary parameter to run_pipeline.
"""
def run_pipeline(self, pipeline, pyx, pxds=None):
if pxds is None:
pxds = {}
tree = self.fragment(pyx, pxds).root
# Run pipeline
for T in pipeline:
tree = T(tree)
return tree
class TreeAssertVisitor(VisitorTransform):
# actually, a TreeVisitor would be enough, but this needs to run
# as part of the compiler pipeline
def visit_CompilerDirectivesNode(self, node):
directives = node.directives
if 'test_assert_path_exists' in directives:
for path in directives['test_assert_path_exists']:
if TreePath.find_first(node, path) is None:
Errors.error(
node.pos,
"Expected path '%s' not found in result tree" % path)
if 'test_fail_if_path_exists' in directives:
for path in directives['test_fail_if_path_exists']:
if TreePath.find_first(node, path) is not None:
Errors.error(
node.pos,
"Unexpected path '%s' found in result tree" % path)
self.visitchildren(node)
return node
visit_Node = VisitorTransform.recurse_to_children
def unpack_source_tree(tree_file, dir=None):
if dir is None:
dir = tempfile.mkdtemp()
header = []
cur_file = None
f = open(tree_file)
try:
lines = f.readlines()
finally:
f.close()
del f
try:
for line in lines:
if line[:5] == '#####':
filename = line.strip().strip('#').strip().replace('/', os.path.sep)
path = os.path.join(dir, filename)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
if cur_file is not None:
f, cur_file = cur_file, None
f.close()
cur_file = open(path, 'w')
elif cur_file is not None:
cur_file.write(line)
elif line.strip() and not line.lstrip().startswith('#'):
if line.strip() not in ('"""', "'''"):
header.append(line)
finally:
if cur_file is not None:
cur_file.close()
return dir, ''.join(header)
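# unpack_source_tree consumes a "source tree" text file of the form
# sketched below (illustrative): '#####' header lines name files, the
# lines after each header are written into that file, and non-comment
# lines before the first header are collected and returned as the header:
#
#     ##### a.pyx #####
#     def f(): pass
#     ##### sub/b.py #####
#     x = 1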
# ===== Directory: Cython-0.29.28/Cython/Tests/ =====
# ===== File: Cython-0.29.28/Cython/Tests/TestCodeWriter.py =====
from Cython.TestUtils import CythonTest
class TestCodeWriter(CythonTest):
# CythonTest uses the CodeWriter heavily, so do some checking by
# roundtripping Cython code through the test framework.
# Note that this test is dependent upon the normal Cython parser
# to generate the input trees to the CodeWriter. This saves *a lot*
# of time; better to spend that time writing other tests than perfecting
# this one...
# Whitespace is very significant in this process:
# - always newline on new block (!)
# - indent 4 spaces
# - 1 space around every operator
def t(self, codestr):
self.assertCode(codestr, self.fragment(codestr).root)
def test_print(self):
self.t(u"""
print x, y
print x + y ** 2
print x, y, z,
""")
def test_if(self):
self.t(u"if x:\n pass")
def test_ifelifelse(self):
self.t(u"""
if x:
pass
elif y:
pass
elif z + 34 ** 34 - 2:
pass
else:
pass
""")
def test_def(self):
self.t(u"""
def f(x, y, z):
pass
def f(x = 34, y = 54, z):
pass
""")
def test_longness_and_signedness(self):
self.t(u"def f(unsigned long long long long long int y):\n pass")
def test_signed_short(self):
self.t(u"def f(signed short int y):\n pass")
def test_typed_args(self):
self.t(u"def f(int x, unsigned long int y):\n pass")
def test_cdef_var(self):
self.t(u"""
cdef int hello
cdef int hello = 4, x = 3, y, z
""")
def test_for_loop(self):
self.t(u"""
for x, y, z in f(g(h(34) * 2) + 23):
print x, y, z
else:
print 43
""")
def test_inplace_assignment(self):
self.t(u"x += 43")
def test_attribute(self):
self.t(u"a.x")
if __name__ == "__main__":
import unittest
unittest.main()
# ===== File: Cython-0.29.28/Cython/Tests/TestCythonUtils.py =====
import unittest
from ..Utils import build_hex_version
class TestCythonUtils(unittest.TestCase):
def test_build_hex_version(self):
self.assertEqual('0x001D00A1', build_hex_version('0.29a1'))
self.assertEqual('0x001D00A1', build_hex_version('0.29a1'))
self.assertEqual('0x001D03C4', build_hex_version('0.29.3rc4'))
self.assertEqual('0x001D00F0', build_hex_version('0.29'))
self.assertEqual('0x040000F0', build_hex_version('4.0'))
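# These expected values follow CPython's hexversion layout, which
# build_hex_version appears to mimic: two hex digits each for major,
# minor and micro, then release level and serial, e.g. '0.29.3rc4' ->
# 0x00 1D 03 C4 (0x1D == 29, 0xC == release candidate, 4 == serial),
# with 0xF0 marking a final release.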
# ===== File: Cython-0.29.28/Cython/Tests/TestJediTyper.py =====
# -*- coding: utf-8 -*-
# tag: jedi
from __future__ import absolute_import
import sys
import os.path
from textwrap import dedent
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
from Cython.Compiler.ParseTreeTransforms import NormalizeTree, InterpretCompilerDirectives
from Cython.Compiler import Main, Symtab, Visitor
from Cython.TestUtils import TransformTest
TOOLS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', 'Tools'))
@contextmanager
def _tempfile(code):
code = dedent(code)
if not isinstance(code, bytes):
code = code.encode('utf8')
with NamedTemporaryFile(suffix='.py') as f:
f.write(code)
f.seek(0)
yield f
def _test_typing(code, inject=False):
sys.path.insert(0, TOOLS_DIR)
try:
import jedityper
finally:
sys.path.remove(TOOLS_DIR)
lines = []
with _tempfile(code) as f:
types = jedityper.analyse(f.name)
if inject:
lines = jedityper.inject_types(f.name, types)
return types, lines
class DeclarationsFinder(Visitor.VisitorTransform):
directives = None
visit_Node = Visitor.VisitorTransform.recurse_to_children
def visit_CompilerDirectivesNode(self, node):
if not self.directives:
self.directives = []
self.directives.append(node)
self.visitchildren(node)
return node
class TestJediTyper(TransformTest):
def _test(self, code):
return _test_typing(code)[0]
def test_typing_global_int_loop(self):
code = '''\
for i in range(10):
a = i + 1
'''
types = self._test(code)
self.assertIn((None, (1, 0)), types)
variables = types.pop((None, (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables)
def test_typing_function_int_loop(self):
code = '''\
def func(x):
for i in range(x):
a = i + 1
return a
'''
types = self._test(code)
self.assertIn(('func', (1, 0)), types)
variables = types.pop(('func', (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables)
def test_conflicting_types_in_function(self):
code = '''\
def func(a, b):
print(a)
a = 1
b += a
a = 'abc'
return a, str(b)
print(func(1.5, 2))
'''
types = self._test(code)
self.assertIn(('func', (1, 0)), types)
variables = types.pop(('func', (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['float', 'int', 'str']), 'b': set(['int'])}, variables)
def _test_typing_function_char_loop(self):
code = '''\
def func(x):
l = []
for c in x:
l.append(c)
return l
print(func('abcdefg'))
'''
types = self._test(code)
self.assertIn(('func', (1, 0)), types)
variables = types.pop(('func', (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['int']), 'i': set(['int'])}, variables)
def test_typing_global_list(self):
code = '''\
a = [x for x in range(10)]
b = list(range(10))
c = a + b
d = [0]*10
'''
types = self._test(code)
self.assertIn((None, (1, 0)), types)
variables = types.pop((None, (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['list']), 'b': set(['list']), 'c': set(['list']), 'd': set(['list'])}, variables)
def test_typing_function_list(self):
code = '''\
def func(x):
a = [[], []]
b = [0]* 10 + a
c = a[0]
print(func([0]*100))
'''
types = self._test(code)
self.assertIn(('func', (1, 0)), types)
variables = types.pop(('func', (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['list']), 'b': set(['list']), 'c': set(['list']), 'x': set(['list'])}, variables)
def test_typing_global_dict(self):
code = '''\
a = dict()
b = {i: i**2 for i in range(10)}
c = a
'''
types = self._test(code)
self.assertIn((None, (1, 0)), types)
variables = types.pop((None, (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['dict']), 'b': set(['dict']), 'c': set(['dict'])}, variables)
def test_typing_function_dict(self):
code = '''\
def func(x):
a = dict()
b = {i: i**2 for i in range(10)}
c = x
print(func({1:2, 'x':7}))
'''
types = self._test(code)
self.assertIn(('func', (1, 0)), types)
variables = types.pop(('func', (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['dict']), 'b': set(['dict']), 'c': set(['dict']), 'x': set(['dict'])}, variables)
def test_typing_global_set(self):
code = '''\
a = set()
# b = {i for i in range(10)} # jedi does not support set comprehension yet
c = a
d = {1,2,3}
e = a | b
'''
types = self._test(code)
self.assertIn((None, (1, 0)), types)
variables = types.pop((None, (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['set']), 'c': set(['set']), 'd': set(['set']), 'e': set(['set'])}, variables)
def test_typing_function_set(self):
code = '''\
def func(x):
a = set()
# b = {i for i in range(10)} # jedi does not support set comprehension yet
c = a
d = a | b
print(func({1,2,3}))
'''
types = self._test(code)
self.assertIn(('func', (1, 0)), types)
variables = types.pop(('func', (1, 0)))
self.assertFalse(types)
self.assertEqual({'a': set(['set']), 'c': set(['set']), 'd': set(['set']), 'x': set(['set'])}, variables)
class TestTypeInjection(TestJediTyper):
"""
Subtype of TestJediTyper that additionally tests type injection and compilation.
"""
def setUp(self):
super(TestTypeInjection, self).setUp()
compilation_options = Main.CompilationOptions(Main.default_options)
ctx = compilation_options.create_context()
transform = InterpretCompilerDirectives(ctx, ctx.compiler_directives)
transform.module_scope = Symtab.ModuleScope('__main__', None, ctx)
self.declarations_finder = DeclarationsFinder()
self.pipeline = [NormalizeTree(None), transform, self.declarations_finder]
def _test(self, code):
types, lines = _test_typing(code, inject=True)
tree = self.run_pipeline(self.pipeline, ''.join(lines))
directives = self.declarations_finder.directives
# TODO: validate directives
return types
# ===== File: Cython-0.29.28/Cython/Tests/TestStringIOTree.py =====
import unittest
from Cython import StringIOTree as stringtree
code = """
cdef int spam # line 1
cdef ham():
a = 1
b = 2
c = 3
d = 4
def eggs():
pass
cpdef bacon():
print spam
print 'scotch'
print 'tea?'
print 'or coffee?' # line 16
"""
linemap = dict(enumerate(code.splitlines()))
class TestStringIOTree(unittest.TestCase):
def setUp(self):
self.tree = stringtree.StringIOTree()
def test_markers(self):
assert not self.tree.allmarkers()
def test_insertion(self):
self.write_lines((1, 2, 3))
line_4_to_6_insertion_point = self.tree.insertion_point()
self.write_lines((7, 8))
line_9_to_13_insertion_point = self.tree.insertion_point()
self.write_lines((14, 15, 16))
line_4_insertion_point = line_4_to_6_insertion_point.insertion_point()
self.write_lines((5, 6), tree=line_4_to_6_insertion_point)
line_9_to_12_insertion_point = (
line_9_to_13_insertion_point.insertion_point())
self.write_line(13, tree=line_9_to_13_insertion_point)
self.write_line(4, tree=line_4_insertion_point)
self.write_line(9, tree=line_9_to_12_insertion_point)
line_10_insertion_point = line_9_to_12_insertion_point.insertion_point()
self.write_line(11, tree=line_9_to_12_insertion_point)
self.write_line(10, tree=line_10_insertion_point)
self.write_line(12, tree=line_9_to_12_insertion_point)
self.assertEqual(self.tree.allmarkers(), list(range(1, 17)))
self.assertEqual(code.strip(), self.tree.getvalue().strip())
def write_lines(self, linenos, tree=None):
for lineno in linenos:
self.write_line(lineno, tree=tree)
def write_line(self, lineno, tree=None):
if tree is None:
tree = self.tree
tree.markers.append(lineno)
tree.write(linemap[lineno] + '\n')
# ===== File: Cython-0.29.28/Cython/Tests/__init__.py =====
# empty file
# ===== File: Cython-0.29.28/Cython/Tests/xmlrunner.py =====
# -*- coding: utf-8 -*-
"""unittest-xml-reporting is a PyUnit-based TestRunner that can export test
results to XML files that can be consumed by a wide range of tools, such as
build systems, IDEs and Continuous Integration servers.
This module provides the XMLTestRunner class, which is heavily based on the
default TextTestRunner. This makes the XMLTestRunner very simple to use.
The script below, adapted from the unittest documentation, shows how to use
XMLTestRunner in a very simple way. In fact, the only difference between this
script and the original one is the last line:
import random
import unittest
import xmlrunner
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.seq = range(10)
def test_shuffle(self):
# make sure the shuffled sequence does not lose any elements
random.shuffle(self.seq)
self.seq.sort()
self.assertEqual(self.seq, range(10))
def test_choice(self):
element = random.choice(self.seq)
self.assertTrue(element in self.seq)
def test_sample(self):
self.assertRaises(ValueError, random.sample, self.seq, 20)
for element in random.sample(self.seq, 5):
self.assertTrue(element in self.seq)
if __name__ == '__main__':
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
"""
from __future__ import absolute_import
import os
import sys
import time
from unittest import TestResult, TextTestResult, TextTestRunner
import xml.dom.minidom
try:
from StringIO import StringIO
except ImportError:
from io import StringIO # doesn't accept 'str' in Py2
class XMLDocument(xml.dom.minidom.Document):
def createCDATAOrText(self, data):
if ']]>' in data:
return self.createTextNode(data)
return self.createCDATASection(data)
class _TestInfo(object):
"""This class is used to keep useful information about the execution of a
test method.
"""
# Possible test outcomes
(SUCCESS, FAILURE, ERROR) = range(3)
def __init__(self, test_result, test_method, outcome=SUCCESS, err=None):
"Create a new instance of _TestInfo."
self.test_result = test_result
self.test_method = test_method
self.outcome = outcome
self.err = err
self.stdout = test_result.stdout and test_result.stdout.getvalue().strip() or ''
self.stderr = test_result.stderr and test_result.stderr.getvalue().strip() or ''
def get_elapsed_time(self):
"""Return the time that shows how long the test method took to
execute.
"""
return self.test_result.stop_time - self.test_result.start_time
def get_description(self):
"Return a text representation of the test method."
return self.test_result.getDescription(self.test_method)
def get_error_info(self):
"""Return a text representation of an exception thrown by a test
method.
"""
if not self.err:
return ''
return self.test_result._exc_info_to_string(
self.err, self.test_method)
class _XMLTestResult(TextTestResult):
"""A test result class that can express test results in a XML report.
Used by XMLTestRunner.
"""
def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
elapsed_times=True):
"Create a new instance of _XMLTestResult."
TextTestResult.__init__(self, stream, descriptions, verbosity)
self.successes = []
self.callback = None
self.elapsed_times = elapsed_times
self.output_patched = False
def _prepare_callback(self, test_info, target_list, verbose_str,
short_str):
"""Append a _TestInfo to the given target list and sets a callback
method to be called by stopTest method.
"""
target_list.append(test_info)
def callback():
"""This callback prints the test method outcome to the stream,
as well as the elapsed time.
"""
# Ignore the elapsed times for a more reliable unit testing
if not self.elapsed_times:
self.start_time = self.stop_time = 0
if self.showAll:
self.stream.writeln('(%.3fs) %s' % \
(test_info.get_elapsed_time(), verbose_str))
elif self.dots:
self.stream.write(short_str)
self.callback = callback
def _patch_standard_output(self):
"""Replace the stdout and stderr streams with string-based streams
in order to capture the tests' output.
"""
if not self.output_patched:
(self.old_stdout, self.old_stderr) = (sys.stdout, sys.stderr)
self.output_patched = True
(sys.stdout, sys.stderr) = (self.stdout, self.stderr) = \
(StringIO(), StringIO())
def _restore_standard_output(self):
"Restore the stdout and stderr streams."
(sys.stdout, sys.stderr) = (self.old_stdout, self.old_stderr)
self.output_patched = False
def startTest(self, test):
"Called before execute each test method."
self._patch_standard_output()
self.start_time = time.time()
TestResult.startTest(self, test)
if self.showAll:
self.stream.write(' ' + self.getDescription(test))
self.stream.write(" ... ")
def stopTest(self, test):
"Called after execute each test method."
self._restore_standard_output()
TextTestResult.stopTest(self, test)
self.stop_time = time.time()
if self.callback and callable(self.callback):
self.callback()
self.callback = None
def addSuccess(self, test):
"Called when a test executes successfully."
self._prepare_callback(_TestInfo(self, test),
self.successes, 'OK', '.')
def addFailure(self, test, err):
"Called when a test method fails."
self._prepare_callback(_TestInfo(self, test, _TestInfo.FAILURE, err),
self.failures, 'FAIL', 'F')
def addError(self, test, err):
"Called when a test method raises an error."
self._prepare_callback(_TestInfo(self, test, _TestInfo.ERROR, err),
self.errors, 'ERROR', 'E')
def printErrorList(self, flavour, errors):
"Write some information about the FAIL or ERROR to the stream."
for test_info in errors:
if isinstance(test_info, tuple):
test_info, exc_info = test_info
try:
t = test_info.get_elapsed_time()
except AttributeError:
t = 0
try:
descr = test_info.get_description()
except AttributeError:
try:
descr = test_info.getDescription()
except AttributeError:
descr = str(test_info)
try:
err_info = test_info.get_error_info()
except AttributeError:
err_info = str(test_info)
self.stream.writeln(self.separator1)
self.stream.writeln('%s [%.3fs]: %s' % (flavour, t, descr))
self.stream.writeln(self.separator2)
self.stream.writeln('%s' % err_info)
def _get_info_by_testcase(self):
"""This method organizes test results by TestCase module. This
information is used during the report generation, where an XML report
will be generated for each TestCase.
"""
tests_by_testcase = {}
for tests in (self.successes, self.failures, self.errors):
for test_info in tests:
if not isinstance(test_info, _TestInfo):
print("Unexpected test result type: %r" % (test_info,))
continue
testcase = type(test_info.test_method)
# Ignore module name if it is '__main__'
module = testcase.__module__ + '.'
if module == '__main__.':
module = ''
testcase_name = module + testcase.__name__
if testcase_name not in tests_by_testcase:
tests_by_testcase[testcase_name] = []
tests_by_testcase[testcase_name].append(test_info)
return tests_by_testcase
def _report_testsuite(suite_name, tests, xml_document):
"Appends the testsuite section to the XML document."
testsuite = xml_document.createElement('testsuite')
xml_document.appendChild(testsuite)
testsuite.setAttribute('name', str(suite_name))
testsuite.setAttribute('tests', str(len(tests)))
testsuite.setAttribute('time', '%.3f' %
sum([e.get_elapsed_time() for e in tests]))
        failures = len([1 for e in tests if e.outcome == _TestInfo.FAILURE])
        testsuite.setAttribute('failures', str(failures))

        errors = len([1 for e in tests if e.outcome == _TestInfo.ERROR])
        testsuite.setAttribute('errors', str(errors))

        return testsuite

    _report_testsuite = staticmethod(_report_testsuite)

    def _report_testcase(suite_name, test_result, xml_testsuite, xml_document):
        "Appends a testcase section to the XML document."
        testcase = xml_document.createElement('testcase')
        xml_testsuite.appendChild(testcase)

        testcase.setAttribute('classname', str(suite_name))
        testcase.setAttribute('name', test_result.test_method.shortDescription()
                              or getattr(test_result.test_method, '_testMethodName',
                                         str(test_result.test_method)))
        testcase.setAttribute('time', '%.3f' % test_result.get_elapsed_time())

        if test_result.outcome != _TestInfo.SUCCESS:
            elem_name = ('failure', 'error')[test_result.outcome - 1]
            failure = xml_document.createElement(elem_name)
            testcase.appendChild(failure)

            failure.setAttribute('type', str(test_result.err[0].__name__))
            failure.setAttribute('message', str(test_result.err[1]))

            error_info = test_result.get_error_info()
            failureText = xml_document.createCDATAOrText(error_info)
            failure.appendChild(failureText)

    _report_testcase = staticmethod(_report_testcase)

    def _report_output(test_runner, xml_testsuite, xml_document, stdout, stderr):
        "Appends the system-out and system-err sections to the XML document."
        systemout = xml_document.createElement('system-out')
        xml_testsuite.appendChild(systemout)

        systemout_text = xml_document.createCDATAOrText(stdout)
        systemout.appendChild(systemout_text)

        systemerr = xml_document.createElement('system-err')
        xml_testsuite.appendChild(systemerr)

        systemerr_text = xml_document.createCDATAOrText(stderr)
        systemerr.appendChild(systemerr_text)

    _report_output = staticmethod(_report_output)

    def generate_reports(self, test_runner):
        "Generates the XML reports to a given XMLTestRunner object."
        all_results = self._get_info_by_testcase()

        if (type(test_runner.output) == str and
                not os.path.exists(test_runner.output)):
            os.makedirs(test_runner.output)

        for suite, tests in all_results.items():
            doc = XMLDocument()

            # Build the XML file
            testsuite = _XMLTestResult._report_testsuite(suite, tests, doc)
            stdout, stderr = [], []
            for test in tests:
                _XMLTestResult._report_testcase(suite, test, testsuite, doc)
                if test.stdout:
                    stdout.extend(['*****************', test.get_description(), test.stdout])
                if test.stderr:
                    stderr.extend(['*****************', test.get_description(), test.stderr])
            _XMLTestResult._report_output(test_runner, testsuite, doc,
                                          '\n'.join(stdout), '\n'.join(stderr))
            xml_content = doc.toprettyxml(indent='\t')

            if type(test_runner.output) is str:
                report_file = open('%s%sTEST-%s.xml' %
                                   (test_runner.output, os.sep, suite), 'w')
                try:
                    report_file.write(xml_content)
                finally:
                    report_file.close()
            else:
                # Assume that test_runner.output is a stream
                test_runner.output.write(xml_content)


class XMLTestRunner(TextTestRunner):
    """A test runner class that outputs the results in JUnit like XML files.
    """
    def __init__(self, output='.', stream=None, descriptions=True, verbose=False, elapsed_times=True):
        "Create a new instance of XMLTestRunner."
        if stream is None:
            stream = sys.stderr
        verbosity = (1, 2)[verbose]
        TextTestRunner.__init__(self, stream, descriptions, verbosity)
        self.output = output
        self.elapsed_times = elapsed_times

    def _make_result(self):
        """Create the TestResult object which will be used to store
        information about the executed tests.
        """
        return _XMLTestResult(self.stream, self.descriptions,
                              self.verbosity, self.elapsed_times)

    def run(self, test):
        "Run the given test case or test suite."
        # Prepare the test execution
        result = self._make_result()

        # Print a nice header
        self.stream.writeln()
        self.stream.writeln('Running tests...')
        self.stream.writeln(result.separator2)

        # Execute tests
        start_time = time.time()
        test(result)
        stop_time = time.time()
        time_taken = stop_time - start_time

        # Generate reports
        self.stream.writeln()
        self.stream.writeln('Generating XML reports...')
        result.generate_reports(self)

        # Print results
        result.printErrors()
        self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", time_taken))
        self.stream.writeln()

        # Error traces
        if not result.wasSuccessful():
            self.stream.write("FAILED (")
            failed, errored = len(result.failures), len(result.errors)
            if failed:
                self.stream.write("failures=%d" % failed)
            if errored:
                if failed:
                    self.stream.write(", ")
                self.stream.write("errors=%d" % errored)
            self.stream.writeln(")")
        else:
            self.stream.writeln("OK")

        return result
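
# Example usage (a minimal sketch; ``MyTest`` is a hypothetical stand-in for
# any unittest.TestCase):
#
#     import unittest
#
#     class MyTest(unittest.TestCase):
#         def test_addition(self):
#             self.assertEqual(1 + 1, 2)
#
#     if __name__ == '__main__':
#         suite = unittest.TestLoader().loadTestsFromTestCase(MyTest)
#         XMLTestRunner(output='reports').run(suite)
#
# Each suite is written to 'reports/TEST-<suite>.xml' in the JUnit layout that
# most CI systems understand; pass a stream as ``output`` instead of a path to
# write the XML there directly.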
Cython-0.29.28/Cython/Utility/AsyncGen.c
// This is copied from genobject.c in CPython 3.6.
// Try to keep it in sync by doing this from time to time:
// sed -e 's|__pyx_||ig' Cython/Utility/AsyncGen.c | diff -udw - cpython/Objects/genobject.c | less
//////////////////// AsyncGenerator.proto ////////////////////
//@requires: Coroutine.c::Coroutine
#define __Pyx_AsyncGen_USED
typedef struct {
__pyx_CoroutineObject coro;
PyObject *ag_finalizer;
int ag_hooks_inited;
int ag_closed;
} __pyx_PyAsyncGenObject;
static PyTypeObject *__pyx__PyAsyncGenWrappedValueType = 0;
static PyTypeObject *__pyx__PyAsyncGenASendType = 0;
static PyTypeObject *__pyx__PyAsyncGenAThrowType = 0;
static PyTypeObject *__pyx_AsyncGenType = 0;
#define __Pyx_AsyncGen_CheckExact(obj) (Py_TYPE(obj) == __pyx_AsyncGenType)
#define __pyx_PyAsyncGenASend_CheckExact(o) \
(Py_TYPE(o) == __pyx__PyAsyncGenASendType)
#define __pyx_PyAsyncGenAThrow_CheckExact(o) \
(Py_TYPE(o) == __pyx__PyAsyncGenAThrowType)
static PyObject *__Pyx_async_gen_anext(PyObject *o);
static CYTHON_INLINE PyObject *__Pyx_async_gen_asend_iternext(PyObject *o);
static PyObject *__Pyx_async_gen_asend_send(PyObject *o, PyObject *arg);
static PyObject *__Pyx_async_gen_asend_close(PyObject *o, PyObject *args);
static PyObject *__Pyx_async_gen_athrow_close(PyObject *o, PyObject *args);
static PyObject *__Pyx__PyAsyncGenValueWrapperNew(PyObject *val);
static __pyx_CoroutineObject *__Pyx_AsyncGen_New(
__pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name) {
__pyx_PyAsyncGenObject *gen = PyObject_GC_New(__pyx_PyAsyncGenObject, __pyx_AsyncGenType);
if (unlikely(!gen))
return NULL;
gen->ag_finalizer = NULL;
gen->ag_closed = 0;
gen->ag_hooks_inited = 0;
return __Pyx__Coroutine_NewInit((__pyx_CoroutineObject*)gen, body, code, closure, name, qualname, module_name);
}
static int __pyx_AsyncGen_init(void);
static void __Pyx_PyAsyncGen_Fini(void);
//////////////////// AsyncGenerator.cleanup ////////////////////
__Pyx_PyAsyncGen_Fini();
//////////////////// AsyncGeneratorInitFinalizer ////////////////////
// this is separated out because it needs more adaptation
#if PY_VERSION_HEX < 0x030600B0
static int __Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o) {
#if 0
// TODO: implement finalizer support in older Python versions
PyThreadState *tstate;
PyObject *finalizer;
PyObject *firstiter;
#endif
if (likely(o->ag_hooks_inited)) {
return 0;
}
o->ag_hooks_inited = 1;
#if 0
tstate = __Pyx_PyThreadState_Current;
finalizer = tstate->async_gen_finalizer;
if (finalizer) {
Py_INCREF(finalizer);
o->ag_finalizer = finalizer;
}
firstiter = tstate->async_gen_firstiter;
if (firstiter) {
PyObject *res;
Py_INCREF(firstiter);
res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o);
Py_DECREF(firstiter);
if (res == NULL) {
return 1;
}
Py_DECREF(res);
}
#endif
return 0;
}
#endif
//////////////////// AsyncGenerator ////////////////////
//@requires: AsyncGeneratorInitFinalizer
//@requires: Coroutine.c::Coroutine
//@requires: Coroutine.c::ReturnWithStopIteration
//@requires: ObjectHandling.c::PyObjectCall2Args
//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
PyDoc_STRVAR(__Pyx_async_gen_send_doc,
"send(arg) -> send 'arg' into generator,\n\
return next yielded value or raise StopIteration.");
PyDoc_STRVAR(__Pyx_async_gen_close_doc,
"close() -> raise GeneratorExit inside generator.");
PyDoc_STRVAR(__Pyx_async_gen_throw_doc,
"throw(typ[,val[,tb]]) -> raise exception in generator,\n\
return next yielded value or raise StopIteration.");
PyDoc_STRVAR(__Pyx_async_gen_await_doc,
"__await__() -> return a representation that can be passed into the 'await' expression.");
// COPY STARTS HERE:
static PyObject *__Pyx_async_gen_asend_new(__pyx_PyAsyncGenObject *, PyObject *);
static PyObject *__Pyx_async_gen_athrow_new(__pyx_PyAsyncGenObject *, PyObject *);
static const char *__Pyx_NON_INIT_CORO_MSG = "can't send non-None value to a just-started coroutine";
static const char *__Pyx_ASYNC_GEN_IGNORED_EXIT_MSG = "async generator ignored GeneratorExit";
typedef enum {
__PYX_AWAITABLE_STATE_INIT, /* new awaitable, has not yet been iterated */
__PYX_AWAITABLE_STATE_ITER, /* being iterated */
__PYX_AWAITABLE_STATE_CLOSED, /* closed */
} __pyx_AwaitableState;
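/* Awaitable lifecycle (sketch): INIT --first send()/iteration--> ITER
 * --generator exhausted, closed, or errored--> CLOSED. Any send()/throw()
 * on a CLOSED awaitable raises StopIteration immediately (see
 * __Pyx_async_gen_asend_send and __Pyx_async_gen_athrow_send below). */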
typedef struct {
PyObject_HEAD
__pyx_PyAsyncGenObject *ags_gen;
/* Can be NULL, when in the __anext__() mode (equivalent of "asend(None)") */
PyObject *ags_sendval;
__pyx_AwaitableState ags_state;
} __pyx_PyAsyncGenASend;
typedef struct {
PyObject_HEAD
__pyx_PyAsyncGenObject *agt_gen;
/* Can be NULL, when in the "aclose()" mode (equivalent of "athrow(GeneratorExit)") */
PyObject *agt_args;
__pyx_AwaitableState agt_state;
} __pyx_PyAsyncGenAThrow;
typedef struct {
PyObject_HEAD
PyObject *agw_val;
} __pyx__PyAsyncGenWrappedValue;
#ifndef _PyAsyncGen_MAXFREELIST
#define _PyAsyncGen_MAXFREELIST 80
#endif
// Freelists boost performance 6-10%; they also reduce memory
// fragmentation, as _PyAsyncGenWrappedValue and PyAsyncGenASend
// are short-living objects that are instantiated for every
// __anext__ call.
static __pyx__PyAsyncGenWrappedValue *__Pyx_ag_value_freelist[_PyAsyncGen_MAXFREELIST];
static int __Pyx_ag_value_freelist_free = 0;
static __pyx_PyAsyncGenASend *__Pyx_ag_asend_freelist[_PyAsyncGen_MAXFREELIST];
static int __Pyx_ag_asend_freelist_free = 0;
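/* Freelist push/pop in a nutshell (illustrative sketch of the pattern used below):
 *   dealloc: if (freelist_free < _PyAsyncGen_MAXFREELIST)
 *                freelist[freelist_free++] = o;      // park object for reuse
 *            else
 *                PyObject_GC_Del(o);                 // freelist full, really free it
 *   alloc:   if (freelist_free) {
 *                o = freelist[--freelist_free];      // reuse a parked object
 *                _Py_NewReference((PyObject*)o);     // reset its refcount to 1
 *            } else
 *                o = PyObject_GC_New(...);           // slow path
 * See __Pyx_async_gen_asend_dealloc/__Pyx_async_gen_asend_new for the real code. */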
#define __pyx__PyAsyncGenWrappedValue_CheckExact(o) \
(Py_TYPE(o) == __pyx__PyAsyncGenWrappedValueType)
static int
__Pyx_async_gen_traverse(__pyx_PyAsyncGenObject *gen, visitproc visit, void *arg)
{
Py_VISIT(gen->ag_finalizer);
return __Pyx_Coroutine_traverse((__pyx_CoroutineObject*)gen, visit, arg);
}
static PyObject *
__Pyx_async_gen_repr(__pyx_CoroutineObject *o)
{
// avoid NULL pointer dereference for qualname during garbage collection
return PyUnicode_FromFormat("",
o->gi_qualname ? o->gi_qualname : Py_None, o);
}
#if PY_VERSION_HEX >= 0x030600B0
static int
__Pyx_async_gen_init_hooks(__pyx_PyAsyncGenObject *o)
{
PyThreadState *tstate;
PyObject *finalizer;
PyObject *firstiter;
if (o->ag_hooks_inited) {
return 0;
}
o->ag_hooks_inited = 1;
tstate = __Pyx_PyThreadState_Current;
finalizer = tstate->async_gen_finalizer;
if (finalizer) {
Py_INCREF(finalizer);
o->ag_finalizer = finalizer;
}
firstiter = tstate->async_gen_firstiter;
if (firstiter) {
PyObject *res;
#if CYTHON_UNPACK_METHODS
PyObject *self;
#endif
Py_INCREF(firstiter);
// at least asyncio stores methods here => optimise the call
#if CYTHON_UNPACK_METHODS
if (likely(PyMethod_Check(firstiter)) && likely((self = PyMethod_GET_SELF(firstiter)) != NULL)) {
PyObject *function = PyMethod_GET_FUNCTION(firstiter);
res = __Pyx_PyObject_Call2Args(function, self, (PyObject*)o);
} else
#endif
res = __Pyx_PyObject_CallOneArg(firstiter, (PyObject*)o);
Py_DECREF(firstiter);
if (unlikely(res == NULL)) {
return 1;
}
Py_DECREF(res);
}
return 0;
}
#endif
static PyObject *
__Pyx_async_gen_anext(PyObject *g)
{
__pyx_PyAsyncGenObject *o = (__pyx_PyAsyncGenObject*) g;
if (__Pyx_async_gen_init_hooks(o)) {
return NULL;
}
return __Pyx_async_gen_asend_new(o, NULL);
}
static PyObject *
__Pyx_async_gen_anext_method(PyObject *g, CYTHON_UNUSED PyObject *arg) {
return __Pyx_async_gen_anext(g);
}
static PyObject *
__Pyx_async_gen_asend(__pyx_PyAsyncGenObject *o, PyObject *arg)
{
if (__Pyx_async_gen_init_hooks(o)) {
return NULL;
}
return __Pyx_async_gen_asend_new(o, arg);
}
static PyObject *
__Pyx_async_gen_aclose(__pyx_PyAsyncGenObject *o, CYTHON_UNUSED PyObject *arg)
{
if (__Pyx_async_gen_init_hooks(o)) {
return NULL;
}
return __Pyx_async_gen_athrow_new(o, NULL);
}
static PyObject *
__Pyx_async_gen_athrow(__pyx_PyAsyncGenObject *o, PyObject *args)
{
if (__Pyx_async_gen_init_hooks(o)) {
return NULL;
}
return __Pyx_async_gen_athrow_new(o, args);
}
static PyObject *
__Pyx_async_gen_self_method(PyObject *g, CYTHON_UNUSED PyObject *arg) {
return __Pyx_NewRef(g);
}
static PyGetSetDef __Pyx_async_gen_getsetlist[] = {
{(char*) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
(char*) PyDoc_STR("name of the async generator"), 0},
{(char*) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
(char*) PyDoc_STR("qualified name of the async generator"), 0},
//REMOVED: {(char*) "ag_await", (getter)coro_get_cr_await, NULL,
//REMOVED: (char*) PyDoc_STR("object being awaited on, or None")},
{0, 0, 0, 0, 0} /* Sentinel */
};
static PyMemberDef __Pyx_async_gen_memberlist[] = {
//REMOVED: {(char*) "ag_frame", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_frame), READONLY},
{(char*) "ag_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
//REMOVED: {(char*) "ag_code", T_OBJECT, offsetof(__pyx_PyAsyncGenObject, ag_code), READONLY},
//ADDED: "ag_await"
{(char*) "ag_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
(char*) PyDoc_STR("object being awaited on, or None")},
{0, 0, 0, 0, 0} /* Sentinel */
};
PyDoc_STRVAR(__Pyx_async_aclose_doc,
"aclose() -> raise GeneratorExit inside generator.");
PyDoc_STRVAR(__Pyx_async_asend_doc,
"asend(v) -> send 'v' in generator.");
PyDoc_STRVAR(__Pyx_async_athrow_doc,
"athrow(typ[,val[,tb]]) -> raise exception in generator.");
PyDoc_STRVAR(__Pyx_async_aiter_doc,
"__aiter__(v) -> return an asynchronous iterator.");
PyDoc_STRVAR(__Pyx_async_anext_doc,
"__anext__(v) -> continue asynchronous iteration and return the next element.");
static PyMethodDef __Pyx_async_gen_methods[] = {
{"asend", (PyCFunction)__Pyx_async_gen_asend, METH_O, __Pyx_async_asend_doc},
{"athrow",(PyCFunction)__Pyx_async_gen_athrow, METH_VARARGS, __Pyx_async_athrow_doc},
{"aclose", (PyCFunction)__Pyx_async_gen_aclose, METH_NOARGS, __Pyx_async_aclose_doc},
{"__aiter__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_aiter_doc},
{"__anext__", (PyCFunction)__Pyx_async_gen_anext_method, METH_NOARGS, __Pyx_async_anext_doc},
{0, 0, 0, 0} /* Sentinel */
};
#if CYTHON_USE_ASYNC_SLOTS
static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_as_async = {
0, /* am_await */
PyObject_SelfIter, /* am_aiter */
(unaryfunc)__Pyx_async_gen_anext, /* am_anext */
#if PY_VERSION_HEX >= 0x030A00A3
0, /*am_send*/
#endif
};
#endif
static PyTypeObject __pyx_AsyncGenType_type = {
PyVarObject_HEAD_INIT(0, 0)
"async_generator", /* tp_name */
sizeof(__pyx_PyAsyncGenObject), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)__Pyx_Coroutine_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
#if CYTHON_USE_ASYNC_SLOTS
&__Pyx_async_gen_as_async, /* tp_as_async */
#else
0, /*tp_reserved*/
#endif
(reprfunc)__Pyx_async_gen_repr, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC |
Py_TPFLAGS_HAVE_FINALIZE, /* tp_flags */
0, /* tp_doc */
(traverseproc)__Pyx_async_gen_traverse, /* tp_traverse */
0, /* tp_clear */
#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
// in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
__Pyx_Coroutine_compare, /*tp_richcompare*/
#else
0, /*tp_richcompare*/
#endif
offsetof(__pyx_CoroutineObject, gi_weakreflist), /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
__Pyx_async_gen_methods, /* tp_methods */
__Pyx_async_gen_memberlist, /* tp_members */
__Pyx_async_gen_getsetlist, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
0, /* tp_new */
0, /* tp_free */
0, /* tp_is_gc */
0, /* tp_bases */
0, /* tp_mro */
0, /* tp_cache */
0, /* tp_subclasses */
0, /* tp_weaklist */
#if CYTHON_USE_TP_FINALIZE
0, /*tp_del*/
#else
__Pyx_Coroutine_del, /*tp_del*/
#endif
0, /* tp_version_tag */
#if CYTHON_USE_TP_FINALIZE
__Pyx_Coroutine_del, /* tp_finalize */
#elif PY_VERSION_HEX >= 0x030400a1
0, /* tp_finalize */
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static int
__Pyx_PyAsyncGen_ClearFreeLists(void)
{
int ret = __Pyx_ag_value_freelist_free + __Pyx_ag_asend_freelist_free;
while (__Pyx_ag_value_freelist_free) {
__pyx__PyAsyncGenWrappedValue *o;
o = __Pyx_ag_value_freelist[--__Pyx_ag_value_freelist_free];
assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o));
PyObject_GC_Del(o);
}
while (__Pyx_ag_asend_freelist_free) {
__pyx_PyAsyncGenASend *o;
o = __Pyx_ag_asend_freelist[--__Pyx_ag_asend_freelist_free];
assert(Py_TYPE(o) == __pyx__PyAsyncGenASendType);
PyObject_GC_Del(o);
}
return ret;
}
static void
__Pyx_PyAsyncGen_Fini(void)
{
__Pyx_PyAsyncGen_ClearFreeLists();
}
static PyObject *
__Pyx_async_gen_unwrap_value(__pyx_PyAsyncGenObject *gen, PyObject *result)
{
if (result == NULL) {
PyObject *exc_type = PyErr_Occurred();
if (!exc_type) {
PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration);
gen->ag_closed = 1;
} else if (__Pyx_PyErr_GivenExceptionMatches2(exc_type, __Pyx_PyExc_StopAsyncIteration, PyExc_GeneratorExit)) {
gen->ag_closed = 1;
}
return NULL;
}
if (__pyx__PyAsyncGenWrappedValue_CheckExact(result)) {
/* async yield */
__Pyx_ReturnWithStopIteration(((__pyx__PyAsyncGenWrappedValue*)result)->agw_val);
Py_DECREF(result);
return NULL;
}
return result;
}
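/* Why wrap yielded values at all (sketch of the reasoning, mirroring CPython's
 * genobject.c): the async generator drives its frame through the same machinery
 * as a coroutine, so a plain object coming back could be either an "async yield"
 * or a value passing through an awaited object. Boxing every yielded value in a
 * __pyx__PyAsyncGenWrappedValue lets unwrap_value above tell the two apart and
 * re-raise the yield as StopIteration(value). */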
/* ---------- Async Generator ASend Awaitable ------------ */
static void
__Pyx_async_gen_asend_dealloc(__pyx_PyAsyncGenASend *o)
{
PyObject_GC_UnTrack((PyObject *)o);
Py_CLEAR(o->ags_gen);
Py_CLEAR(o->ags_sendval);
if (__Pyx_ag_asend_freelist_free < _PyAsyncGen_MAXFREELIST) {
assert(__pyx_PyAsyncGenASend_CheckExact(o));
__Pyx_ag_asend_freelist[__Pyx_ag_asend_freelist_free++] = o;
} else {
PyObject_GC_Del(o);
}
}
static int
__Pyx_async_gen_asend_traverse(__pyx_PyAsyncGenASend *o, visitproc visit, void *arg)
{
Py_VISIT(o->ags_gen);
Py_VISIT(o->ags_sendval);
return 0;
}
static PyObject *
__Pyx_async_gen_asend_send(PyObject *g, PyObject *arg)
{
__pyx_PyAsyncGenASend *o = (__pyx_PyAsyncGenASend*) g;
PyObject *result;
if (unlikely(o->ags_state == __PYX_AWAITABLE_STATE_CLOSED)) {
PyErr_SetNone(PyExc_StopIteration);
return NULL;
}
if (o->ags_state == __PYX_AWAITABLE_STATE_INIT) {
if (arg == NULL || arg == Py_None) {
arg = o->ags_sendval ? o->ags_sendval : Py_None;
}
o->ags_state = __PYX_AWAITABLE_STATE_ITER;
}
result = __Pyx_Coroutine_Send((PyObject*)o->ags_gen, arg);
result = __Pyx_async_gen_unwrap_value(o->ags_gen, result);
if (result == NULL) {
o->ags_state = __PYX_AWAITABLE_STATE_CLOSED;
}
return result;
}
static CYTHON_INLINE PyObject *
__Pyx_async_gen_asend_iternext(PyObject *o)
{
return __Pyx_async_gen_asend_send(o, Py_None);
}
static PyObject *
__Pyx_async_gen_asend_throw(__pyx_PyAsyncGenASend *o, PyObject *args)
{
PyObject *result;
if (unlikely(o->ags_state == __PYX_AWAITABLE_STATE_CLOSED)) {
PyErr_SetNone(PyExc_StopIteration);
return NULL;
}
result = __Pyx_Coroutine_Throw((PyObject*)o->ags_gen, args);
result = __Pyx_async_gen_unwrap_value(o->ags_gen, result);
if (result == NULL) {
o->ags_state = __PYX_AWAITABLE_STATE_CLOSED;
}
return result;
}
static PyObject *
__Pyx_async_gen_asend_close(PyObject *g, CYTHON_UNUSED PyObject *args)
{
__pyx_PyAsyncGenASend *o = (__pyx_PyAsyncGenASend*) g;
o->ags_state = __PYX_AWAITABLE_STATE_CLOSED;
Py_RETURN_NONE;
}
static PyMethodDef __Pyx_async_gen_asend_methods[] = {
{"send", (PyCFunction)__Pyx_async_gen_asend_send, METH_O, __Pyx_async_gen_send_doc},
{"throw", (PyCFunction)__Pyx_async_gen_asend_throw, METH_VARARGS, __Pyx_async_gen_throw_doc},
{"close", (PyCFunction)__Pyx_async_gen_asend_close, METH_NOARGS, __Pyx_async_gen_close_doc},
{"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc},
{0, 0, 0, 0} /* Sentinel */
};
#if CYTHON_USE_ASYNC_SLOTS
static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_asend_as_async = {
PyObject_SelfIter, /* am_await */
0, /* am_aiter */
0, /* am_anext */
#if PY_VERSION_HEX >= 0x030A00A3
0, /*am_send*/
#endif
};
#endif
static PyTypeObject __pyx__PyAsyncGenASendType_type = {
PyVarObject_HEAD_INIT(0, 0)
"async_generator_asend", /* tp_name */
sizeof(__pyx_PyAsyncGenASend), /* tp_basicsize */
0, /* tp_itemsize */
/* methods */
(destructor)__Pyx_async_gen_asend_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
#if CYTHON_USE_ASYNC_SLOTS
&__Pyx_async_gen_asend_as_async, /* tp_as_async */
#else
0, /*tp_reserved*/
#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
0, /* tp_doc */
(traverseproc)__Pyx_async_gen_asend_traverse, /* tp_traverse */
0, /* tp_clear */
#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
// in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
__Pyx_Coroutine_compare, /*tp_richcompare*/
#else
0, /*tp_richcompare*/
#endif
0, /* tp_weaklistoffset */
PyObject_SelfIter, /* tp_iter */
(iternextfunc)__Pyx_async_gen_asend_iternext, /* tp_iternext */
__Pyx_async_gen_asend_methods, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
0, /* tp_new */
0, /* tp_free */
0, /* tp_is_gc */
0, /* tp_bases */
0, /* tp_mro */
0, /* tp_cache */
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
0, /* tp_version_tag */
#if PY_VERSION_HEX >= 0x030400a1
0, /* tp_finalize */
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static PyObject *
__Pyx_async_gen_asend_new(__pyx_PyAsyncGenObject *gen, PyObject *sendval)
{
__pyx_PyAsyncGenASend *o;
if (__Pyx_ag_asend_freelist_free) {
__Pyx_ag_asend_freelist_free--;
o = __Pyx_ag_asend_freelist[__Pyx_ag_asend_freelist_free];
_Py_NewReference((PyObject *)o);
} else {
o = PyObject_GC_New(__pyx_PyAsyncGenASend, __pyx__PyAsyncGenASendType);
if (o == NULL) {
return NULL;
}
}
Py_INCREF(gen);
o->ags_gen = gen;
Py_XINCREF(sendval);
o->ags_sendval = sendval;
o->ags_state = __PYX_AWAITABLE_STATE_INIT;
PyObject_GC_Track((PyObject*)o);
return (PyObject*)o;
}
/* ---------- Async Generator Value Wrapper ------------ */
static void
__Pyx_async_gen_wrapped_val_dealloc(__pyx__PyAsyncGenWrappedValue *o)
{
PyObject_GC_UnTrack((PyObject *)o);
Py_CLEAR(o->agw_val);
if (__Pyx_ag_value_freelist_free < _PyAsyncGen_MAXFREELIST) {
assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o));
__Pyx_ag_value_freelist[__Pyx_ag_value_freelist_free++] = o;
} else {
PyObject_GC_Del(o);
}
}
static int
__Pyx_async_gen_wrapped_val_traverse(__pyx__PyAsyncGenWrappedValue *o,
visitproc visit, void *arg)
{
Py_VISIT(o->agw_val);
return 0;
}
static PyTypeObject __pyx__PyAsyncGenWrappedValueType_type = {
PyVarObject_HEAD_INIT(0, 0)
"async_generator_wrapped_value", /* tp_name */
sizeof(__pyx__PyAsyncGenWrappedValue), /* tp_basicsize */
0, /* tp_itemsize */
/* methods */
(destructor)__Pyx_async_gen_wrapped_val_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
0, /* tp_as_async */
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
0, /* tp_doc */
(traverseproc)__Pyx_async_gen_wrapped_val_traverse, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
0, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
0, /* tp_new */
0, /* tp_free */
0, /* tp_is_gc */
0, /* tp_bases */
0, /* tp_mro */
0, /* tp_cache */
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
0, /* tp_version_tag */
#if PY_VERSION_HEX >= 0x030400a1
0, /* tp_finalize */
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static PyObject *
__Pyx__PyAsyncGenValueWrapperNew(PyObject *val)
{
// NOTE: steals a reference to val !
__pyx__PyAsyncGenWrappedValue *o;
assert(val);
if (__Pyx_ag_value_freelist_free) {
__Pyx_ag_value_freelist_free--;
o = __Pyx_ag_value_freelist[__Pyx_ag_value_freelist_free];
assert(__pyx__PyAsyncGenWrappedValue_CheckExact(o));
_Py_NewReference((PyObject*)o);
} else {
o = PyObject_GC_New(__pyx__PyAsyncGenWrappedValue, __pyx__PyAsyncGenWrappedValueType);
if (unlikely(!o)) {
Py_DECREF(val);
return NULL;
}
}
o->agw_val = val;
// no Py_INCREF(val) - steals reference!
PyObject_GC_Track((PyObject*)o);
return (PyObject*)o;
}
/* ---------- Async Generator AThrow awaitable ------------ */
static void
__Pyx_async_gen_athrow_dealloc(__pyx_PyAsyncGenAThrow *o)
{
PyObject_GC_UnTrack((PyObject *)o);
Py_CLEAR(o->agt_gen);
Py_CLEAR(o->agt_args);
PyObject_GC_Del(o);
}
static int
__Pyx_async_gen_athrow_traverse(__pyx_PyAsyncGenAThrow *o, visitproc visit, void *arg)
{
Py_VISIT(o->agt_gen);
Py_VISIT(o->agt_args);
return 0;
}
static PyObject *
__Pyx_async_gen_athrow_send(__pyx_PyAsyncGenAThrow *o, PyObject *arg)
{
__pyx_CoroutineObject *gen = (__pyx_CoroutineObject*)o->agt_gen;
PyObject *retval;
if (o->agt_state == __PYX_AWAITABLE_STATE_CLOSED) {
PyErr_SetNone(PyExc_StopIteration);
return NULL;
}
if (o->agt_state == __PYX_AWAITABLE_STATE_INIT) {
if (o->agt_gen->ag_closed) {
PyErr_SetNone(PyExc_StopIteration);
return NULL;
}
if (arg != Py_None) {
PyErr_SetString(PyExc_RuntimeError, __Pyx_NON_INIT_CORO_MSG);
return NULL;
}
o->agt_state = __PYX_AWAITABLE_STATE_ITER;
if (o->agt_args == NULL) {
/* aclose() mode */
o->agt_gen->ag_closed = 1;
retval = __Pyx__Coroutine_Throw((PyObject*)gen,
/* Do not close generator when
PyExc_GeneratorExit is passed */
PyExc_GeneratorExit, NULL, NULL, NULL, 0);
if (retval && __pyx__PyAsyncGenWrappedValue_CheckExact(retval)) {
Py_DECREF(retval);
goto yield_close;
}
} else {
PyObject *typ;
PyObject *tb = NULL;
PyObject *val = NULL;
if (!PyArg_UnpackTuple(o->agt_args, "athrow", 1, 3,
&typ, &val, &tb)) {
return NULL;
}
retval = __Pyx__Coroutine_Throw((PyObject*)gen,
/* Do not close generator when PyExc_GeneratorExit is passed */
typ, val, tb, o->agt_args, 0);
retval = __Pyx_async_gen_unwrap_value(o->agt_gen, retval);
}
if (retval == NULL) {
goto check_error;
}
return retval;
}
assert (o->agt_state == __PYX_AWAITABLE_STATE_ITER);
retval = __Pyx_Coroutine_Send((PyObject *)gen, arg);
if (o->agt_args) {
return __Pyx_async_gen_unwrap_value(o->agt_gen, retval);
} else {
/* aclose() mode */
if (retval) {
if (__pyx__PyAsyncGenWrappedValue_CheckExact(retval)) {
Py_DECREF(retval);
goto yield_close;
}
else {
return retval;
}
}
else {
goto check_error;
}
}
yield_close:
PyErr_SetString(
PyExc_RuntimeError, __Pyx_ASYNC_GEN_IGNORED_EXIT_MSG);
return NULL;
check_error:
if (PyErr_ExceptionMatches(__Pyx_PyExc_StopAsyncIteration)) {
o->agt_state = __PYX_AWAITABLE_STATE_CLOSED;
if (o->agt_args == NULL) {
// when aclose() is called we don't want to propagate
// StopAsyncIteration; just raise StopIteration, signalling
// that 'aclose()' is done.
PyErr_Clear();
PyErr_SetNone(PyExc_StopIteration);
}
}
else if (PyErr_ExceptionMatches(PyExc_GeneratorExit)) {
o->agt_state = __PYX_AWAITABLE_STATE_CLOSED;
PyErr_Clear(); /* ignore these errors */
PyErr_SetNone(PyExc_StopIteration);
}
return NULL;
}
static PyObject *
__Pyx_async_gen_athrow_throw(__pyx_PyAsyncGenAThrow *o, PyObject *args)
{
PyObject *retval;
if (o->agt_state == __PYX_AWAITABLE_STATE_INIT) {
PyErr_SetString(PyExc_RuntimeError, __Pyx_NON_INIT_CORO_MSG);
return NULL;
}
if (o->agt_state == __PYX_AWAITABLE_STATE_CLOSED) {
PyErr_SetNone(PyExc_StopIteration);
return NULL;
}
retval = __Pyx_Coroutine_Throw((PyObject*)o->agt_gen, args);
if (o->agt_args) {
return __Pyx_async_gen_unwrap_value(o->agt_gen, retval);
} else {
/* aclose() mode */
if (retval && __pyx__PyAsyncGenWrappedValue_CheckExact(retval)) {
Py_DECREF(retval);
PyErr_SetString(PyExc_RuntimeError, __Pyx_ASYNC_GEN_IGNORED_EXIT_MSG);
return NULL;
}
return retval;
}
}
static PyObject *
__Pyx_async_gen_athrow_iternext(__pyx_PyAsyncGenAThrow *o)
{
return __Pyx_async_gen_athrow_send(o, Py_None);
}
static PyObject *
__Pyx_async_gen_athrow_close(PyObject *g, CYTHON_UNUSED PyObject *args)
{
__pyx_PyAsyncGenAThrow *o = (__pyx_PyAsyncGenAThrow*) g;
o->agt_state = __PYX_AWAITABLE_STATE_CLOSED;
Py_RETURN_NONE;
}
static PyMethodDef __Pyx_async_gen_athrow_methods[] = {
{"send", (PyCFunction)__Pyx_async_gen_athrow_send, METH_O, __Pyx_async_gen_send_doc},
{"throw", (PyCFunction)__Pyx_async_gen_athrow_throw, METH_VARARGS, __Pyx_async_gen_throw_doc},
{"close", (PyCFunction)__Pyx_async_gen_athrow_close, METH_NOARGS, __Pyx_async_gen_close_doc},
{"__await__", (PyCFunction)__Pyx_async_gen_self_method, METH_NOARGS, __Pyx_async_gen_await_doc},
{0, 0, 0, 0} /* Sentinel */
};
#if CYTHON_USE_ASYNC_SLOTS
static __Pyx_PyAsyncMethodsStruct __Pyx_async_gen_athrow_as_async = {
PyObject_SelfIter, /* am_await */
0, /* am_aiter */
0, /* am_anext */
#if PY_VERSION_HEX >= 0x030A00A3
0, /*am_send*/
#endif
};
#endif
static PyTypeObject __pyx__PyAsyncGenAThrowType_type = {
PyVarObject_HEAD_INIT(0, 0)
"async_generator_athrow", /* tp_name */
sizeof(__pyx_PyAsyncGenAThrow), /* tp_basicsize */
0, /* tp_itemsize */
(destructor)__Pyx_async_gen_athrow_dealloc, /* tp_dealloc */
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
#if CYTHON_USE_ASYNC_SLOTS
&__Pyx_async_gen_athrow_as_async, /* tp_as_async */
#else
0, /*tp_reserved*/
#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
0, /* tp_as_mapping */
0, /* tp_hash */
0, /* tp_call */
0, /* tp_str */
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
0, /* tp_doc */
(traverseproc)__Pyx_async_gen_athrow_traverse, /* tp_traverse */
0, /* tp_clear */
#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
// in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
__Pyx_Coroutine_compare, /*tp_richcompare*/
#else
0, /*tp_richcompare*/
#endif
0, /* tp_weaklistoffset */
PyObject_SelfIter, /* tp_iter */
(iternextfunc)__Pyx_async_gen_athrow_iternext, /* tp_iternext */
__Pyx_async_gen_athrow_methods, /* tp_methods */
0, /* tp_members */
0, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
0, /* tp_init */
0, /* tp_alloc */
0, /* tp_new */
0, /* tp_free */
0, /* tp_is_gc */
0, /* tp_bases */
0, /* tp_mro */
0, /* tp_cache */
0, /* tp_subclasses */
0, /* tp_weaklist */
0, /* tp_del */
0, /* tp_version_tag */
#if PY_VERSION_HEX >= 0x030400a1
0, /* tp_finalize */
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static PyObject *
__Pyx_async_gen_athrow_new(__pyx_PyAsyncGenObject *gen, PyObject *args)
{
__pyx_PyAsyncGenAThrow *o;
o = PyObject_GC_New(__pyx_PyAsyncGenAThrow, __pyx__PyAsyncGenAThrowType);
if (o == NULL) {
return NULL;
}
o->agt_gen = gen;
o->agt_args = args;
o->agt_state = __PYX_AWAITABLE_STATE_INIT;
Py_INCREF(gen);
Py_XINCREF(args);
PyObject_GC_Track((PyObject*)o);
return (PyObject*)o;
}
/* ---------- global type sharing ------------ */
static int __pyx_AsyncGen_init(void) {
// on Windows, C-API functions can't be used in slots statically
__pyx_AsyncGenType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx__PyAsyncGenWrappedValueType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx__PyAsyncGenAThrowType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx__PyAsyncGenASendType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_AsyncGenType = __Pyx_FetchCommonType(&__pyx_AsyncGenType_type);
if (unlikely(!__pyx_AsyncGenType))
return -1;
__pyx__PyAsyncGenAThrowType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenAThrowType_type);
if (unlikely(!__pyx__PyAsyncGenAThrowType))
return -1;
__pyx__PyAsyncGenWrappedValueType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenWrappedValueType_type);
if (unlikely(!__pyx__PyAsyncGenWrappedValueType))
return -1;
__pyx__PyAsyncGenASendType = __Pyx_FetchCommonType(&__pyx__PyAsyncGenASendType_type);
if (unlikely(!__pyx__PyAsyncGenASendType))
return -1;
return 0;
}
Cython-0.29.28/Cython/Utility/Buffer.c
/////////////// BufferStructDeclare.proto ///////////////
/* structs for buffer access */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[{{max_dims}}];
} __Pyx_LocalBuf_ND;
/////////////// BufferIndexError.proto ///////////////
static void __Pyx_RaiseBufferIndexError(int axis); /*proto*/
/////////////// BufferIndexError ///////////////
static void __Pyx_RaiseBufferIndexError(int axis) {
PyErr_Format(PyExc_IndexError,
"Out of bounds on buffer access (axis %d)", axis);
}
/////////////// BufferIndexErrorNogil.proto ///////////////
//@requires: BufferIndexError
static void __Pyx_RaiseBufferIndexErrorNogil(int axis); /*proto*/
/////////////// BufferIndexErrorNogil ///////////////
static void __Pyx_RaiseBufferIndexErrorNogil(int axis) {
#ifdef WITH_THREAD
PyGILState_STATE gilstate = PyGILState_Ensure();
#endif
__Pyx_RaiseBufferIndexError(axis);
#ifdef WITH_THREAD
PyGILState_Release(gilstate);
#endif
}
/////////////// BufferFallbackError.proto ///////////////
static void __Pyx_RaiseBufferFallbackError(void); /*proto*/
/////////////// BufferFallbackError ///////////////
static void __Pyx_RaiseBufferFallbackError(void) {
PyErr_SetString(PyExc_ValueError,
"Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!");
}
/////////////// BufferFormatStructs.proto ///////////////
//@proto_block: utility_code_proto_before_types
#define IS_UNSIGNED(type) (((type) -1) > 0)
/* Run-time type information about structs used with buffers */
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name; /* for error messages only */
struct __Pyx_StructField_* fields;
size_t size; /* sizeof(type) */
size_t arraysize[8]; /* length of array in each dimension */
int ndim;
char typegroup; /* _R_eal, _C_omplex, Signed _I_nt, _U_nsigned int, _S_truct, _P_ointer, _O_bject, c_H_ar */
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/////////////// GetAndReleaseBuffer.proto ///////////////
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/////////////// GetAndReleaseBuffer ///////////////
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
{{for type_ptr, getbuffer, releasebuffer in types}}
{{if getbuffer}}
if (__Pyx_TypeCheck(obj, {{type_ptr}})) return {{getbuffer}}(obj, view, flags);
{{endif}}
{{endfor}}
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
{{for type_ptr, getbuffer, releasebuffer in types}}
{{if releasebuffer}}
else if (__Pyx_TypeCheck(obj, {{type_ptr}})) {{releasebuffer}}(obj, view);
{{endif}}
{{endfor}}
view->obj = NULL;
Py_DECREF(obj);
}
#endif /* PY_MAJOR_VERSION < 3 */
/////////////// BufferGetAndValidate.proto ///////////////
#define __Pyx_GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack) \
((obj == Py_None || obj == NULL) ? \
(__Pyx_ZeroBuffer(buf), 0) : \
__Pyx__GetBufferAndValidate(buf, obj, dtype, flags, nd, cast, stack))
static int __Pyx__GetBufferAndValidate(Py_buffer* buf, PyObject* obj,
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack);
static void __Pyx_ZeroBuffer(Py_buffer* buf);
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info);/*proto*/
static Py_ssize_t __Pyx_minusones[] = { {{ ", ".join(["-1"] * max_dims) }} };
static Py_ssize_t __Pyx_zeros[] = { {{ ", ".join(["0"] * max_dims) }} };
/////////////// BufferGetAndValidate ///////////////
//@requires: BufferFormatCheck
static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) {
if (unlikely(info->buf == NULL)) return;
if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL;
__Pyx_ReleaseBuffer(info);
}
static void __Pyx_ZeroBuffer(Py_buffer* buf) {
buf->buf = NULL;
buf->obj = NULL;
buf->strides = __Pyx_zeros;
buf->shape = __Pyx_zeros;
buf->suboffsets = __Pyx_minusones;
}
static int __Pyx__GetBufferAndValidate(
Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags,
int nd, int cast, __Pyx_BufFmt_StackElem* stack)
{
buf->buf = NULL;
if (unlikely(__Pyx_GetBuffer(obj, buf, flags) == -1)) {
__Pyx_ZeroBuffer(buf);
return -1;
}
// From this point on, we have acquired the buffer and must release it on errors.
if (unlikely(buf->ndim != nd)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
nd, buf->ndim);
goto fail;
}
if (!cast) {
__Pyx_BufFmt_Context ctx;
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail;
}
if (unlikely((size_t)buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)",
buf->itemsize, (buf->itemsize > 1) ? "s" : "",
dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones;
return 0;
fail:;
__Pyx_SafeReleaseBuffer(buf);
return -1;
}
/////////////// BufferFormatCheck.proto ///////////////
// Buffer format string checking
//
// Buffer type checking. Utility code for checking that acquired
// buffers match our assumptions. We only need to check ndim and
// the format string; the access mode/flags are checked by the
// exporter. See:
//
// http://docs.python.org/3/library/struct.html
// http://legacy.python.org/dev/peps/pep-3118/#additions-to-the-struct-string-syntax
//
// The alignment code is copied from _struct.c in Python.
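//
// A few format strings this checker accepts (standard PEP 3118 syntax):
//   "f"            one C float
//   "3d"           three consecutive doubles
//   "Zd"           one double complex (the 'Z' prefix marks complex)
//   "T{f:x:f:y:}"  a struct with two float fields named x and y
//   "(2,3)i"       a 2x3 C array of ints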
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type); /*proto*/
/////////////// BufferFormatCheck ///////////////
//@requires: ModuleSetupCode.c::IsLittleEndian
//@requires: BufferFormatStructs
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1) /* First char was not a digit */
PyErr_Format(PyExc_ValueError,\
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g')..");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
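/* The alignment of a type T is recovered portably as
 *   sizeof(struct { char c; T x; }) - sizeof(T)
 * because the compiler inserts exactly the padding after 'c' that is needed to
 * align 'x'. With 4-byte, 4-aligned ints, for example, sizeof(__Pyx_st_int) is 8,
 * giving an alignment of 8 - 4 == 4. */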
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
on the first member of the struct. This will probably be the same as above,
but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
/* printf("processing... %s\n", ctx->head->field->type->name); */
if (ctx->enc_type == 0) return 0;
/* Validate array size */
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
/* handle strings ('s' and 'p') */
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
/* special case -- treat as struct rather than complex number */
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
/* special case -- chars don't care about sign */
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count; /* Consume from buffer string */
/* Done checking, move to next field, pushing or popping struct stack if needed */
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break; /* breaks both loops as ctx->enc_count == 0 */
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue; /* empty struct */
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
/* Parse an array in the format string (e.g. (1,2,3)) */
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number, ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
/* Process the previous element */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
// store ndim now, as field advanced by __Pyx_BufFmt_ProcessTypeChunk call
ndim = ctx->head->field->type->ndim;
/* Parse all numbers in the format string */
while (*ts && *ts != ')') {
// ignore space characters (not using isspace() due to C/C++ problem on MacOS-X)
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* advance past whitespace, or the loop never terminates */
default: break; /* not a 'break' in the loop */
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
/* puts(ts); */
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T': /* substruct */
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0; /* Erase processed last struct element */
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}': /* end of substruct; either repeat or move on */
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0; /* Erase processed last struct element */
if (alignment && ctx->fmt_offset % alignment) {
/* Pad struct on size of the first member */
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
(ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
/* Continue pooling same type */
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
/* 's' or new type (cannot be added to current pool) */
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
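/* Worked example (sketch): against a dtype of two float fields, both "ff" and
 * "2f" end up with enc_type 'f' and enc_count == 2 (repeated characters are
 * pooled, and explicit counts are carried over via new_count).
 * __Pyx_BufFmt_ProcessTypeChunk then consumes that count while walking the
 * struct fields, matching size and type group and advancing fmt_offset by
 * sizeof(float) per field. */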
/////////////// TypeInfoCompare.proto ///////////////
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/////////////// TypeInfoCompare ///////////////
//@requires: BufferFormatStructs
// See if two dtypes are equal
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
/* Special case for chars */
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
/* Verify multidimensional C arrays */
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
/* Check for packed struct */
if (a->flags != b->flags)
return 0;
/* compare all struct fields */
if (a->fields || b->fields) {
/* Check if both have fields */
if (!(a->fields && b->fields))
return 0;
/* compare */
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
/* If all fields are processed, we have a match */
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/////////////// TypeInfoToFormat.proto ///////////////
struct __pyx_typeinfo_string {
char string[3];
};
static struct __pyx_typeinfo_string __Pyx_TypeInfoToFormat(__Pyx_TypeInfo *type);
/////////////// TypeInfoToFormat ///////////////
//@requires: BufferFormatStructs
// See also MemoryView.pyx:BufferFormatFromTypeInfo
static struct __pyx_typeinfo_string __Pyx_TypeInfoToFormat(__Pyx_TypeInfo *type) {
struct __pyx_typeinfo_string result = { {0} };
char *buf = (char *) result.string;
size_t size = type->size;
switch (type->typegroup) {
case 'H':
*buf = 'c';
break;
case 'I':
case 'U':
if (size == 1)
*buf = (type->is_unsigned) ? 'B' : 'b';
else if (size == 2)
*buf = (type->is_unsigned) ? 'H' : 'h';
else if (size == 4)
*buf = (type->is_unsigned) ? 'I' : 'i';
else if (size == 8)
*buf = (type->is_unsigned) ? 'Q' : 'q';
break;
case 'P':
*buf = 'P';
break;
case 'C':
{
__Pyx_TypeInfo complex_type = *type;
complex_type.typegroup = 'R';
complex_type.size /= 2;
*buf++ = 'Z';
*buf = __Pyx_TypeInfoToFormat(&complex_type).string[0];
break;
}
case 'R':
if (size == 4)
*buf = 'f';
else if (size == 8)
*buf = 'd';
else
*buf = 'g';
break;
}
return result;
}
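// Examples: a 4-byte unsigned int maps to "I", an 8-byte real to "d", and a
// 16-byte complex to "Zd" (a 'Z' prefix plus the format of the halved-size real type).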
Cython-0.29.28/Cython/Utility/Builtins.c
/*
* Special implementations of built-in functions and methods.
*
* Optional optimisations for builtins are in Optimize.c.
*
* General object operations and protocols are in ObjectHandling.c.
*/
//////////////////// Globals.proto ////////////////////
static PyObject* __Pyx_Globals(void); /*proto*/
//////////////////// Globals ////////////////////
//@substitute: naming
//@requires: ObjectHandling.c::GetAttr
// This is a stub implementation until we have something more complete.
// Currently, we only handle the most common case of a read-only dict
// of Python names. Supporting cdef names in the module and write
// access requires a rewrite as a dedicated class.
static PyObject* __Pyx_Globals(void) {
Py_ssize_t i;
PyObject *names;
PyObject *globals = $moddict_cname;
Py_INCREF(globals);
names = PyObject_Dir($module_cname);
if (!names)
goto bad;
for (i = PyList_GET_SIZE(names)-1; i >= 0; i--) {
#if CYTHON_COMPILING_IN_PYPY
PyObject* name = PySequence_ITEM(names, i);
if (!name)
goto bad;
#else
PyObject* name = PyList_GET_ITEM(names, i);
#endif
if (!PyDict_Contains(globals, name)) {
PyObject* value = __Pyx_GetAttr($module_cname, name);
if (!value) {
#if CYTHON_COMPILING_IN_PYPY
Py_DECREF(name);
#endif
goto bad;
}
if (PyDict_SetItem(globals, name, value) < 0) {
#if CYTHON_COMPILING_IN_PYPY
Py_DECREF(name);
#endif
Py_DECREF(value);
goto bad;
}
}
#if CYTHON_COMPILING_IN_PYPY
Py_DECREF(name);
#endif
}
Py_DECREF(names);
return globals;
bad:
Py_XDECREF(names);
Py_XDECREF(globals);
return NULL;
}
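// Minimal usage sketch (hypothetical call site, not part of this utility):
// the helper mirrors Python's globals() for the current module and returns
// a new reference to the shared module dict, refreshed with the module's
// attribute names:
//
//     PyObject *g = __Pyx_Globals();
//     if (!g) return NULL;   /* error already set */
//     /* ... look up names in g ... */
//     Py_DECREF(g);          /* balances the Py_INCREF() above */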
//////////////////// PyExecGlobals.proto ////////////////////
static PyObject* __Pyx_PyExecGlobals(PyObject*);
//////////////////// PyExecGlobals ////////////////////
//@requires: Globals
//@requires: PyExec
static PyObject* __Pyx_PyExecGlobals(PyObject* code) {
PyObject* result;
PyObject* globals = __Pyx_Globals();
if (unlikely(!globals))
return NULL;
result = __Pyx_PyExec2(code, globals);
Py_DECREF(globals);
return result;
}
//////////////////// PyExec.proto ////////////////////
static PyObject* __Pyx_PyExec3(PyObject*, PyObject*, PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyExec2(PyObject*, PyObject*);
//////////////////// PyExec ////////////////////
//@substitute: naming
static CYTHON_INLINE PyObject* __Pyx_PyExec2(PyObject* o, PyObject* globals) {
return __Pyx_PyExec3(o, globals, NULL);
}
static PyObject* __Pyx_PyExec3(PyObject* o, PyObject* globals, PyObject* locals) {
PyObject* result;
PyObject* s = 0;
char *code = 0;
if (!globals || globals == Py_None) {
globals = $moddict_cname;
} else if (!PyDict_Check(globals)) {
PyErr_Format(PyExc_TypeError, "exec() arg 2 must be a dict, not %.200s",
Py_TYPE(globals)->tp_name);
goto bad;
}
if (!locals || locals == Py_None) {
locals = globals;
}
if (__Pyx_PyDict_GetItemStr(globals, PYIDENT("__builtins__")) == NULL) {
if (PyDict_SetItem(globals, PYIDENT("__builtins__"), PyEval_GetBuiltins()) < 0)
goto bad;
}
if (PyCode_Check(o)) {
if (__Pyx_PyCode_HasFreeVars((PyCodeObject *)o)) {
PyErr_SetString(PyExc_TypeError,
"code object passed to exec() may not contain free variables");
goto bad;
}
#if PY_VERSION_HEX < 0x030200B1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400)
result = PyEval_EvalCode((PyCodeObject *)o, globals, locals);
#else
result = PyEval_EvalCode(o, globals, locals);
#endif
} else {
PyCompilerFlags cf;
cf.cf_flags = 0;
#if PY_VERSION_HEX >= 0x030800A3
cf.cf_feature_version = PY_MINOR_VERSION;
#endif
if (PyUnicode_Check(o)) {
cf.cf_flags = PyCF_SOURCE_IS_UTF8;
s = PyUnicode_AsUTF8String(o);
if (!s) goto bad;
o = s;
#if PY_MAJOR_VERSION >= 3
} else if (!PyBytes_Check(o)) {
#else
} else if (!PyString_Check(o)) {
#endif
PyErr_Format(PyExc_TypeError,
"exec: arg 1 must be string, bytes or code object, got %.200s",
Py_TYPE(o)->tp_name);
goto bad;
}
#if PY_MAJOR_VERSION >= 3
code = PyBytes_AS_STRING(o);
#else
code = PyString_AS_STRING(o);
#endif
if (PyEval_MergeCompilerFlags(&cf)) {
result = PyRun_StringFlags(code, Py_file_input, globals, locals, &cf);
} else {
result = PyRun_String(code, Py_file_input, globals, locals);
}
Py_XDECREF(s);
}
return result;
bad:
Py_XDECREF(s);
return 0;
}
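// Minimal usage sketch (hypothetical source string): executing code the way
// Python's exec(src) would, in the module's own namespace (a NULL/None
// globals argument falls back to the module dict, as handled above):
//
//     PyObject *src = PyUnicode_FromString("x = 1 + 1");
//     PyObject *res = src ? __Pyx_PyExec2(src, NULL) : NULL;
//     Py_XDECREF(src);
//     Py_XDECREF(res);       /* successful exec() returns None */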
//////////////////// GetAttr3.proto ////////////////////
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /*proto*/
//////////////////// GetAttr3 ////////////////////
//@requires: ObjectHandling.c::GetAttr
//@requires: Exceptions.c::PyThreadStateGet
//@requires: Exceptions.c::PyErrFetchRestore
//@requires: Exceptions.c::PyErrExceptionMatches
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
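// Semantics, for reference: __Pyx_GetAttr3(o, n, d) behaves like Python's
// getattr(o, n, d) -- an AttributeError from the lookup is cleared and a
// new reference to d is returned; any other exception still propagates as
// NULL. Hypothetical call site:
//
//     PyObject *v = __Pyx_GetAttr3(obj, PYIDENT("name"), Py_None);
//     if (!v) { /* non-AttributeError failure */ }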
//////////////////// HasAttr.proto ////////////////////
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /*proto*/
//////////////////// HasAttr ////////////////////
//@requires: ObjectHandling.c::GetAttr
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
//////////////////// Intern.proto ////////////////////
static PyObject* __Pyx_Intern(PyObject* s); /* proto */
//////////////////// Intern ////////////////////
static PyObject* __Pyx_Intern(PyObject* s) {
if (!(likely(PyString_CheckExact(s)))) {
PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "str", Py_TYPE(s)->tp_name);
return 0;
}
Py_INCREF(s);
#if PY_MAJOR_VERSION >= 3
PyUnicode_InternInPlace(&s);
#else
PyString_InternInPlace(&s);
#endif
return s;
}
//////////////////// abs_longlong.proto ////////////////////
static CYTHON_INLINE PY_LONG_LONG __Pyx_abs_longlong(PY_LONG_LONG x) {
#if defined (__cplusplus) && __cplusplus >= 201103L
return std::abs(x);
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
return llabs(x);
#elif defined (_MSC_VER)
// abs() is defined for long, but the 64-bit type on MSVC is long long.
// Use MS-specific _abs64() instead, which returns the original (negative) value for abs(-MAX-1)
return _abs64(x);
#elif defined (__GNUC__)
// gcc or clang on 64-bit Windows.
return __builtin_llabs(x);
#else
if (sizeof(PY_LONG_LONG) <= sizeof(Py_ssize_t))
return __Pyx_sst_abs(x);
return (x<0) ? -x : x;
#endif
}
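// Note: like llabs()/std::abs(), every branch above inherits the usual
// two's-complement caveat that the absolute value of the most negative
// PY_LONG_LONG is not representable; only the MSVC _abs64() branch
// documents its result (the input unchanged) for that one value.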
//////////////////// py_abs.proto ////////////////////
#if CYTHON_USE_PYLONG_INTERNALS
static PyObject *__Pyx_PyLong_AbsNeg(PyObject *num);/*proto*/
#define __Pyx_PyNumber_Absolute(x) \
((likely(PyLong_CheckExact(x))) ? \
(likely(Py_SIZE(x) >= 0) ? (Py_INCREF(x), (x)) : __Pyx_PyLong_AbsNeg(x)) : \
PyNumber_Absolute(x))
#else
#define __Pyx_PyNumber_Absolute(x) PyNumber_Absolute(x)
#endif
//////////////////// py_abs ////////////////////
#if CYTHON_USE_PYLONG_INTERNALS
static PyObject *__Pyx_PyLong_AbsNeg(PyObject *n) {
if (likely(Py_SIZE(n) == -1)) {
// digits are unsigned
return PyLong_FromLong(((PyLongObject*)n)->ob_digit[0]);
}
#if CYTHON_COMPILING_IN_CPYTHON
{
PyObject *copy = _PyLong_Copy((PyLongObject*)n);
if (likely(copy)) {
// negate the size to swap the sign
__Pyx_SET_SIZE(copy, -Py_SIZE(copy));
}
return copy;
}
#else
return PyNumber_Negative(n);
#endif
}
#endif
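// Usage sketch (x is a hypothetical PyObject*): the macro behaves like
// Python's abs(x). Exact non-negative ints are returned as-is with a new
// reference, exact negative ints get a sign-flipped copy via
// __Pyx_PyLong_AbsNeg(), and everything else falls back to the generic
// PyNumber_Absolute() protocol:
//
//     PyObject *r = __Pyx_PyNumber_Absolute(x);   /* new ref or NULL */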
//////////////////// pow2.proto ////////////////////
#define __Pyx_PyNumber_Power2(a, b) PyNumber_Power(a, b, Py_None)
//////////////////// object_ord.proto ////////////////////
//@requires: TypeConversion.c::UnicodeAsUCS4
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyObject_Ord(c) \
(likely(PyUnicode_Check(c)) ? (long)__Pyx_PyUnicode_AsPy_UCS4(c) : __Pyx__PyObject_Ord(c))
#else
#define __Pyx_PyObject_Ord(c) __Pyx__PyObject_Ord(c)
#endif
static long __Pyx__PyObject_Ord(PyObject* c); /*proto*/
//////////////////// object_ord ////////////////////
static long __Pyx__PyObject_Ord(PyObject* c) {
Py_ssize_t size;
if (PyBytes_Check(c)) {
size = PyBytes_GET_SIZE(c);
if (likely(size == 1)) {
return (unsigned char) PyBytes_AS_STRING(c)[0];
}
#if PY_MAJOR_VERSION < 3
} else if (PyUnicode_Check(c)) {
return (long)__Pyx_PyUnicode_AsPy_UCS4(c);
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
} else if (PyByteArray_Check(c)) {
size = PyByteArray_GET_SIZE(c);
if (likely(size == 1)) {
return (unsigned char) PyByteArray_AS_STRING(c)[0];
}
#endif
} else {
// FIXME: support character buffers - but CPython doesn't support them either
PyErr_Format(PyExc_TypeError,
"ord() expected string of length 1, but %.200s found", Py_TYPE(c)->tp_name);
return (long)(Py_UCS4)-1;
}
PyErr_Format(PyExc_TypeError,
"ord() expected a character, but string of length %zd found", size);
return (long)(Py_UCS4)-1;
}
//////////////////// py_dict_keys.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d); /*proto*/
//////////////////// py_dict_keys ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_Keys(PyObject* d) {
if (PY_MAJOR_VERSION >= 3)
return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d);
else
return PyDict_Keys(d);
}
//////////////////// py_dict_values.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d); /*proto*/
//////////////////// py_dict_values ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_Values(PyObject* d) {
if (PY_MAJOR_VERSION >= 3)
return CALL_UNBOUND_METHOD(PyDict_Type, "values", d);
else
return PyDict_Values(d);
}
//////////////////// py_dict_items.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d); /*proto*/
//////////////////// py_dict_items ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_Items(PyObject* d) {
if (PY_MAJOR_VERSION >= 3)
return CALL_UNBOUND_METHOD(PyDict_Type, "items", d);
else
return PyDict_Items(d);
}
//////////////////// py_dict_iterkeys.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d); /*proto*/
//////////////////// py_dict_iterkeys ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_IterKeys(PyObject* d) {
if (PY_MAJOR_VERSION >= 3)
return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d);
else
return CALL_UNBOUND_METHOD(PyDict_Type, "iterkeys", d);
}
//////////////////// py_dict_itervalues.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_IterValues(PyObject* d); /*proto*/
//////////////////// py_dict_itervalues ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_IterValues(PyObject* d) {
if (PY_MAJOR_VERSION >= 3)
return CALL_UNBOUND_METHOD(PyDict_Type, "values", d);
else
return CALL_UNBOUND_METHOD(PyDict_Type, "itervalues", d);
}
//////////////////// py_dict_iteritems.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d); /*proto*/
//////////////////// py_dict_iteritems ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_IterItems(PyObject* d) {
if (PY_MAJOR_VERSION >= 3)
return CALL_UNBOUND_METHOD(PyDict_Type, "items", d);
else
return CALL_UNBOUND_METHOD(PyDict_Type, "iteritems", d);
}
//////////////////// py_dict_viewkeys.proto ////////////////////
#if PY_VERSION_HEX < 0x02070000
#error This module uses dict views, which require Python 2.7 or later
#endif
static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d); /*proto*/
//////////////////// py_dict_viewkeys ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewKeys(PyObject* d) {
if (PY_MAJOR_VERSION >= 3)
return CALL_UNBOUND_METHOD(PyDict_Type, "keys", d);
else
return CALL_UNBOUND_METHOD(PyDict_Type, "viewkeys", d);
}
//////////////////// py_dict_viewvalues.proto ////////////////////
#if PY_VERSION_HEX < 0x02070000
#error This module uses dict views, which require Python 2.7 or later
#endif
static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d); /*proto*/
//////////////////// py_dict_viewvalues ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewValues(PyObject* d) {
if (PY_MAJOR_VERSION >= 3)
return CALL_UNBOUND_METHOD(PyDict_Type, "values", d);
else
return CALL_UNBOUND_METHOD(PyDict_Type, "viewvalues", d);
}
//////////////////// py_dict_viewitems.proto ////////////////////
#if PY_VERSION_HEX < 0x02070000
#error This module uses dict views, which require Python 2.7 or later
#endif
static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d); /*proto*/
//////////////////// py_dict_viewitems ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyDict_ViewItems(PyObject* d) {
if (PY_MAJOR_VERSION >= 3)
return CALL_UNBOUND_METHOD(PyDict_Type, "items", d);
else
return CALL_UNBOUND_METHOD(PyDict_Type, "viewitems", d);
}
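// Common pattern in the dict helpers above: CALL_UNBOUND_METHOD(PyDict_Type,
// "items", d) invokes the genuine dict method on d, while the Py2 branches
// map the iter*/view* names onto their Py2-only counterparts. A portable
// items iteration (d assumed to be a dict, error label hypothetical):
//
//     PyObject *items = __Pyx_PyDict_IterItems(d);
//     if (!items) goto bad;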
//////////////////// pyfrozenset_new.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it);
//////////////////// pyfrozenset_new ////////////////////
//@substitute: naming
static CYTHON_INLINE PyObject* __Pyx_PyFrozenSet_New(PyObject* it) {
if (it) {
PyObject* result;
#if CYTHON_COMPILING_IN_PYPY
// PyPy currently lacks PyFrozenSet_CheckExact() and PyFrozenSet_New()
PyObject* args;
args = PyTuple_Pack(1, it);
if (unlikely(!args))
return NULL;
result = PyObject_Call((PyObject*)&PyFrozenSet_Type, args, NULL);
Py_DECREF(args);
return result;
#else
if (PyFrozenSet_CheckExact(it)) {
Py_INCREF(it);
return it;
}
result = PyFrozenSet_New(it);
if (unlikely(!result))
return NULL;
if ((PY_VERSION_HEX >= 0x031000A1) || likely(PySet_GET_SIZE(result)))
return result;
// empty frozenset is a singleton (on Python <3.10)
// seems wasteful, but CPython does the same
Py_DECREF(result);
#endif
}
#if CYTHON_USE_TYPE_SLOTS
return PyFrozenSet_Type.tp_new(&PyFrozenSet_Type, $empty_tuple, NULL);
#else
return PyObject_Call((PyObject*)&PyFrozenSet_Type, $empty_tuple, NULL);
#endif
}
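// Usage sketch (it is a hypothetical iterable, may be NULL): behaves like
// Python's frozenset(it), including the CPython detail that the empty
// frozenset is a shared singleton on Python < 3.10:
//
//     PyObject *fs = __Pyx_PyFrozenSet_New(it);   /* NULL it -> empty set */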
//////////////////// PySet_Update.proto ////////////////////
static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it); /*proto*/
//////////////////// PySet_Update ////////////////////
static CYTHON_INLINE int __Pyx_PySet_Update(PyObject* set, PyObject* it) {
PyObject *retval;
#if CYTHON_USE_TYPE_SLOTS && !CYTHON_COMPILING_IN_PYPY
if (PyAnySet_Check(it)) {
if (PySet_GET_SIZE(it) == 0)
return 0;
// fast and safe case: CPython will update our result set and return it
retval = PySet_Type.tp_as_number->nb_inplace_or(set, it);
if (likely(retval == set)) {
Py_DECREF(retval);
return 0;
}
if (unlikely(!retval))
return -1;
// unusual result, fall through to set.update() call below
Py_DECREF(retval);
}
#endif
retval = CALL_UNBOUND_METHOD(PySet_Type, "update", set, it);
if (unlikely(!retval)) return -1;
Py_DECREF(retval);
return 0;
}
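// Usage sketch: like set.update(it), but reporting 0/-1 instead of
// returning None or raising through a Python-level call:
//
//     if (__Pyx_PySet_Update(s, it) < 0) goto bad;   /* hypothetical labels */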
# ======== Cython-0.29.28/Cython/Utility/CConvert.pyx ========
#################### FromPyStructUtility ####################
cdef extern from *:
ctypedef struct PyTypeObject:
char* tp_name
PyTypeObject *Py_TYPE(obj)
bint PyMapping_Check(obj)
object PyErr_Format(exc, const char *format, ...)
@cname("{{funcname}}")
cdef {{struct_type}} {{funcname}}(obj) except *:
cdef {{struct_type}} result
if not PyMapping_Check(obj):
PyErr_Format(TypeError, b"Expected %.16s, got %.200s", b"a mapping", Py_TYPE(obj).tp_name)
{{for member in var_entries:}}
try:
value = obj['{{member.name}}']
except KeyError:
raise ValueError("No value specified for struct attribute '{{member.name}}'")
result.{{member.cname}} = value
{{endfor}}
return result
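# Illustrative expansion (hypothetical struct "MyStruct" with members x, y;
# the real function and type names come from the template context): the
# template above generates, roughly,
#
#     cdef MyStruct __pyx_convert_MyStruct(obj) except *:
#         cdef MyStruct result
#         result.x = obj['x']
#         result.y = obj['y']
#         return result
#
# so any mapping that supplies every member name converts; a missing key
# raises ValueError via the KeyError handler above.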
#################### FromPyUnionUtility ####################
cdef extern from *:
ctypedef struct PyTypeObject:
char* tp_name
PyTypeObject *Py_TYPE(obj)
bint PyMapping_Check(obj)
object PyErr_Format(exc, const char *format, ...)
@cname("{{funcname}}")
cdef {{struct_type}} {{funcname}}(obj) except *:
cdef {{struct_type}} result
cdef Py_ssize_t length
if not PyMapping_Check(obj):
PyErr_Format(TypeError, b"Expected %.16s, got %.200s", b"a mapping", Py_TYPE(obj).tp_name)
last_found = None
length = len(obj)
if length:
{{for member in var_entries:}}
if '{{member.name}}' in obj:
if last_found is not None:
raise ValueError("More than one union attribute passed: '%s' and '%s'" % (last_found, '{{member.name}}'))
last_found = '{{member.name}}'
result.{{member.cname}} = obj['{{member.name}}']
length -= 1
if not length:
return result
{{endfor}}
if last_found is None:
raise ValueError("No value specified for any of the union attributes (%s)" %
'{{", ".join(member.name for member in var_entries)}}')
return result
#################### cfunc.to_py ####################
@cname("{{cname}}")
cdef object {{cname}}({{return_type.ctype}} (*f)({{ ', '.join(arg.type_cname for arg in args) }}) {{except_clause}}):
def wrap({{ ', '.join('{arg.ctype} {arg.name}'.format(arg=arg) for arg in args) }}):
"""wrap({{', '.join(('{arg.name}: {arg.type_displayname}'.format(arg=arg) if arg.type_displayname else arg.name) for arg in args)}}){{if return_type.type_displayname}} -> {{return_type.type_displayname}}{{endif}}"""
{{'' if return_type.type.is_void else 'return '}}f({{ ', '.join(arg.name for arg in args) }})
return wrap
#################### carray.from_py ####################
cdef extern from *:
object PyErr_Format(exc, const char *format, ...)
@cname("{{cname}}")
cdef int {{cname}}(object o, {{base_type}} *v, Py_ssize_t length) except -1:
cdef Py_ssize_t i = length
try:
i = len(o)
except (TypeError, OverflowError):
pass
if i == length:
for i, item in enumerate(o):
if i >= length:
break
v[i] = item
else:
i += 1 # convert index to length
if i == length:
return 0
PyErr_Format(
IndexError,
("too many values found during array assignment, expected %zd"
if i >= length else
"not enough values found during array assignment, expected %zd, got %zd"),
length, i)
#################### carray.to_py ####################
cdef extern from *:
void Py_INCREF(object o)
tuple PyTuple_New(Py_ssize_t size)
list PyList_New(Py_ssize_t size)
void PyTuple_SET_ITEM(object p, Py_ssize_t pos, object o)
void PyList_SET_ITEM(object p, Py_ssize_t pos, object o)
@cname("{{cname}}")
cdef inline list {{cname}}({{base_type}} *v, Py_ssize_t length):
cdef size_t i
cdef object value
l = PyList_New(length)
for i in range(length):
value = v[i]
Py_INCREF(value)
PyList_SET_ITEM(l, i, value)
return l
@cname("{{to_tuple_cname}}")
cdef inline tuple {{to_tuple_cname}}({{base_type}} *v, Py_ssize_t length):
cdef size_t i
cdef object value
t = PyTuple_New(length)
for i in range(length):
value = v[i]
Py_INCREF(value)
PyTuple_SET_ITEM(t, i, value)
return t
/* ======== Cython-0.29.28/Cython/Utility/CMath.c ======== */
/////////////// CDivisionWarning.proto ///////////////
static int __Pyx_cdivision_warning(const char *, int); /* proto */
/////////////// CDivisionWarning ///////////////
static int __Pyx_cdivision_warning(const char *filename, int lineno) {
#if CYTHON_COMPILING_IN_PYPY
// avoid compiler warnings
filename++; lineno++;
return PyErr_Warn(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ");
#else
return PyErr_WarnExplicit(PyExc_RuntimeWarning,
"division with oppositely signed operands, C and Python semantics differ",
filename,
lineno,
__Pyx_MODULE_NAME,
NULL);
#endif
}
/////////////// DivInt.proto ///////////////
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s, %(type)s); /* proto */
/////////////// DivInt ///////////////
static CYTHON_INLINE %(type)s __Pyx_div_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s q = a / b;
%(type)s r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
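// Worked example of the floor-division fix above, with a = -7, b = 2:
// C truncates toward zero, so q = -3 and r = -7 - (-3)*2 = -1. Since
// r != 0 and (r ^ b) < 0 (remainder and divisor differ in sign), q is
// decremented to -4, matching Python's -7 // 2 == -4. When the signs
// agree the correction term is 0 and the truncated result is kept.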
/////////////// ModInt.proto ///////////////
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
/////////////// ModInt ///////////////
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = a %% b;
r += ((r != 0) & ((r ^ b) < 0)) * b;
return r;
}
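// Matching worked example for the modulo fix, a = -7, b = 2: C yields
// r = -1; the sign-mismatch test fires, so r += b gives 1, matching
// Python's -7 %% 2 == 1 (Python's remainder takes the divisor's sign).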
/////////////// ModFloat.proto ///////////////
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s, %(type)s); /* proto */
/////////////// ModFloat ///////////////
static CYTHON_INLINE %(type)s __Pyx_mod_%(type_name)s(%(type)s a, %(type)s b) {
%(type)s r = fmod%(math_h_modifier)s(a, b);
r += ((r != 0) & ((r < 0) ^ (b < 0))) * b;
return r;
}
/////////////// IntPow.proto ///////////////
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s, %(type)s); /* proto */
/////////////// IntPow ///////////////
static CYTHON_INLINE %(type)s %(func_name)s(%(type)s b, %(type)s e) {
%(type)s t = b;
switch (e) {
case 3:
t *= b;
CYTHON_FALLTHROUGH;
case 2:
t *= b;
CYTHON_FALLTHROUGH;
case 1:
return t;
case 0:
return 1;
}
#if %(signed)s
if (unlikely(e<0)) return 0;
#endif
t = 1;
while (likely(e)) {
t *= (b * (e&1)) | ((~e)&1); /* 1 or b */
b *= b;
e >>= 1;
}
return t;
}
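// The loop above is branch-free binary exponentiation: when the low bit of
// e is set, (b * (e&1)) | ((~e)&1) evaluates to b, otherwise to 1, so t is
// multiplied by the current power of b or left unchanged. E.g. e = 5
// (binary 101) multiplies t by b and then by b*b*b*b, giving b to the 5th;
// exponents 0..3 are handled exactly by the switch beforehand.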
/* ======== Cython-0.29.28/Cython/Utility/Capsule.c ======== */
//////////////// Capsule.proto ////////////////
/* Todo: wrap the rest of the functionality in similar functions */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
//////////////// Capsule ////////////////
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
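// Usage sketch (hypothetical pointer and signature string): publishing a C
// pointer for other extension modules via the capsule protocol (PyCObject
// on pre-2.7 Pythons):
//
//     PyObject *cap = __pyx_capsule_create((void*)my_func, "int (int)");
//     if (!cap) goto bad;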
/* ======== Cython-0.29.28/Cython/Utility/CommonStructures.c ======== */
/////////////// FetchCommonType.proto ///////////////
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
/////////////// FetchCommonType ///////////////
static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) {
PyObject* fake_module;
PyTypeObject* cached_type = NULL;
fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
if (!fake_module) return NULL;
Py_INCREF(fake_module);
cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name);
if (cached_type) {
if (!PyType_Check((PyObject*)cached_type)) {
PyErr_Format(PyExc_TypeError,
"Shared Cython type %.200s is not a type object",
type->tp_name);
goto bad;
}
if (cached_type->tp_basicsize != type->tp_basicsize) {
PyErr_Format(PyExc_TypeError,
"Shared Cython type %.200s has the wrong size, try recompiling",
type->tp_name);
goto bad;
}
} else {
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
PyErr_Clear();
if (PyType_Ready(type) < 0) goto bad;
if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0)
goto bad;
Py_INCREF(type);
cached_type = type;
}
done:
Py_DECREF(fake_module);
// NOTE: always returns owned reference, or NULL on error
return cached_type;
bad:
Py_XDECREF(cached_type);
cached_type = NULL;
goto done;
}
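// Rationale, as implied by the checks above: the "_cython_" CYTHON_ABI fake
// module serves as a process-wide registry, so all Cython modules built
// with the same ABI share one PyTypeObject per utility type; the
// tp_basicsize comparison rejects mismatched layouts from incompatible
// builds. The returned reference is owned by the caller (see the NOTE).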
/////////////// FetchCommonPointer.proto ///////////////
static void* __Pyx_FetchCommonPointer(void* pointer, const char* name);
/////////////// FetchCommonPointer ///////////////
static void* __Pyx_FetchCommonPointer(void* pointer, const char* name) {
#if PY_VERSION_HEX >= 0x02070000
PyObject* fake_module = NULL;
PyObject* capsule = NULL;
void* value = NULL;
fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
if (!fake_module) return NULL;
Py_INCREF(fake_module);
capsule = PyObject_GetAttrString(fake_module, name);
if (!capsule) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
PyErr_Clear();
capsule = PyCapsule_New(pointer, name, NULL);
if (!capsule) goto bad;
if (PyObject_SetAttrString(fake_module, name, capsule) < 0)
goto bad;
}
value = PyCapsule_GetPointer(capsule, name);
bad:
Py_XDECREF(capsule);
Py_DECREF(fake_module);
return value;
#else
return pointer;
#endif
}
/* ======== Cython-0.29.28/Cython/Utility/Complex.c ======== */
/////////////// Header.proto ///////////////
//@proto_block: h_code
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
/////////////// RealImag.proto ///////////////
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX \
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/////////////// Declarations.proto ///////////////
//@proto_block: complex_type_declarations
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< {{real_type}} > {{type_name}};
#else
typedef {{real_type}} _Complex {{type_name}};
#endif
#else
typedef struct { {{real_type}} real, imag; } {{type_name}};
#endif
static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}}, {{real_type}});
/////////////// Declarations ///////////////
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) {
return ::std::complex< {{real_type}} >(x, y);
}
#else
static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) {
return x + y*({{type}})_Complex_I;
}
#endif
#else
static CYTHON_INLINE {{type}} {{type_name}}_from_parts({{real_type}} x, {{real_type}} y) {
{{type}} z;
z.real = x;
z.imag = y;
return z;
}
#endif
/////////////// ToPy.proto ///////////////
#define __pyx_PyComplex_FromComplex(z) \
PyComplex_FromDoubles((double)__Pyx_CREAL(z), \
(double)__Pyx_CIMAG(z))
/////////////// FromPy.proto ///////////////
static {{type}} __Pyx_PyComplex_As_{{type_name}}(PyObject*);
/////////////// FromPy ///////////////
static {{type}} __Pyx_PyComplex_As_{{type_name}}(PyObject* o) {
Py_complex cval;
#if !CYTHON_COMPILING_IN_PYPY
if (PyComplex_CheckExact(o))
cval = ((PyComplexObject *)o)->cval;
else
#endif
cval = PyComplex_AsCComplex(o);
return {{type_name}}_from_parts(
({{real_type}})cval.real,
({{real_type}})cval.imag);
}
/////////////// Arithmetic.proto ///////////////
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq{{func_suffix}}(a, b) ((a)==(b))
#define __Pyx_c_sum{{func_suffix}}(a, b) ((a)+(b))
#define __Pyx_c_diff{{func_suffix}}(a, b) ((a)-(b))
#define __Pyx_c_prod{{func_suffix}}(a, b) ((a)*(b))
#define __Pyx_c_quot{{func_suffix}}(a, b) ((a)/(b))
#define __Pyx_c_neg{{func_suffix}}(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero{{func_suffix}}(z) ((z)==({{real_type}})0)
#define __Pyx_c_conj{{func_suffix}}(z) (::std::conj(z))
#if {{is_float}}
#define __Pyx_c_abs{{func_suffix}}(z) (::std::abs(z))
#define __Pyx_c_pow{{func_suffix}}(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero{{func_suffix}}(z) ((z)==0)
#define __Pyx_c_conj{{func_suffix}}(z) (conj{{m}}(z))
#if {{is_float}}
#define __Pyx_c_abs{{func_suffix}}(z) (cabs{{m}}(z))
#define __Pyx_c_pow{{func_suffix}}(a, b) (cpow{{m}}(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq{{func_suffix}}({{type}}, {{type}});
static CYTHON_INLINE {{type}} __Pyx_c_sum{{func_suffix}}({{type}}, {{type}});
static CYTHON_INLINE {{type}} __Pyx_c_diff{{func_suffix}}({{type}}, {{type}});
static CYTHON_INLINE {{type}} __Pyx_c_prod{{func_suffix}}({{type}}, {{type}});
static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}}, {{type}});
static CYTHON_INLINE {{type}} __Pyx_c_neg{{func_suffix}}({{type}});
static CYTHON_INLINE int __Pyx_c_is_zero{{func_suffix}}({{type}});
static CYTHON_INLINE {{type}} __Pyx_c_conj{{func_suffix}}({{type}});
#if {{is_float}}
static CYTHON_INLINE {{real_type}} __Pyx_c_abs{{func_suffix}}({{type}});
static CYTHON_INLINE {{type}} __Pyx_c_pow{{func_suffix}}({{type}}, {{type}});
#endif
#endif
/////////////// Arithmetic ///////////////
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq{{func_suffix}}({{type}} a, {{type}} b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE {{type}} __Pyx_c_sum{{func_suffix}}({{type}} a, {{type}} b) {
{{type}} z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE {{type}} __Pyx_c_diff{{func_suffix}}({{type}} a, {{type}} b) {
{{type}} z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE {{type}} __Pyx_c_prod{{func_suffix}}({{type}} a, {{type}} b) {
{{type}} z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if {{is_float}}
static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}} a, {{type}} b) {
if (b.imag == 0) {
return {{type_name}}_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs{{m}}(b.real) >= fabs{{m}}(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return {{type_name}}_from_parts(a.real / b.real, a.imag / b.imag);
} else {
{{real_type}} r = b.imag / b.real;
{{real_type}} s = ({{real_type}})(1.0) / (b.real + b.imag * r);
return {{type_name}}_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
{{real_type}} r = b.real / b.imag;
{{real_type}} s = ({{real_type}})(1.0) / (b.imag + b.real * r);
return {{type_name}}_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE {{type}} __Pyx_c_quot{{func_suffix}}({{type}} a, {{type}} b) {
if (b.imag == 0) {
return {{type_name}}_from_parts(a.real / b.real, a.imag / b.real);
} else {
{{real_type}} denom = b.real * b.real + b.imag * b.imag;
return {{type_name}}_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
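// The scaled form in the floating-point variant above is Smith's (1962)
// algorithm for complex division: dividing through by whichever of
// |b.real|, |b.imag| is larger keeps the intermediate products in range,
// avoiding the overflow/underflow that the plain
// denom = b.real*b.real + b.imag*b.imag form (sufficient for the integer
// variant) can hit for very large or very tiny components.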
static CYTHON_INLINE {{type}} __Pyx_c_neg{{func_suffix}}({{type}} a) {
{{type}} z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero{{func_suffix}}({{type}} a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE {{type}} __Pyx_c_conj{{func_suffix}}({{type}} a) {
{{type}} z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if {{is_float}}
static CYTHON_INLINE {{real_type}} __Pyx_c_abs{{func_suffix}}({{type}} z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt{{m}}(z.real*z.real + z.imag*z.imag);
#else
return hypot{{m}}(z.real, z.imag);
#endif
}
static CYTHON_INLINE {{type}} __Pyx_c_pow{{func_suffix}}({{type}} a, {{type}} b) {
{{type}} z;
{{real_type}} r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
{{real_type}} denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod{{func_suffix}}(a, a);
case 3:
z = __Pyx_c_prod{{func_suffix}}(a, a);
return __Pyx_c_prod{{func_suffix}}(z, a);
case 4:
z = __Pyx_c_prod{{func_suffix}}(a, a);
return __Pyx_c_prod{{func_suffix}}(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow{{m}}(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2{{m}}(0.0, -1.0);
}
} else {
r = __Pyx_c_abs{{func_suffix}}(a);
theta = atan2{{m}}(a.imag, a.real);
}
lnr = log{{m}}(r);
z_r = exp{{m}}(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos{{m}}(z_theta);
z.imag = z_r * sin{{m}}(z_theta);
return z;
}
#endif
#endif
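// The general case of the power function above works in polar form: writing
// a = r*exp(i*theta), a**b has magnitude exp(lnr*b.real - theta*b.imag)
// and argument theta*b.real + lnr*b.imag -- exactly the z_r/z_theta pair
// computed before converting back with cos/sin. Small integer exponents
// are special-cased earlier to stay exact.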
/* ======== Cython-0.29.28/Cython/Utility/Coroutine.c ======== */
//////////////////// GeneratorYieldFrom.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source);
//////////////////// GeneratorYieldFrom ////////////////////
//@requires: Generator
static void __PyxPyIter_CheckErrorAndDecref(PyObject *source) {
PyErr_Format(PyExc_TypeError,
"iter() returned non-iterator of type '%.100s'",
Py_TYPE(source)->tp_name);
Py_DECREF(source);
}
static CYTHON_INLINE PyObject* __Pyx_Generator_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) {
PyObject *source_gen, *retval;
#ifdef __Pyx_Coroutine_USED
if (__Pyx_Coroutine_Check(source)) {
// TODO: this should only happen for types.coroutine()ed generators, but we can't determine that here
Py_INCREF(source);
source_gen = source;
retval = __Pyx_Generator_Next(source);
} else
#endif
{
#if CYTHON_USE_TYPE_SLOTS
if (likely(Py_TYPE(source)->tp_iter)) {
source_gen = Py_TYPE(source)->tp_iter(source);
if (unlikely(!source_gen))
return NULL;
if (unlikely(!PyIter_Check(source_gen))) {
__PyxPyIter_CheckErrorAndDecref(source_gen);
return NULL;
}
} else
// CPython also allows non-iterable sequences to be iterated over
#endif
{
source_gen = PyObject_GetIter(source);
if (unlikely(!source_gen))
return NULL;
}
// source_gen is now the iterator, make the first next() call
#if CYTHON_USE_TYPE_SLOTS
retval = Py_TYPE(source_gen)->tp_iternext(source_gen);
#else
retval = PyIter_Next(source_gen);
#endif
}
if (likely(retval)) {
gen->yieldfrom = source_gen;
return retval;
}
Py_DECREF(source_gen);
return NULL;
}
//////////////////// CoroutineYieldFrom.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source);
//////////////////// CoroutineYieldFrom ////////////////////
//@requires: Coroutine
//@requires: GetAwaitIter
static PyObject* __Pyx__Coroutine_Yield_From_Generic(__pyx_CoroutineObject *gen, PyObject *source) {
PyObject *retval;
PyObject *source_gen = __Pyx__Coroutine_GetAwaitableIter(source);
if (unlikely(!source_gen)) {
return NULL;
}
// source_gen is now the iterator, make the first next() call
if (__Pyx_Coroutine_Check(source_gen)) {
retval = __Pyx_Generator_Next(source_gen);
} else {
#if CYTHON_USE_TYPE_SLOTS
retval = Py_TYPE(source_gen)->tp_iternext(source_gen);
#else
retval = PyIter_Next(source_gen);
#endif
}
if (retval) {
gen->yieldfrom = source_gen;
return retval;
}
Py_DECREF(source_gen);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_Coroutine_Yield_From(__pyx_CoroutineObject *gen, PyObject *source) {
PyObject *retval;
if (__Pyx_Coroutine_Check(source)) {
if (unlikely(((__pyx_CoroutineObject*)source)->yieldfrom)) {
PyErr_SetString(
PyExc_RuntimeError,
"coroutine is being awaited already");
return NULL;
}
retval = __Pyx_Generator_Next(source);
#ifdef __Pyx_AsyncGen_USED
// inlined "__pyx_PyAsyncGenASend" handling to avoid the series of generic calls
} else if (__pyx_PyAsyncGenASend_CheckExact(source)) {
retval = __Pyx_async_gen_asend_iternext(source);
#endif
} else {
return __Pyx__Coroutine_Yield_From_Generic(gen, source);
}
if (retval) {
Py_INCREF(source);
gen->yieldfrom = source;
}
return retval;
}
//////////////////// GetAwaitIter.proto ////////////////////
static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o); /*proto*/
static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *o); /*proto*/
//////////////////// GetAwaitIter ////////////////////
//@requires: ObjectHandling.c::PyObjectGetMethod
//@requires: ObjectHandling.c::PyObjectCallNoArg
//@requires: ObjectHandling.c::PyObjectCallOneArg
static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAwaitableIter(PyObject *o) {
#ifdef __Pyx_Coroutine_USED
if (__Pyx_Coroutine_Check(o)) {
return __Pyx_NewRef(o);
}
#endif
return __Pyx__Coroutine_GetAwaitableIter(o);
}
static void __Pyx_Coroutine_AwaitableIterError(PyObject *source) {
#if PY_VERSION_HEX >= 0x030600B3 || defined(_PyErr_FormatFromCause)
_PyErr_FormatFromCause(
PyExc_TypeError,
"'async for' received an invalid object "
"from __anext__: %.100s",
Py_TYPE(source)->tp_name);
#elif PY_MAJOR_VERSION >= 3
PyObject *exc, *val, *val2, *tb;
assert(PyErr_Occurred());
PyErr_Fetch(&exc, &val, &tb);
PyErr_NormalizeException(&exc, &val, &tb);
if (tb != NULL) {
PyException_SetTraceback(val, tb);
Py_DECREF(tb);
}
Py_DECREF(exc);
assert(!PyErr_Occurred());
PyErr_Format(
PyExc_TypeError,
"'async for' received an invalid object "
"from __anext__: %.100s",
Py_TYPE(source)->tp_name);
PyErr_Fetch(&exc, &val2, &tb);
PyErr_NormalizeException(&exc, &val2, &tb);
Py_INCREF(val);
PyException_SetCause(val2, val);
PyException_SetContext(val2, val);
PyErr_Restore(exc, val2, tb);
#else
// since Py2 does not have exception chaining, it's better to avoid shadowing exceptions there
source++;
#endif
}
// adapted from genobject.c in Py3.5
static PyObject *__Pyx__Coroutine_GetAwaitableIter(PyObject *obj) {
PyObject *res;
#if CYTHON_USE_ASYNC_SLOTS
__Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj);
if (likely(am && am->am_await)) {
res = (*am->am_await)(obj);
} else
#endif
#if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact)
if (PyCoro_CheckExact(obj)) {
return __Pyx_NewRef(obj);
} else
#endif
#if CYTHON_COMPILING_IN_CPYTHON && defined(CO_ITERABLE_COROUTINE)
if (PyGen_CheckExact(obj) && ((PyGenObject*)obj)->gi_code && ((PyCodeObject *)((PyGenObject*)obj)->gi_code)->co_flags & CO_ITERABLE_COROUTINE) {
// Python generator marked with "@types.coroutine" decorator
return __Pyx_NewRef(obj);
} else
#endif
{
PyObject *method = NULL;
int is_method = __Pyx_PyObject_GetMethod(obj, PYIDENT("__await__"), &method);
if (likely(is_method)) {
res = __Pyx_PyObject_CallOneArg(method, obj);
} else if (likely(method)) {
res = __Pyx_PyObject_CallNoArg(method);
} else
goto slot_error;
Py_DECREF(method);
}
if (unlikely(!res)) {
// surprisingly, CPython replaces the exception here...
__Pyx_Coroutine_AwaitableIterError(obj);
goto bad;
}
if (unlikely(!PyIter_Check(res))) {
PyErr_Format(PyExc_TypeError,
"__await__() returned non-iterator of type '%.100s'",
Py_TYPE(res)->tp_name);
Py_CLEAR(res);
} else {
int is_coroutine = 0;
#ifdef __Pyx_Coroutine_USED
is_coroutine |= __Pyx_Coroutine_Check(res);
#endif
#if PY_VERSION_HEX >= 0x030500B2 || defined(PyCoro_CheckExact)
is_coroutine |= PyCoro_CheckExact(res);
#endif
if (unlikely(is_coroutine)) {
/* __await__ must return an *iterator*, not
a coroutine or another awaitable (see PEP 492) */
PyErr_SetString(PyExc_TypeError,
"__await__() returned a coroutine");
Py_CLEAR(res);
}
}
return res;
slot_error:
PyErr_Format(PyExc_TypeError,
"object %.100s can't be used in 'await' expression",
Py_TYPE(obj)->tp_name);
bad:
return NULL;
}
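// Lookup order above mirrors CPython's 'await' protocol: the
// tp_as_async->am_await slot first, then exact coroutine objects pass
// through unchanged, then generators flagged with CO_ITERABLE_COROUTINE
// (@types.coroutine), and finally a plain __await__() call whose result
// must be an iterator but may not itself be a coroutine (PEP 492).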
//////////////////// AsyncIter.proto ////////////////////
static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *o); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *o); /*proto*/
//////////////////// AsyncIter ////////////////////
//@requires: GetAwaitIter
//@requires: ObjectHandling.c::PyObjectCallMethod0
static PyObject *__Pyx_Coroutine_GetAsyncIter_Generic(PyObject *obj) {
#if PY_VERSION_HEX < 0x030500B1
{
PyObject *iter = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__aiter__"));
if (likely(iter))
return iter;
// FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__aiter__'
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return NULL;
}
#else
// avoid C warning about 'unused function'
if ((0)) (void) __Pyx_PyObject_CallMethod0(obj, PYIDENT("__aiter__"));
#endif
PyErr_Format(PyExc_TypeError, "'async for' requires an object with __aiter__ method, got %.100s",
Py_TYPE(obj)->tp_name);
return NULL;
}
static CYTHON_INLINE PyObject *__Pyx_Coroutine_GetAsyncIter(PyObject *obj) {
#ifdef __Pyx_AsyncGen_USED
if (__Pyx_AsyncGen_CheckExact(obj)) {
return __Pyx_NewRef(obj);
}
#endif
#if CYTHON_USE_ASYNC_SLOTS
{
__Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj);
if (likely(am && am->am_aiter)) {
return (*am->am_aiter)(obj);
}
}
#endif
return __Pyx_Coroutine_GetAsyncIter_Generic(obj);
}
static PyObject *__Pyx__Coroutine_AsyncIterNext(PyObject *obj) {
#if PY_VERSION_HEX < 0x030500B1
{
PyObject *value = __Pyx_PyObject_CallMethod0(obj, PYIDENT("__anext__"));
if (likely(value))
return value;
}
// FIXME: for the sake of a nicely conforming exception message, assume any AttributeError meant '__anext__'
if (PyErr_ExceptionMatches(PyExc_AttributeError))
#endif
PyErr_Format(PyExc_TypeError, "'async for' requires an object with __anext__ method, got %.100s",
Py_TYPE(obj)->tp_name);
return NULL;
}
static CYTHON_INLINE PyObject *__Pyx_Coroutine_AsyncIterNext(PyObject *obj) {
#ifdef __Pyx_AsyncGen_USED
if (__Pyx_AsyncGen_CheckExact(obj)) {
return __Pyx_async_gen_anext(obj);
}
#endif
#if CYTHON_USE_ASYNC_SLOTS
{
__Pyx_PyAsyncMethodsStruct* am = __Pyx_PyType_AsAsync(obj);
if (likely(am && am->am_anext)) {
return (*am->am_anext)(obj);
}
}
#endif
return __Pyx__Coroutine_AsyncIterNext(obj);
}
//////////////////// pep479.proto ////////////////////
static void __Pyx_Generator_Replace_StopIteration(int in_async_gen); /*proto*/
//////////////////// pep479 ////////////////////
//@requires: Exceptions.c::GetException
static void __Pyx_Generator_Replace_StopIteration(CYTHON_UNUSED int in_async_gen) {
PyObject *exc, *val, *tb, *cur_exc;
__Pyx_PyThreadState_declare
#ifdef __Pyx_StopAsyncIteration_USED
int is_async_stopiteration = 0;
#endif
cur_exc = PyErr_Occurred();
if (likely(!__Pyx_PyErr_GivenExceptionMatches(cur_exc, PyExc_StopIteration))) {
#ifdef __Pyx_StopAsyncIteration_USED
if (in_async_gen && unlikely(__Pyx_PyErr_GivenExceptionMatches(cur_exc, __Pyx_PyExc_StopAsyncIteration))) {
is_async_stopiteration = 1;
} else
#endif
return;
}
__Pyx_PyThreadState_assign
// Chain exceptions by moving Stop(Async)Iteration to exc_info before creating the RuntimeError.
// In Py2.x, no chaining happens, but the exception still stays visible in exc_info.
__Pyx_GetException(&exc, &val, &tb);
Py_XDECREF(exc);
Py_XDECREF(val);
Py_XDECREF(tb);
PyErr_SetString(PyExc_RuntimeError,
#ifdef __Pyx_StopAsyncIteration_USED
is_async_stopiteration ? "async generator raised StopAsyncIteration" :
in_async_gen ? "async generator raised StopIteration" :
#endif
"generator raised StopIteration");
}
//////////////////// CoroutineBase.proto ////////////////////
//@substitute: naming
typedef PyObject *(*__pyx_coroutine_body_t)(PyObject *, PyThreadState *, PyObject *);
#if CYTHON_USE_EXC_INFO_STACK
// See https://bugs.python.org/issue25612
#define __Pyx_ExcInfoStruct _PyErr_StackItem
#else
// Minimal replacement struct for Py<3.7, without the Py3.7 exception state stack.
typedef struct {
PyObject *exc_type;
PyObject *exc_value;
PyObject *exc_traceback;
} __Pyx_ExcInfoStruct;
#endif
typedef struct {
PyObject_HEAD
__pyx_coroutine_body_t body;
PyObject *closure;
__Pyx_ExcInfoStruct gi_exc_state;
PyObject *gi_weakreflist;
PyObject *classobj;
PyObject *yieldfrom;
PyObject *gi_name;
PyObject *gi_qualname;
PyObject *gi_modulename;
PyObject *gi_code;
PyObject *gi_frame;
int resume_label;
// using T_BOOL for property below requires char value
char is_running;
} __pyx_CoroutineObject;
static __pyx_CoroutineObject *__Pyx__Coroutine_New(
PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/
static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
__pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name); /*proto*/
static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self);
static int __Pyx_Coroutine_clear(PyObject *self); /*proto*/
static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value); /*proto*/
static PyObject *__Pyx_Coroutine_Close(PyObject *self); /*proto*/
static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args); /*proto*/
// macros for exception state swapping instead of inline functions to make use of the local thread state context
#if CYTHON_USE_EXC_INFO_STACK
#define __Pyx_Coroutine_SwapException(self)
#define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state)
#else
#define __Pyx_Coroutine_SwapException(self) { \
__Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback); \
__Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state); \
}
#define __Pyx_Coroutine_ResetAndClearException(self) { \
__Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback); \
(self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL; \
}
#endif
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyGen_FetchStopIterationValue(pvalue) \
__Pyx_PyGen__FetchStopIterationValue($local_tstate_cname, pvalue)
#else
#define __Pyx_PyGen_FetchStopIterationValue(pvalue) \
__Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue)
#endif
static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue); /*proto*/
static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state); /*proto*/
//////////////////// Coroutine.proto ////////////////////
#define __Pyx_Coroutine_USED
static PyTypeObject *__pyx_CoroutineType = 0;
static PyTypeObject *__pyx_CoroutineAwaitType = 0;
#define __Pyx_Coroutine_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineType)
// __Pyx_Coroutine_Check(obj): see override for IterableCoroutine below
#define __Pyx_Coroutine_Check(obj) __Pyx_Coroutine_CheckExact(obj)
#define __Pyx_CoroutineAwait_CheckExact(obj) (Py_TYPE(obj) == __pyx_CoroutineAwaitType)
#define __Pyx_Coroutine_New(body, code, closure, name, qualname, module_name) \
__Pyx__Coroutine_New(__pyx_CoroutineType, body, code, closure, name, qualname, module_name)
static int __pyx_Coroutine_init(void); /*proto*/
static PyObject *__Pyx__Coroutine_await(PyObject *coroutine); /*proto*/
typedef struct {
PyObject_HEAD
PyObject *coroutine;
} __pyx_CoroutineAwaitObject;
static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, PyObject *arg); /*proto*/
static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args); /*proto*/
//////////////////// Generator.proto ////////////////////
#define __Pyx_Generator_USED
static PyTypeObject *__pyx_GeneratorType = 0;
#define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType)
#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name) \
__Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name)
static PyObject *__Pyx_Generator_Next(PyObject *self);
static int __pyx_Generator_init(void); /*proto*/
//////////////////// AsyncGen ////////////////////
//@requires: AsyncGen.c::AsyncGenerator
// -> empty, only delegates to separate file
//////////////////// CoroutineBase ////////////////////
//@substitute: naming
//@requires: Exceptions.c::PyErrFetchRestore
//@requires: Exceptions.c::PyThreadStateGet
//@requires: Exceptions.c::SwapException
//@requires: Exceptions.c::RaiseException
//@requires: Exceptions.c::SaveResetException
//@requires: ObjectHandling.c::PyObjectCallMethod1
//@requires: ObjectHandling.c::PyObjectGetAttrStr
//@requires: CommonStructures.c::FetchCommonType
#include <structmember.h>
#include <frameobject.h>
#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom)
// If StopIteration exception is set, fetches its 'value'
// attribute if any, otherwise sets pvalue to None.
//
// Returns 0 if no exception or StopIteration is set.
// If any other exception is set, returns -1 and leaves
// pvalue unchanged.
static int __Pyx_PyGen__FetchStopIterationValue(CYTHON_UNUSED PyThreadState *$local_tstate_cname, PyObject **pvalue) {
PyObject *et, *ev, *tb;
PyObject *value = NULL;
__Pyx_ErrFetch(&et, &ev, &tb);
if (!et) {
Py_XDECREF(tb);
Py_XDECREF(ev);
Py_INCREF(Py_None);
*pvalue = Py_None;
return 0;
}
// most common case: plain StopIteration without or with separate argument
if (likely(et == PyExc_StopIteration)) {
if (!ev) {
Py_INCREF(Py_None);
value = Py_None;
}
#if PY_VERSION_HEX >= 0x030300A0
else if (Py_TYPE(ev) == (PyTypeObject*)PyExc_StopIteration) {
value = ((PyStopIterationObject *)ev)->value;
Py_INCREF(value);
Py_DECREF(ev);
}
#endif
// PyErr_SetObject() and friends put the value directly into ev
else if (unlikely(PyTuple_Check(ev))) {
// if it's a tuple, it is interpreted as separate constructor arguments (surprise!)
if (PyTuple_GET_SIZE(ev) >= 1) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
value = PyTuple_GET_ITEM(ev, 0);
Py_INCREF(value);
#else
value = PySequence_ITEM(ev, 0);
#endif
} else {
Py_INCREF(Py_None);
value = Py_None;
}
Py_DECREF(ev);
}
else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) {
// 'steal' reference to ev
value = ev;
}
if (likely(value)) {
Py_XDECREF(tb);
Py_DECREF(et);
*pvalue = value;
return 0;
}
} else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) {
__Pyx_ErrRestore(et, ev, tb);
return -1;
}
// otherwise: normalise and check what that gives us
PyErr_NormalizeException(&et, &ev, &tb);
if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) {
// looks like normalisation failed - raise the new exception
__Pyx_ErrRestore(et, ev, tb);
return -1;
}
Py_XDECREF(tb);
Py_DECREF(et);
#if PY_VERSION_HEX >= 0x030300A0
value = ((PyStopIterationObject *)ev)->value;
Py_INCREF(value);
Py_DECREF(ev);
#else
{
PyObject* args = __Pyx_PyObject_GetAttrStr(ev, PYIDENT("args"));
Py_DECREF(ev);
if (likely(args)) {
value = PySequence_GetItem(args, 0);
Py_DECREF(args);
}
if (unlikely(!value)) {
__Pyx_ErrRestore(NULL, NULL, NULL);
Py_INCREF(Py_None);
value = Py_None;
}
}
#endif
*pvalue = value;
return 0;
}
static CYTHON_INLINE
void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) {
PyObject *t, *v, *tb;
t = exc_state->exc_type;
v = exc_state->exc_value;
tb = exc_state->exc_traceback;
exc_state->exc_type = NULL;
exc_state->exc_value = NULL;
exc_state->exc_traceback = NULL;
Py_XDECREF(t);
Py_XDECREF(v);
Py_XDECREF(tb);
}
#define __Pyx_Coroutine_AlreadyRunningError(gen) (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL)
static void __Pyx__Coroutine_AlreadyRunningError(CYTHON_UNUSED __pyx_CoroutineObject *gen) {
const char *msg;
if ((0)) {
#ifdef __Pyx_Coroutine_USED
} else if (__Pyx_Coroutine_Check((PyObject*)gen)) {
msg = "coroutine already executing";
#endif
#ifdef __Pyx_AsyncGen_USED
} else if (__Pyx_AsyncGen_CheckExact((PyObject*)gen)) {
msg = "async generator already executing";
#endif
} else {
msg = "generator already executing";
}
PyErr_SetString(PyExc_ValueError, msg);
}
#define __Pyx_Coroutine_NotStartedError(gen) (__Pyx__Coroutine_NotStartedError(gen), (PyObject*)NULL)
static void __Pyx__Coroutine_NotStartedError(CYTHON_UNUSED PyObject *gen) {
const char *msg;
if ((0)) {
#ifdef __Pyx_Coroutine_USED
} else if (__Pyx_Coroutine_Check(gen)) {
msg = "can't send non-None value to a just-started coroutine";
#endif
#ifdef __Pyx_AsyncGen_USED
} else if (__Pyx_AsyncGen_CheckExact(gen)) {
msg = "can't send non-None value to a just-started async generator";
#endif
} else {
msg = "can't send non-None value to a just-started generator";
}
PyErr_SetString(PyExc_TypeError, msg);
}
#define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing) (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL)
static void __Pyx__Coroutine_AlreadyTerminatedError(CYTHON_UNUSED PyObject *gen, PyObject *value, CYTHON_UNUSED int closing) {
#ifdef __Pyx_Coroutine_USED
if (!closing && __Pyx_Coroutine_Check(gen)) {
// `self` is an exhausted coroutine: raise an error,
// except when called from gen_close(), which should
// always be a silent method.
PyErr_SetString(PyExc_RuntimeError, "cannot reuse already awaited coroutine");
} else
#endif
if (value) {
// `gen` is an exhausted generator:
// only set exception if called from send().
#ifdef __Pyx_AsyncGen_USED
if (__Pyx_AsyncGen_CheckExact(gen))
PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration);
else
#endif
PyErr_SetNone(PyExc_StopIteration);
}
}
static
PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, int closing) {
__Pyx_PyThreadState_declare
PyThreadState *tstate;
__Pyx_ExcInfoStruct *exc_state;
PyObject *retval;
assert(!self->is_running);
if (unlikely(self->resume_label == 0)) {
if (unlikely(value && value != Py_None)) {
return __Pyx_Coroutine_NotStartedError((PyObject*)self);
}
}
if (unlikely(self->resume_label == -1)) {
return __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing);
}
#if CYTHON_FAST_THREAD_STATE
__Pyx_PyThreadState_assign
tstate = $local_tstate_cname;
#else
tstate = __Pyx_PyThreadState_Current;
#endif
// Traceback/Frame rules pre-Py3.7:
// - on entry, save external exception state in self->gi_exc_state, restore it on exit
// - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else
// - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame
// - on exit, clear "f_back" of internal exception traceback
// - do not touch external frames and tracebacks
// Traceback/Frame rules for Py3.7+ (CYTHON_USE_EXC_INFO_STACK):
// - on entry, push internal exception state in self->gi_exc_state on the exception stack
// - on exit, keep internally generated exceptions in self->gi_exc_state, clear everything else
// - on entry, set "f_back" pointer of internal exception traceback to (current) outer call frame
// - on exit, clear "f_back" of internal exception traceback
// - do not touch external frames and tracebacks
exc_state = &self->gi_exc_state;
if (exc_state->exc_type) {
#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON
// FIXME: what to do in PyPy?
#else
// Generators always return to their most recent caller, not
// necessarily their creator.
if (exc_state->exc_traceback) {
PyTracebackObject *tb = (PyTracebackObject *) exc_state->exc_traceback;
PyFrameObject *f = tb->tb_frame;
assert(f->f_back == NULL);
#if PY_VERSION_HEX >= 0x030B00A1
// PyThreadState_GetFrame returns NULL if there isn't a current frame
// which is a valid state so no need to check
f->f_back = PyThreadState_GetFrame(tstate);
#else
Py_XINCREF(tstate->frame);
f->f_back = tstate->frame;
#endif
}
#endif
}
#if CYTHON_USE_EXC_INFO_STACK
// See https://bugs.python.org/issue25612
exc_state->previous_item = tstate->exc_info;
tstate->exc_info = exc_state;
#else
if (exc_state->exc_type) {
// We were in an except handler when we left,
// restore the exception state which was put aside.
__Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback);
// self->exc_* now holds the exception state of the caller
} else {
// save away the exception state of the caller
__Pyx_Coroutine_ExceptionClear(exc_state);
__Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback);
}
#endif
self->is_running = 1;
retval = self->body((PyObject *) self, tstate, value);
self->is_running = 0;
#if CYTHON_USE_EXC_INFO_STACK
// See https://bugs.python.org/issue25612
exc_state = &self->gi_exc_state;
tstate->exc_info = exc_state->previous_item;
exc_state->previous_item = NULL;
// Cut off the exception frame chain so that we can reconnect it on re-entry above.
__Pyx_Coroutine_ResetFrameBackpointer(exc_state);
#endif
return retval;
}
static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) {
// Don't keep the reference to f_back any longer than necessary. It
// may keep a chain of frames alive or it could create a reference
// cycle.
PyObject *exc_tb = exc_state->exc_traceback;
if (likely(exc_tb)) {
#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON
// FIXME: what to do in PyPy?
#else
PyTracebackObject *tb = (PyTracebackObject *) exc_tb;
PyFrameObject *f = tb->tb_frame;
Py_CLEAR(f->f_back);
#endif
}
}
static CYTHON_INLINE
PyObject *__Pyx_Coroutine_MethodReturn(CYTHON_UNUSED PyObject* gen, PyObject *retval) {
if (unlikely(!retval)) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (!__Pyx_PyErr_Occurred()) {
// method call must not terminate with NULL without setting an exception
PyObject *exc = PyExc_StopIteration;
#ifdef __Pyx_AsyncGen_USED
if (__Pyx_AsyncGen_CheckExact(gen))
exc = __Pyx_PyExc_StopAsyncIteration;
#endif
__Pyx_PyErr_SetNone(exc);
}
}
return retval;
}
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
static CYTHON_INLINE
PyObject *__Pyx_PyGen_Send(PyGenObject *gen, PyObject *arg) {
#if PY_VERSION_HEX <= 0x030A00A1
return _PyGen_Send(gen, arg);
#else
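// Py3.10 removed the private _PyGen_Send(); emulate it here via the public
// PyIter_Send() and translate a PYGEN_RETURN result back into the
// StopIteration / StopAsyncIteration form that callers of this helper expect.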
PyObject *result;
// PyIter_Send() asserts non-NULL arg
if (PyIter_Send((PyObject*)gen, arg ? arg : Py_None, &result) == PYGEN_RETURN) {
if (PyAsyncGen_CheckExact(gen)) {
assert(result == Py_None);
PyErr_SetNone(PyExc_StopAsyncIteration);
}
else if (result == Py_None) {
PyErr_SetNone(PyExc_StopIteration);
}
else {
_PyGen_SetStopIterationValue(result);
}
Py_CLEAR(result);
}
return result;
#endif
}
#endif
static CYTHON_INLINE
PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) {
PyObject *ret;
PyObject *val = NULL;
__Pyx_Coroutine_Undelegate(gen);
__Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val);
// val == NULL on failure => pass on exception
ret = __Pyx_Coroutine_SendEx(gen, val, 0);
Py_XDECREF(val);
return ret;
}
static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) {
PyObject *retval;
__pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self;
PyObject *yf = gen->yieldfrom;
if (unlikely(gen->is_running))
return __Pyx_Coroutine_AlreadyRunningError(gen);
if (yf) {
PyObject *ret;
// FIXME: does this really need an INCREF() ?
//Py_INCREF(yf);
gen->is_running = 1;
#ifdef __Pyx_Generator_USED
if (__Pyx_Generator_CheckExact(yf)) {
ret = __Pyx_Coroutine_Send(yf, value);
} else
#endif
#ifdef __Pyx_Coroutine_USED
if (__Pyx_Coroutine_Check(yf)) {
ret = __Pyx_Coroutine_Send(yf, value);
} else
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_PyAsyncGenASend_CheckExact(yf)) {
ret = __Pyx_async_gen_asend_send(yf, value);
} else
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
// _PyGen_Send() is not exported before Py3.6
if (PyGen_CheckExact(yf)) {
ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value);
} else
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03050000 && defined(PyCoro_CheckExact) && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
// _PyGen_Send() is not exported before Py3.6
if (PyCoro_CheckExact(yf)) {
ret = __Pyx_PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value);
} else
#endif
{
if (value == Py_None)
ret = Py_TYPE(yf)->tp_iternext(yf);
else
ret = __Pyx_PyObject_CallMethod1(yf, PYIDENT("send"), value);
}
gen->is_running = 0;
//Py_DECREF(yf);
if (likely(ret)) {
return ret;
}
retval = __Pyx_Coroutine_FinishDelegation(gen);
} else {
retval = __Pyx_Coroutine_SendEx(gen, value, 0);
}
return __Pyx_Coroutine_MethodReturn(self, retval);
}
// This helper function is used by gen_close and gen_throw to
// close a subiterator being delegated to by yield-from.
static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) {
PyObject *retval = NULL;
int err = 0;
#ifdef __Pyx_Generator_USED
if (__Pyx_Generator_CheckExact(yf)) {
retval = __Pyx_Coroutine_Close(yf);
if (!retval)
return -1;
} else
#endif
#ifdef __Pyx_Coroutine_USED
if (__Pyx_Coroutine_Check(yf)) {
retval = __Pyx_Coroutine_Close(yf);
if (!retval)
return -1;
} else
if (__Pyx_CoroutineAwait_CheckExact(yf)) {
retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL);
if (!retval)
return -1;
} else
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_PyAsyncGenASend_CheckExact(yf)) {
retval = __Pyx_async_gen_asend_close(yf, NULL);
// cannot fail
} else
if (__pyx_PyAsyncGenAThrow_CheckExact(yf)) {
retval = __Pyx_async_gen_athrow_close(yf, NULL);
// cannot fail
} else
#endif
{
PyObject *meth;
gen->is_running = 1;
meth = __Pyx_PyObject_GetAttrStr(yf, PYIDENT("close"));
if (unlikely(!meth)) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_WriteUnraisable(yf);
}
PyErr_Clear();
} else {
retval = PyObject_CallFunction(meth, NULL);
Py_DECREF(meth);
if (!retval)
err = -1;
}
gen->is_running = 0;
}
Py_XDECREF(retval);
return err;
}
static PyObject *__Pyx_Generator_Next(PyObject *self) {
__pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self;
PyObject *yf = gen->yieldfrom;
if (unlikely(gen->is_running))
return __Pyx_Coroutine_AlreadyRunningError(gen);
if (yf) {
PyObject *ret;
// FIXME: does this really need an INCREF() ?
//Py_INCREF(yf);
// YieldFrom code ensures that yf is an iterator
gen->is_running = 1;
#ifdef __Pyx_Generator_USED
if (__Pyx_Generator_CheckExact(yf)) {
ret = __Pyx_Generator_Next(yf);
} else
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
// _PyGen_Send() is not exported before Py3.6
if (PyGen_CheckExact(yf)) {
ret = __Pyx_PyGen_Send((PyGenObject*)yf, NULL);
} else
#endif
#ifdef __Pyx_Coroutine_USED
if (__Pyx_Coroutine_Check(yf)) {
ret = __Pyx_Coroutine_Send(yf, Py_None);
} else
#endif
ret = Py_TYPE(yf)->tp_iternext(yf);
gen->is_running = 0;
//Py_DECREF(yf);
if (likely(ret)) {
return ret;
}
return __Pyx_Coroutine_FinishDelegation(gen);
}
return __Pyx_Coroutine_SendEx(gen, Py_None, 0);
}
static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, CYTHON_UNUSED PyObject *arg) {
return __Pyx_Coroutine_Close(self);
}
static PyObject *__Pyx_Coroutine_Close(PyObject *self) {
__pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
PyObject *retval, *raised_exception;
PyObject *yf = gen->yieldfrom;
int err = 0;
if (unlikely(gen->is_running))
return __Pyx_Coroutine_AlreadyRunningError(gen);
if (yf) {
Py_INCREF(yf);
err = __Pyx_Coroutine_CloseIter(gen, yf);
__Pyx_Coroutine_Undelegate(gen);
Py_DECREF(yf);
}
if (err == 0)
PyErr_SetNone(PyExc_GeneratorExit);
retval = __Pyx_Coroutine_SendEx(gen, NULL, 1);
if (unlikely(retval)) {
const char *msg;
Py_DECREF(retval);
if ((0)) {
#ifdef __Pyx_Coroutine_USED
} else if (__Pyx_Coroutine_Check(self)) {
msg = "coroutine ignored GeneratorExit";
#endif
#ifdef __Pyx_AsyncGen_USED
} else if (__Pyx_AsyncGen_CheckExact(self)) {
#if PY_VERSION_HEX < 0x03060000
msg = "async generator ignored GeneratorExit - might require Python 3.6+ finalisation (PEP 525)";
#else
msg = "async generator ignored GeneratorExit";
#endif
#endif
} else {
msg = "generator ignored GeneratorExit";
}
PyErr_SetString(PyExc_RuntimeError, msg);
return NULL;
}
raised_exception = PyErr_Occurred();
if (likely(!raised_exception || __Pyx_PyErr_GivenExceptionMatches2(raised_exception, PyExc_GeneratorExit, PyExc_StopIteration))) {
// ignore these errors
if (raised_exception) PyErr_Clear();
Py_INCREF(Py_None);
return Py_None;
}
return NULL;
}
static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb,
PyObject *args, int close_on_genexit) {
__pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
PyObject *yf = gen->yieldfrom;
if (unlikely(gen->is_running))
return __Pyx_Coroutine_AlreadyRunningError(gen);
if (yf) {
PyObject *ret;
Py_INCREF(yf);
if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && close_on_genexit) {
// Asynchronous generators *should not* be closed right away.
// We have to allow some awaits to work it through, hence the
// `close_on_genexit` parameter here.
int err = __Pyx_Coroutine_CloseIter(gen, yf);
Py_DECREF(yf);
__Pyx_Coroutine_Undelegate(gen);
if (err < 0)
return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0));
goto throw_here;
}
gen->is_running = 1;
if (0
#ifdef __Pyx_Generator_USED
|| __Pyx_Generator_CheckExact(yf)
#endif
#ifdef __Pyx_Coroutine_USED
|| __Pyx_Coroutine_Check(yf)
#endif
) {
ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit);
#ifdef __Pyx_Coroutine_USED
} else if (__Pyx_CoroutineAwait_CheckExact(yf)) {
ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit);
#endif
} else {
PyObject *meth = __Pyx_PyObject_GetAttrStr(yf, PYIDENT("throw"));
if (unlikely(!meth)) {
Py_DECREF(yf);
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {
gen->is_running = 0;
return NULL;
}
PyErr_Clear();
__Pyx_Coroutine_Undelegate(gen);
gen->is_running = 0;
goto throw_here;
}
if (likely(args)) {
ret = PyObject_CallObject(meth, args);
} else {
// "tb" or even "val" might be NULL, but that also correctly terminates the argument list
ret = PyObject_CallFunctionObjArgs(meth, typ, val, tb, NULL);
}
Py_DECREF(meth);
}
gen->is_running = 0;
Py_DECREF(yf);
if (!ret) {
ret = __Pyx_Coroutine_FinishDelegation(gen);
}
return __Pyx_Coroutine_MethodReturn(self, ret);
}
throw_here:
__Pyx_Raise(typ, val, tb, NULL);
return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0));
}
static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) {
PyObject *typ;
PyObject *val = NULL;
PyObject *tb = NULL;
if (!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb))
return NULL;
return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1);
}
static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) {
Py_VISIT(exc_state->exc_type);
Py_VISIT(exc_state->exc_value);
Py_VISIT(exc_state->exc_traceback);
return 0;
}
static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) {
Py_VISIT(gen->closure);
Py_VISIT(gen->classobj);
Py_VISIT(gen->yieldfrom);
return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg);
}
static int __Pyx_Coroutine_clear(PyObject *self) {
__pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
Py_CLEAR(gen->closure);
Py_CLEAR(gen->classobj);
Py_CLEAR(gen->yieldfrom);
__Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state);
#ifdef __Pyx_AsyncGen_USED
if (__Pyx_AsyncGen_CheckExact(self)) {
Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer);
}
#endif
Py_CLEAR(gen->gi_code);
Py_CLEAR(gen->gi_frame);
Py_CLEAR(gen->gi_name);
Py_CLEAR(gen->gi_qualname);
Py_CLEAR(gen->gi_modulename);
return 0;
}
static void __Pyx_Coroutine_dealloc(PyObject *self) {
__pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
PyObject_GC_UnTrack(gen);
if (gen->gi_weakreflist != NULL)
PyObject_ClearWeakRefs(self);
if (gen->resume_label >= 0) {
// Generator is paused or unstarted, so we need to close
PyObject_GC_Track(self);
#if PY_VERSION_HEX >= 0x030400a1 && CYTHON_USE_TP_FINALIZE
if (PyObject_CallFinalizerFromDealloc(self))
#else
Py_TYPE(gen)->tp_del(self);
if (Py_REFCNT(self) > 0)
#endif
{
// resurrected. :(
return;
}
PyObject_GC_UnTrack(self);
}
#ifdef __Pyx_AsyncGen_USED
if (__Pyx_AsyncGen_CheckExact(self)) {
/* We have to handle this case for asynchronous generators
right here, because this code has to be between UNTRACK
and GC_Del. */
Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer);
}
#endif
__Pyx_Coroutine_clear(self);
PyObject_GC_Del(gen);
}
static void __Pyx_Coroutine_del(PyObject *self) {
PyObject *error_type, *error_value, *error_traceback;
__pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
__Pyx_PyThreadState_declare
if (gen->resume_label < 0) {
// already terminated => nothing to clean up
return;
}
#if !CYTHON_USE_TP_FINALIZE
// Temporarily resurrect the object.
assert(self->ob_refcnt == 0);
__Pyx_SET_REFCNT(self, 1);
#endif
__Pyx_PyThreadState_assign
// Save the current exception, if any.
__Pyx_ErrFetch(&error_type, &error_value, &error_traceback);
#ifdef __Pyx_AsyncGen_USED
if (__Pyx_AsyncGen_CheckExact(self)) {
__pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self;
PyObject *finalizer = agen->ag_finalizer;
if (finalizer && !agen->ag_closed) {
PyObject *res = __Pyx_PyObject_CallOneArg(finalizer, self);
if (unlikely(!res)) {
PyErr_WriteUnraisable(self);
} else {
Py_DECREF(res);
}
// Restore the saved exception.
__Pyx_ErrRestore(error_type, error_value, error_traceback);
return;
}
}
#endif
if (unlikely(gen->resume_label == 0 && !error_value)) {
#ifdef __Pyx_Coroutine_USED
#ifdef __Pyx_Generator_USED
// only warn about (async) coroutines
if (!__Pyx_Generator_CheckExact(self))
#endif
{
// untrack dead object as we are executing Python code (which might trigger GC)
PyObject_GC_UnTrack(self);
#if PY_MAJOR_VERSION >= 3 /* PY_VERSION_HEX >= 0x03030000 */ || defined(PyErr_WarnFormat)
if (unlikely(PyErr_WarnFormat(PyExc_RuntimeWarning, 1, "coroutine '%.50S' was never awaited", gen->gi_qualname) < 0))
PyErr_WriteUnraisable(self);
#else
{PyObject *msg;
char *cmsg;
#if CYTHON_COMPILING_IN_PYPY
msg = NULL;
cmsg = (char*) "coroutine was never awaited";
#else
char *cname;
PyObject *qualname;
qualname = gen->gi_qualname;
cname = PyString_AS_STRING(qualname);
msg = PyString_FromFormat("coroutine '%.50s' was never awaited", cname);
if (unlikely(!msg)) {
PyErr_Clear();
cmsg = (char*) "coroutine was never awaited";
} else {
cmsg = PyString_AS_STRING(msg);
}
#endif
if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, cmsg, 1) < 0))
PyErr_WriteUnraisable(self);
Py_XDECREF(msg);}
#endif
PyObject_GC_Track(self);
}
#endif /*__Pyx_Coroutine_USED*/
} else {
PyObject *res = __Pyx_Coroutine_Close(self);
if (unlikely(!res)) {
if (PyErr_Occurred())
PyErr_WriteUnraisable(self);
} else {
Py_DECREF(res);
}
}
// Restore the saved exception.
__Pyx_ErrRestore(error_type, error_value, error_traceback);
#if !CYTHON_USE_TP_FINALIZE
// Undo the temporary resurrection; can't use DECREF here, it would
// cause a recursive call.
assert(Py_REFCNT(self) > 0);
if (--self->ob_refcnt == 0) {
// this is the normal path out
return;
}
// close() resurrected it! Make it look like the original Py_DECREF
// never happened.
{
Py_ssize_t refcnt = Py_REFCNT(self);
_Py_NewReference(self);
__Pyx_SET_REFCNT(self, refcnt);
}
#if CYTHON_COMPILING_IN_CPYTHON
assert(PyType_IS_GC(Py_TYPE(self)) &&
_Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED);
// If Py_REF_DEBUG, _Py_NewReference bumped _Py_RefTotal, so
// we need to undo that.
_Py_DEC_REFTOTAL;
#endif
// If Py_TRACE_REFS, _Py_NewReference re-added self to the object
// chain, so no more to do there.
// If COUNT_ALLOCS, the original decref bumped tp_frees, and
// _Py_NewReference bumped tp_allocs: both of those need to be
// undone.
#ifdef COUNT_ALLOCS
--Py_TYPE(self)->tp_frees;
--Py_TYPE(self)->tp_allocs;
#endif
#endif
}
static PyObject *
__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context)
{
PyObject *name = self->gi_name;
// avoid NULL pointer dereference during garbage collection
if (unlikely(!name)) name = Py_None;
Py_INCREF(name);
return name;
}
static int
__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__name__ must be set to a string object");
return -1;
}
tmp = self->gi_name;
Py_INCREF(value);
self->gi_name = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context)
{
PyObject *name = self->gi_qualname;
// avoid NULL pointer dereference during garbage collection
if (unlikely(!name)) name = Py_None;
Py_INCREF(name);
return name;
}
static int
__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__qualname__ must be set to a string object");
return -1;
}
tmp = self->gi_qualname;
Py_INCREF(value);
self->gi_qualname = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_Coroutine_get_frame(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context)
{
PyObject *frame = self->gi_frame;
if (!frame) {
if (unlikely(!self->gi_code)) {
// Avoid doing something stupid, e.g. during garbage collection.
Py_RETURN_NONE;
}
frame = (PyObject *) PyFrame_New(
PyThreadState_Get(), /*PyThreadState *tstate,*/
(PyCodeObject*) self->gi_code, /*PyCodeObject *code,*/
$moddict_cname, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (unlikely(!frame))
return NULL;
// keep the frame cached once it's created
self->gi_frame = frame;
}
Py_INCREF(frame);
return frame;
}
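// Allocation and field initialisation are split between __Pyx__Coroutine_New()
// and __Pyx__Coroutine_NewInit() so that larger subtypes (e.g. async
// generators) can allocate their own struct and reuse the common init code.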
static __pyx_CoroutineObject *__Pyx__Coroutine_New(
PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name) {
__pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type);
if (unlikely(!gen))
return NULL;
return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name);
}
static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
__pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
PyObject *name, PyObject *qualname, PyObject *module_name) {
gen->body = body;
gen->closure = closure;
Py_XINCREF(closure);
gen->is_running = 0;
gen->resume_label = 0;
gen->classobj = NULL;
gen->yieldfrom = NULL;
gen->gi_exc_state.exc_type = NULL;
gen->gi_exc_state.exc_value = NULL;
gen->gi_exc_state.exc_traceback = NULL;
#if CYTHON_USE_EXC_INFO_STACK
gen->gi_exc_state.previous_item = NULL;
#endif
gen->gi_weakreflist = NULL;
Py_XINCREF(qualname);
gen->gi_qualname = qualname;
Py_XINCREF(name);
gen->gi_name = name;
Py_XINCREF(module_name);
gen->gi_modulename = module_name;
Py_XINCREF(code);
gen->gi_code = code;
gen->gi_frame = NULL;
PyObject_GC_Track(gen);
return gen;
}
//////////////////// Coroutine ////////////////////
//@requires: CoroutineBase
//@requires: PatchGeneratorABC
//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
static void __Pyx_CoroutineAwait_dealloc(PyObject *self) {
PyObject_GC_UnTrack(self);
Py_CLEAR(((__pyx_CoroutineAwaitObject*)self)->coroutine);
PyObject_GC_Del(self);
}
static int __Pyx_CoroutineAwait_traverse(__pyx_CoroutineAwaitObject *self, visitproc visit, void *arg) {
Py_VISIT(self->coroutine);
return 0;
}
static int __Pyx_CoroutineAwait_clear(__pyx_CoroutineAwaitObject *self) {
Py_CLEAR(self->coroutine);
return 0;
}
static PyObject *__Pyx_CoroutineAwait_Next(__pyx_CoroutineAwaitObject *self) {
return __Pyx_Generator_Next(self->coroutine);
}
static PyObject *__Pyx_CoroutineAwait_Send(__pyx_CoroutineAwaitObject *self, PyObject *value) {
return __Pyx_Coroutine_Send(self->coroutine, value);
}
static PyObject *__Pyx_CoroutineAwait_Throw(__pyx_CoroutineAwaitObject *self, PyObject *args) {
return __Pyx_Coroutine_Throw(self->coroutine, args);
}
static PyObject *__Pyx_CoroutineAwait_Close(__pyx_CoroutineAwaitObject *self, CYTHON_UNUSED PyObject *arg) {
return __Pyx_Coroutine_Close(self->coroutine);
}
static PyObject *__Pyx_CoroutineAwait_self(PyObject *self) {
Py_INCREF(self);
return self;
}
#if !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_CoroutineAwait_no_new(CYTHON_UNUSED PyTypeObject *type, CYTHON_UNUSED PyObject *args, CYTHON_UNUSED PyObject *kwargs) {
PyErr_SetString(PyExc_TypeError, "cannot instantiate type, use 'await coroutine' instead");
return NULL;
}
#endif
static PyMethodDef __pyx_CoroutineAwait_methods[] = {
{"send", (PyCFunction) __Pyx_CoroutineAwait_Send, METH_O,
(char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next yielded value or raise StopIteration.")},
{"throw", (PyCFunction) __Pyx_CoroutineAwait_Throw, METH_VARARGS,
(char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next yielded value or raise StopIteration.")},
{"close", (PyCFunction) __Pyx_CoroutineAwait_Close, METH_NOARGS,
(char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_CoroutineAwaitType_type = {
PyVarObject_HEAD_INIT(0, 0)
"coroutine_wrapper", /*tp_name*/
sizeof(__pyx_CoroutineAwaitObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_CoroutineAwait_dealloc,/*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_as_async (Py3) / tp_compare (Py2)*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/
PyDoc_STR("A wrapper object implementing __await__ for coroutines."), /*tp_doc*/
(traverseproc) __Pyx_CoroutineAwait_traverse, /*tp_traverse*/
(inquiry) __Pyx_CoroutineAwait_clear, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
__Pyx_CoroutineAwait_self, /*tp_iter*/
(iternextfunc) __Pyx_CoroutineAwait_Next, /*tp_iternext*/
__pyx_CoroutineAwait_methods, /*tp_methods*/
0 , /*tp_members*/
0 , /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
#if !CYTHON_COMPILING_IN_PYPY
__Pyx_CoroutineAwait_no_new, /*tp_new*/
#else
0, /*tp_new*/
#endif
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
#if PY_VERSION_HEX < 0x030500B1 || defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS
static CYTHON_INLINE PyObject *__Pyx__Coroutine_await(PyObject *coroutine) {
__pyx_CoroutineAwaitObject *await = PyObject_GC_New(__pyx_CoroutineAwaitObject, __pyx_CoroutineAwaitType);
if (unlikely(!await)) return NULL;
Py_INCREF(coroutine);
await->coroutine = coroutine;
PyObject_GC_Track(await);
return (PyObject*)await;
}
#endif
#if PY_VERSION_HEX < 0x030500B1
static PyObject *__Pyx_Coroutine_await_method(PyObject *coroutine, CYTHON_UNUSED PyObject *arg) {
return __Pyx__Coroutine_await(coroutine);
}
#endif
#if defined(__Pyx_IterableCoroutine_USED) || CYTHON_USE_ASYNC_SLOTS
static PyObject *__Pyx_Coroutine_await(PyObject *coroutine) {
if (unlikely(!coroutine || !__Pyx_Coroutine_Check(coroutine))) {
PyErr_SetString(PyExc_TypeError, "invalid input, expected coroutine");
return NULL;
}
return __Pyx__Coroutine_await(coroutine);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
static PyObject *__Pyx_Coroutine_compare(PyObject *obj, PyObject *other, int op) {
PyObject* result;
switch (op) {
case Py_EQ: result = (other == obj) ? Py_True : Py_False; break;
case Py_NE: result = (other != obj) ? Py_True : Py_False; break;
default:
result = Py_NotImplemented;
}
Py_INCREF(result);
return result;
}
#endif
static PyMethodDef __pyx_Coroutine_methods[] = {
{"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O,
(char*) PyDoc_STR("send(arg) -> send 'arg' into coroutine,\nreturn next iterated value or raise StopIteration.")},
{"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS,
(char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in coroutine,\nreturn next iterated value or raise StopIteration.")},
{"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS,
(char*) PyDoc_STR("close() -> raise GeneratorExit inside coroutine.")},
#if PY_VERSION_HEX < 0x030500B1
{"__await__", (PyCFunction) __Pyx_Coroutine_await_method, METH_NOARGS,
(char*) PyDoc_STR("__await__() -> return an iterator to be used in await expression.")},
#endif
{0, 0, 0, 0}
};
static PyMemberDef __pyx_Coroutine_memberlist[] = {
{(char *) "cr_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
{(char*) "cr_await", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
(char*) PyDoc_STR("object being awaited, or None")},
{(char*) "cr_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL},
{(char *) "__module__", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_modulename), PY_WRITE_RESTRICTED, 0},
{0, 0, 0, 0, 0}
};
static PyGetSetDef __pyx_Coroutine_getsets[] = {
{(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
(char*) PyDoc_STR("name of the coroutine"), 0},
{(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
(char*) PyDoc_STR("qualified name of the coroutine"), 0},
{(char *) "cr_frame", (getter)__Pyx_Coroutine_get_frame, NULL,
(char*) PyDoc_STR("Frame of the coroutine"), 0},
{0, 0, 0, 0, 0}
};
#if CYTHON_USE_ASYNC_SLOTS
static __Pyx_PyAsyncMethodsStruct __pyx_Coroutine_as_async = {
__Pyx_Coroutine_await, /*am_await*/
0, /*am_aiter*/
0, /*am_anext*/
#if PY_VERSION_HEX >= 0x030A00A3
0, /*am_send*/
#endif
};
#endif
static PyTypeObject __pyx_CoroutineType_type = {
PyVarObject_HEAD_INIT(0, 0)
"coroutine", /*tp_name*/
sizeof(__pyx_CoroutineObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if CYTHON_USE_ASYNC_SLOTS
&__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! */
#else
0, /*tp_reserved*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/
0, /*tp_doc*/
(traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/
0, /*tp_clear*/
#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
// in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
__Pyx_Coroutine_compare, /*tp_richcompare*/
#else
0, /*tp_richcompare*/
#endif
offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/
// no tp_iter() as iterator is only available through __await__()
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_Coroutine_methods, /*tp_methods*/
__pyx_Coroutine_memberlist, /*tp_members*/
__pyx_Coroutine_getsets, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
0, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
#if CYTHON_USE_TP_FINALIZE
0, /*tp_del*/
#else
__Pyx_Coroutine_del, /*tp_del*/
#endif
0, /*tp_version_tag*/
#if CYTHON_USE_TP_FINALIZE
__Pyx_Coroutine_del, /*tp_finalize*/
#elif PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static int __pyx_Coroutine_init(void) {
// on Windows, C-API functions can't be used in slots statically
__pyx_CoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_CoroutineType = __Pyx_FetchCommonType(&__pyx_CoroutineType_type);
if (unlikely(!__pyx_CoroutineType))
return -1;
#ifdef __Pyx_IterableCoroutine_USED
if (unlikely(__pyx_IterableCoroutine_init() == -1))
return -1;
#endif
__pyx_CoroutineAwaitType = __Pyx_FetchCommonType(&__pyx_CoroutineAwaitType_type);
if (unlikely(!__pyx_CoroutineAwaitType))
return -1;
return 0;
}
//////////////////// IterableCoroutine.proto ////////////////////
#define __Pyx_IterableCoroutine_USED
static PyTypeObject *__pyx_IterableCoroutineType = 0;
#undef __Pyx_Coroutine_Check
#define __Pyx_Coroutine_Check(obj) (__Pyx_Coroutine_CheckExact(obj) || (Py_TYPE(obj) == __pyx_IterableCoroutineType))
#define __Pyx_IterableCoroutine_New(body, code, closure, name, qualname, module_name) \
__Pyx__Coroutine_New(__pyx_IterableCoroutineType, body, code, closure, name, qualname, module_name)
static int __pyx_IterableCoroutine_init(void);/*proto*/
//////////////////// IterableCoroutine ////////////////////
//@requires: Coroutine
//@requires: CommonStructures.c::FetchCommonType
static PyTypeObject __pyx_IterableCoroutineType_type = {
PyVarObject_HEAD_INIT(0, 0)
"iterable_coroutine", /*tp_name*/
sizeof(__pyx_CoroutineObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if CYTHON_USE_ASYNC_SLOTS
&__pyx_Coroutine_as_async, /*tp_as_async (tp_reserved) - Py3 only! */
#else
0, /*tp_reserved*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/
0, /*tp_doc*/
(traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/
0, /*tp_clear*/
#if CYTHON_USE_ASYNC_SLOTS && CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 && PY_VERSION_HEX < 0x030500B1
// in order to (mis-)use tp_reserved above, we must also implement tp_richcompare
__Pyx_Coroutine_compare, /*tp_richcompare*/
#else
0, /*tp_richcompare*/
#endif
offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/
// enable iteration for legacy support of asyncio yield-from protocol
__Pyx_Coroutine_await, /*tp_iter*/
(iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/
__pyx_Coroutine_methods, /*tp_methods*/
__pyx_Coroutine_memberlist, /*tp_members*/
__pyx_Coroutine_getsets, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
0, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_del*/
#else
__Pyx_Coroutine_del, /*tp_del*/
#endif
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
__Pyx_Coroutine_del, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static int __pyx_IterableCoroutine_init(void) {
__pyx_IterableCoroutineType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_IterableCoroutineType = __Pyx_FetchCommonType(&__pyx_IterableCoroutineType_type);
if (unlikely(!__pyx_IterableCoroutineType))
return -1;
return 0;
}
//////////////////// Generator ////////////////////
//@requires: CoroutineBase
//@requires: PatchGeneratorABC
//@requires: ObjectHandling.c::PyObject_GenericGetAttrNoDict
static PyMethodDef __pyx_Generator_methods[] = {
{"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O,
(char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")},
{"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS,
(char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")},
{"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS,
(char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")},
{0, 0, 0, 0}
};
static PyMemberDef __pyx_Generator_memberlist[] = {
{(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
{(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
(char*) PyDoc_STR("object being iterated by 'yield from', or None")},
{(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL},
{0, 0, 0, 0, 0}
};
static PyGetSetDef __pyx_Generator_getsets[] = {
{(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
(char*) PyDoc_STR("name of the generator"), 0},
{(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
(char*) PyDoc_STR("qualified name of the generator"), 0},
{(char *) "gi_frame", (getter)__Pyx_Coroutine_get_frame, NULL,
(char*) PyDoc_STR("Frame of the generator"), 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_GeneratorType_type = {
PyVarObject_HEAD_INIT(0, 0)
"generator", /*tp_name*/
sizeof(__pyx_CoroutineObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_Coroutine_dealloc,/*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare / tp_as_async*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE, /*tp_flags*/
0, /*tp_doc*/
(traverseproc) __Pyx_Coroutine_traverse, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
offsetof(__pyx_CoroutineObject, gi_weakreflist), /*tp_weaklistoffset*/
0, /*tp_iter*/
(iternextfunc) __Pyx_Generator_Next, /*tp_iternext*/
__pyx_Generator_methods, /*tp_methods*/
__pyx_Generator_memberlist, /*tp_members*/
__pyx_Generator_getsets, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
0, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
#if CYTHON_USE_TP_FINALIZE
0, /*tp_del*/
#else
__Pyx_Coroutine_del, /*tp_del*/
#endif
0, /*tp_version_tag*/
#if CYTHON_USE_TP_FINALIZE
__Pyx_Coroutine_del, /*tp_finalize*/
#elif PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static int __pyx_Generator_init(void) {
// on Windows, C-API functions can't be used in slots statically
__pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
__pyx_GeneratorType_type.tp_iter = PyObject_SelfIter;
__pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type);
if (unlikely(!__pyx_GeneratorType)) {
return -1;
}
return 0;
}
/////////////// ReturnWithStopIteration.proto ///////////////
#define __Pyx_ReturnWithStopIteration(value) \
if (value == Py_None) PyErr_SetNone(PyExc_StopIteration); else __Pyx__ReturnWithStopIteration(value)
static void __Pyx__ReturnWithStopIteration(PyObject* value); /*proto*/
/////////////// ReturnWithStopIteration ///////////////
//@requires: Exceptions.c::PyErrFetchRestore
//@requires: Exceptions.c::PyThreadStateGet
//@substitute: naming
// 1) Instantiating an exception just to pass back a value is costly.
// 2) CPython 3.3 <= x < 3.5b1 crashes in yield-from when the StopIteration is not instantiated.
// 3) Passing a tuple as value into PyErr_SetObject() passes its items on as arguments.
// 4) Passing an exception as value will interpret it as an exception on unpacking and raise it (or unpack its value).
// 5) If there is currently an exception being handled, we need to chain it.
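// Example for (3)/(4): PyErr_SetObject(PyExc_StopIteration, value) with a
// tuple value (1, 2) would be normalised to StopIteration(1, 2) instead of
// StopIteration((1, 2)), losing the return value; hence the explicit
// instantiation below for tuples and exception instances.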
static void __Pyx__ReturnWithStopIteration(PyObject* value) {
PyObject *exc, *args;
#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_PYSTON
__Pyx_PyThreadState_declare
if ((PY_VERSION_HEX >= 0x03030000 && PY_VERSION_HEX < 0x030500B1)
|| unlikely(PyTuple_Check(value) || PyExceptionInstance_Check(value))) {
args = PyTuple_New(1);
if (unlikely(!args)) return;
Py_INCREF(value);
PyTuple_SET_ITEM(args, 0, value);
exc = PyType_Type.tp_call(PyExc_StopIteration, args, NULL);
Py_DECREF(args);
if (!exc) return;
} else {
// it's safe to avoid instantiating the exception
Py_INCREF(value);
exc = value;
}
#if CYTHON_FAST_THREAD_STATE
__Pyx_PyThreadState_assign
#if CYTHON_USE_EXC_INFO_STACK
if (!$local_tstate_cname->exc_info->exc_type)
#else
if (!$local_tstate_cname->exc_type)
#endif
{
// no chaining needed => avoid the overhead in PyErr_SetObject()
Py_INCREF(PyExc_StopIteration);
__Pyx_ErrRestore(PyExc_StopIteration, exc, NULL);
return;
}
#endif
#else
args = PyTuple_Pack(1, value);
if (unlikely(!args)) return;
exc = PyObject_Call(PyExc_StopIteration, args, NULL);
Py_DECREF(args);
if (unlikely(!exc)) return;
#endif
PyErr_SetObject(PyExc_StopIteration, exc);
Py_DECREF(exc);
}
//////////////////// PatchModuleWithCoroutine.proto ////////////////////
static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code); /*proto*/
//////////////////// PatchModuleWithCoroutine ////////////////////
//@substitute: naming
static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) {
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
int result;
PyObject *globals, *result_obj;
globals = PyDict_New(); if (unlikely(!globals)) goto ignore;
result = PyDict_SetItemString(globals, "_cython_coroutine_type",
#ifdef __Pyx_Coroutine_USED
(PyObject*)__pyx_CoroutineType);
#else
Py_None);
#endif
if (unlikely(result < 0)) goto ignore;
result = PyDict_SetItemString(globals, "_cython_generator_type",
#ifdef __Pyx_Generator_USED
(PyObject*)__pyx_GeneratorType);
#else
Py_None);
#endif
if (unlikely(result < 0)) goto ignore;
if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore;
if (unlikely(PyDict_SetItemString(globals, "__builtins__", $builtins_cname) < 0)) goto ignore;
result_obj = PyRun_String(py_code, Py_file_input, globals, globals);
if (unlikely(!result_obj)) goto ignore;
Py_DECREF(result_obj);
Py_DECREF(globals);
return module;
ignore:
Py_XDECREF(globals);
PyErr_WriteUnraisable(module);
if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) {
Py_DECREF(module);
module = NULL;
}
#else
// avoid "unused" warning
py_code++;
#endif
return module;
}
//////////////////// PatchGeneratorABC.proto ////////////////////
// register with Generator/Coroutine ABCs in 'collections.abc'
// see https://bugs.python.org/issue24018
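// After successful registration, isinstance(obj, collections.abc.Generator)
// and isinstance(obj, collections.abc.Coroutine) return True for the
// corresponding Cython-generated objects, matching their CPython equivalents.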
static int __Pyx_patch_abc(void); /*proto*/
//////////////////// PatchGeneratorABC ////////////////////
//@requires: PatchModuleWithCoroutine
#ifndef CYTHON_REGISTER_ABCS
#define CYTHON_REGISTER_ABCS 1
#endif
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
static PyObject* __Pyx_patch_abc_module(PyObject *module); /*proto*/
static PyObject* __Pyx_patch_abc_module(PyObject *module) {
module = __Pyx_Coroutine_patch_module(
module, CSTRING("""\
if _cython_generator_type is not None:
try: Generator = _module.Generator
except AttributeError: pass
else: Generator.register(_cython_generator_type)
if _cython_coroutine_type is not None:
try: Coroutine = _module.Coroutine
except AttributeError: pass
else: Coroutine.register(_cython_coroutine_type)
""")
);
return module;
}
#endif
static int __Pyx_patch_abc(void) {
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
static int abc_patched = 0;
if (CYTHON_REGISTER_ABCS && !abc_patched) {
PyObject *module;
module = PyImport_ImportModule((PY_MAJOR_VERSION >= 3) ? "collections.abc" : "collections");
if (!module) {
PyErr_WriteUnraisable(NULL);
if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning,
((PY_MAJOR_VERSION >= 3) ?
"Cython module failed to register with collections.abc module" :
"Cython module failed to register with collections module"), 1) < 0)) {
return -1;
}
} else {
module = __Pyx_patch_abc_module(module);
abc_patched = 1;
if (unlikely(!module))
return -1;
Py_DECREF(module);
}
// also register with "backports_abc" module if available, just in case
module = PyImport_ImportModule("backports_abc");
if (module) {
module = __Pyx_patch_abc_module(module);
Py_XDECREF(module);
}
if (!module) {
PyErr_Clear();
}
}
#else
// avoid "unused" warning for __Pyx_Coroutine_patch_module()
if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL);
#endif
return 0;
}
//////////////////// PatchAsyncIO.proto ////////////////////
// run after importing "asyncio" to patch Cython generator support into it
static PyObject* __Pyx_patch_asyncio(PyObject* module); /*proto*/
//////////////////// PatchAsyncIO ////////////////////
//@requires: ImportExport.c::Import
//@requires: PatchModuleWithCoroutine
//@requires: PatchInspect
static PyObject* __Pyx_patch_asyncio(PyObject* module) {
#if PY_VERSION_HEX < 0x030500B2 && \
(defined(__Pyx_Coroutine_USED) || defined(__Pyx_Generator_USED)) && \
(!defined(CYTHON_PATCH_ASYNCIO) || CYTHON_PATCH_ASYNCIO)
PyObject *patch_module = NULL;
static int asyncio_patched = 0;
if (unlikely((!asyncio_patched) && module)) {
PyObject *package;
package = __Pyx_Import(PYIDENT("asyncio.coroutines"), NULL, 0);
if (package) {
patch_module = __Pyx_Coroutine_patch_module(
PyObject_GetAttrString(package, "coroutines"), CSTRING("""\
try:
coro_types = _module._COROUTINE_TYPES
except AttributeError: pass
else:
if _cython_coroutine_type is not None and _cython_coroutine_type not in coro_types:
coro_types = tuple(coro_types) + (_cython_coroutine_type,)
if _cython_generator_type is not None and _cython_generator_type not in coro_types:
coro_types = tuple(coro_types) + (_cython_generator_type,)
_module._COROUTINE_TYPES = coro_types
""")
);
} else {
PyErr_Clear();
// Always enable fallback: even if we compile against 3.4.2, we might be running on 3.4.1 at some point.
//#if PY_VERSION_HEX < 0x03040200
// Py3.4.1 used to have asyncio.tasks instead of asyncio.coroutines
package = __Pyx_Import(PYIDENT("asyncio.tasks"), NULL, 0);
if (unlikely(!package)) goto asyncio_done;
patch_module = __Pyx_Coroutine_patch_module(
PyObject_GetAttrString(package, "tasks"), CSTRING("""\
if hasattr(_module, 'iscoroutine'):
old_types = getattr(_module.iscoroutine, '_cython_coroutine_types', None)
if old_types is None or not isinstance(old_types, set):
old_types = set()
def cy_wrap(orig_func, type=type, cython_coroutine_types=old_types):
def cy_iscoroutine(obj): return type(obj) in cython_coroutine_types or orig_func(obj)
cy_iscoroutine._cython_coroutine_types = cython_coroutine_types
return cy_iscoroutine
_module.iscoroutine = cy_wrap(_module.iscoroutine)
if _cython_coroutine_type is not None:
old_types.add(_cython_coroutine_type)
if _cython_generator_type is not None:
old_types.add(_cython_generator_type)
""")
);
//#endif
// Py < 0x03040200
}
Py_DECREF(package);
if (unlikely(!patch_module)) goto ignore;
//#if PY_VERSION_HEX < 0x03040200
asyncio_done:
PyErr_Clear();
//#endif
asyncio_patched = 1;
#ifdef __Pyx_Generator_USED
// now patch inspect.isgenerator() by looking up the imported module in the patched asyncio module
{
PyObject *inspect_module;
if (patch_module) {
inspect_module = PyObject_GetAttr(patch_module, PYIDENT("inspect"));
Py_DECREF(patch_module);
} else {
inspect_module = __Pyx_Import(PYIDENT("inspect"), NULL, 0);
}
if (unlikely(!inspect_module)) goto ignore;
inspect_module = __Pyx_patch_inspect(inspect_module);
if (unlikely(!inspect_module)) {
Py_DECREF(module);
module = NULL;
}
Py_XDECREF(inspect_module);
}
#else
// avoid "unused" warning for __Pyx_patch_inspect()
if ((0)) return __Pyx_patch_inspect(module);
#endif
}
return module;
ignore:
PyErr_WriteUnraisable(module);
if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch asyncio package with custom generator type", 1) < 0)) {
Py_DECREF(module);
module = NULL;
}
#else
// avoid "unused" warning for __Pyx_Coroutine_patch_module()
if ((0)) return __Pyx_patch_inspect(__Pyx_Coroutine_patch_module(module, NULL));
#endif
return module;
}
//////////////////// PatchInspect.proto ////////////////////
// run after importing "inspect" to patch Cython generator support into it
static PyObject* __Pyx_patch_inspect(PyObject* module); /*proto*/
//////////////////// PatchInspect ////////////////////
//@requires: PatchModuleWithCoroutine
static PyObject* __Pyx_patch_inspect(PyObject* module) {
#if defined(__Pyx_Generator_USED) && (!defined(CYTHON_PATCH_INSPECT) || CYTHON_PATCH_INSPECT)
static int inspect_patched = 0;
if (unlikely((!inspect_patched) && module)) {
module = __Pyx_Coroutine_patch_module(
module, CSTRING("""\
old_types = getattr(_module.isgenerator, '_cython_generator_types', None)
if old_types is None or not isinstance(old_types, set):
old_types = set()
def cy_wrap(orig_func, type=type, cython_generator_types=old_types):
def cy_isgenerator(obj): return type(obj) in cython_generator_types or orig_func(obj)
cy_isgenerator._cython_generator_types = cython_generator_types
return cy_isgenerator
_module.isgenerator = cy_wrap(_module.isgenerator)
old_types.add(_cython_generator_type)
""")
);
inspect_patched = 1;
}
#else
// avoid "unused" warning for __Pyx_Coroutine_patch_module()
if ((0)) return __Pyx_Coroutine_patch_module(module, NULL);
#endif
return module;
}
//////////////////// StopAsyncIteration.proto ////////////////////
#define __Pyx_StopAsyncIteration_USED
static PyObject *__Pyx_PyExc_StopAsyncIteration;
static int __pyx_StopAsyncIteration_init(void); /*proto*/
//////////////////// StopAsyncIteration ////////////////////
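// Python < 3.5 has no builtin StopAsyncIteration. Reuse one previously
// registered in builtins (e.g. by another Cython module) if present,
// otherwise create a compatible exception type and register it there so
// that all modules in the process share a single type.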
#if PY_VERSION_HEX < 0x030500B1
static PyTypeObject __Pyx__PyExc_StopAsyncIteration_type = {
PyVarObject_HEAD_INIT(0, 0)
"StopAsyncIteration", /*tp_name*/
sizeof(PyBaseExceptionObject), /*tp_basicsize*/
0, /*tp_itemsize*/
0, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare / reserved*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC, /*tp_flags*/
PyDoc_STR("Signal the end from iterator.__anext__()."), /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
0, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
0, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM+0 >= 0x06000000
0, /*tp_pypy_flags*/
#endif
};
#endif
static int __pyx_StopAsyncIteration_init(void) {
#if PY_VERSION_HEX >= 0x030500B1
__Pyx_PyExc_StopAsyncIteration = PyExc_StopAsyncIteration;
#else
PyObject *builtins = PyEval_GetBuiltins();
if (likely(builtins)) {
PyObject *exc = PyMapping_GetItemString(builtins, (char*) "StopAsyncIteration");
if (exc) {
__Pyx_PyExc_StopAsyncIteration = exc;
return 0;
}
}
PyErr_Clear();
__Pyx__PyExc_StopAsyncIteration_type.tp_traverse = ((PyTypeObject*)PyExc_BaseException)->tp_traverse;
__Pyx__PyExc_StopAsyncIteration_type.tp_clear = ((PyTypeObject*)PyExc_BaseException)->tp_clear;
__Pyx__PyExc_StopAsyncIteration_type.tp_dictoffset = ((PyTypeObject*)PyExc_BaseException)->tp_dictoffset;
__Pyx__PyExc_StopAsyncIteration_type.tp_base = (PyTypeObject*)PyExc_Exception;
__Pyx_PyExc_StopAsyncIteration = (PyObject*) __Pyx_FetchCommonType(&__Pyx__PyExc_StopAsyncIteration_type);
if (unlikely(!__Pyx_PyExc_StopAsyncIteration))
return -1;
if (builtins && unlikely(PyMapping_SetItemString(builtins, (char*) "StopAsyncIteration", __Pyx_PyExc_StopAsyncIteration) < 0))
return -1;
#endif
return 0;
}
# Cython-0.29.28/Cython/Utility/CpdefEnums.pyx
#################### EnumBase ####################
cimport cython
cdef extern from *:
int PY_VERSION_HEX
cdef object __Pyx_OrderedDict
if PY_VERSION_HEX >= 0x02070000:
from collections import OrderedDict as __Pyx_OrderedDict
else:
__Pyx_OrderedDict = dict
@cython.internal
cdef class __Pyx_EnumMeta(type):
def __init__(cls, name, parents, dct):
type.__init__(cls, name, parents, dct)
cls.__members__ = __Pyx_OrderedDict()
def __iter__(cls):
return iter(cls.__members__.values())
def __getitem__(cls, name):
return cls.__members__[name]
# @cython.internal
cdef object __Pyx_EnumBase
class __Pyx_EnumBase(int):
__metaclass__ = __Pyx_EnumMeta
def __new__(cls, value, name=None):
for v in cls:
if v == value:
return v
if name is None:
raise ValueError("Unknown enum value: '%s'" % value)
res = int.__new__(cls, value)
res.name = name
setattr(cls, name, res)
cls.__members__[name] = res
return res
def __repr__(self):
return "<%s.%s: %d>" % (self.__class__.__name__, self.name, self)
def __str__(self):
return "%s.%s" % (self.__class__.__name__, self.name)
if PY_VERSION_HEX >= 0x03040000:
from enum import IntEnum as __Pyx_EnumBase
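# Illustrative use of the fallback class (mirrors the generated code in the
# EnumType template below):
#     class Color(__Pyx_EnumBase): pass
#     RED = Color(0, 'RED')    # creates and registers Color.RED
#     assert Color(0) is RED   # lookup by value returns the existing member
# On Python 3.4+, the stdlib IntEnum imported above replaces this fallback.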
#################### EnumType ####################
#@requires: EnumBase
cdef dict __Pyx_globals = globals()
if PY_VERSION_HEX >= 0x03040000:
# create new IntEnum()
{{name}} = __Pyx_EnumBase('{{name}}', __Pyx_OrderedDict([
{{for item in items}}
('{{item}}', {{item}}),
{{endfor}}
]))
{{for item in items}}
__Pyx_globals['{{item}}'] = {{name}}.{{item}}
{{endfor}}
else:
class {{name}}(__Pyx_EnumBase):
pass
{{for item in items}}
__Pyx_globals['{{item}}'] = {{name}}({{item}}, '{{item}}')
{{endfor}}
# Cython-0.29.28/Cython/Utility/CppConvert.pyx
# TODO: Figure out how many of the pass-by-value copies the compiler can eliminate.
#################### string.from_py ####################
cdef extern from *:
cdef cppclass string "{{type}}":
string()
string(char* c_str, size_t size)
cdef const char* __Pyx_PyObject_AsStringAndSize(object, Py_ssize_t*) except NULL
@cname("{{cname}}")
cdef string {{cname}}(object o) except *:
cdef Py_ssize_t length = 0
cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length)
return string(data, length)
#################### string.to_py ####################
#cimport cython
#from libcpp.string cimport string
cdef extern from *:
cdef cppclass string "{{type}}":
char* data()
size_t size()
{{for py_type in ['PyObject', 'PyUnicode', 'PyStr', 'PyBytes', 'PyByteArray']}}
cdef extern from *:
cdef object __Pyx_{{py_type}}_FromStringAndSize(const char*, size_t)
@cname("{{cname.replace("PyObject", py_type, 1)}}")
cdef inline object {{cname.replace("PyObject", py_type, 1)}}(const string& s):
return __Pyx_{{py_type}}_FromStringAndSize(s.data(), s.size())
{{endfor}}
#################### vector.from_py ####################
cdef extern from *:
cdef cppclass vector "std::vector" [T]:
void push_back(T&)
@cname("{{cname}}")
cdef vector[X] {{cname}}(object o) except *:
cdef vector[X] v
for item in o:
v.push_back(item)
return v
#################### vector.to_py ####################
cdef extern from *:
cdef cppclass vector "const std::vector" [T]:
size_t size()
T& operator[](size_t)
@cname("{{cname}}")
cdef object {{cname}}(vector[X]& v):
return [v[i] for i in range(v.size())]
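# Usage sketch (assumed typical): with `from libcpp.vector cimport vector`,
# an assignment like `cdef vector[int] v = [1, 2, 3]` triggers the from_py
# conversion above, and returning a vector from a `def` function triggers
# to_py, building a new Python list.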
#################### list.from_py ####################
cdef extern from *:
cdef cppclass cpp_list "std::list" [T]:
void push_back(T&)
@cname("{{cname}}")
cdef cpp_list[X] {{cname}}(object o) except *:
cdef cpp_list[X] l
for item in o:
l.push_back(item)
return l
#################### list.to_py ####################
cimport cython
cdef extern from *:
cdef cppclass cpp_list "std::list" [T]:
cppclass const_iterator:
T& operator*()
const_iterator operator++()
bint operator!=(const_iterator)
const_iterator begin()
const_iterator end()
@cname("{{cname}}")
cdef object {{cname}}(const cpp_list[X]& v):
o = []
cdef cpp_list[X].const_iterator iter = v.begin()
while iter != v.end():
o.append(cython.operator.dereference(iter))
cython.operator.preincrement(iter)
return o
#################### set.from_py ####################
cdef extern from *:
cdef cppclass set "std::{{maybe_unordered}}set" [T]:
void insert(T&)
@cname("{{cname}}")
cdef set[X] {{cname}}(object o) except *:
cdef set[X] s
for item in o:
s.insert(item)
return s
#################### set.to_py ####################
cimport cython
cdef extern from *:
cdef cppclass cpp_set "std::{{maybe_unordered}}set" [T]:
cppclass const_iterator:
T& operator*()
const_iterator operator++()
bint operator!=(const_iterator)
const_iterator begin()
const_iterator end()
@cname("{{cname}}")
cdef object {{cname}}(const cpp_set[X]& s):
o = set()
cdef cpp_set[X].const_iterator iter = s.begin()
while iter != s.end():
o.add(cython.operator.dereference(iter))
cython.operator.preincrement(iter)
return o
#################### pair.from_py ####################
cdef extern from *:
cdef cppclass pair "std::pair" [T, U]:
pair()
pair(T&, U&)
@cname("{{cname}}")
cdef pair[X,Y] {{cname}}(object o) except *:
x, y = o
return pair[X,Y](x, y)
#################### pair.to_py ####################
cdef extern from *:
cdef cppclass pair "std::pair" [T, U]:
T first
U second
@cname("{{cname}}")
cdef object {{cname}}(const pair[X,Y]& p):
return p.first, p.second
#################### map.from_py ####################
cdef extern from *:
cdef cppclass pair "std::pair" [T, U]:
pair(T&, U&)
cdef cppclass map "std::{{maybe_unordered}}map" [T, U]:
void insert(pair[T, U]&)
cdef cppclass vector "std::vector" [T]:
pass
@cname("{{cname}}")
cdef map[X,Y] {{cname}}(object o) except *:
cdef dict d = o
cdef map[X,Y] m
for key, value in d.iteritems():
m.insert(pair[X,Y](key, value))
return m
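# Note: Cython compiles dict.iteritems() on a typed dict into direct
# C-level dict iteration, so this also works when targeting Python 3.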
#################### map.to_py ####################
# TODO: Work out const so that this can take a const
# reference rather than pass by value.
cimport cython
cdef extern from *:
cdef cppclass map "std::{{maybe_unordered}}map" [T, U]:
cppclass value_type:
T first
U second
cppclass const_iterator:
value_type& operator*()
const_iterator operator++()
bint operator!=(const_iterator)
const_iterator begin()
const_iterator end()
@cname("{{cname}}")
cdef object {{cname}}(const map[X,Y]& s):
o = {}
cdef const map[X,Y].value_type *key_value
cdef map[X,Y].const_iterator iter = s.begin()
while iter != s.end():
key_value = &cython.operator.dereference(iter)
o[key_value.first] = key_value.second
cython.operator.preincrement(iter)
return o
#################### complex.from_py ####################
cdef extern from *:
cdef cppclass std_complex "std::complex" [T]:
std_complex()
std_complex(T, T) except +
@cname("{{cname}}")
cdef std_complex[X] {{cname}}(object o) except *:
cdef double complex z = o
return std_complex[X](z.real, z.imag)
#################### complex.to_py ####################
cdef extern from *:
cdef cppclass std_complex "std::complex" [T]:
X real()
X imag()
@cname("{{cname}}")
cdef object {{cname}}(const std_complex[X]& z):
cdef double complex tmp
tmp.real = z.real()
tmp.imag = z.imag()
return tmp
// Cython-0.29.28/Cython/Utility/CppSupport.cpp
/////////////// CppExceptionConversion.proto ///////////////
#ifndef __Pyx_CppExn2PyErr
#include <new>
#include <typeinfo>
#include <stdexcept>
#include <ios>
static void __Pyx_CppExn2PyErr() {
// Catch a handful of different errors here and turn them into the
// equivalent Python errors.
try {
if (PyErr_Occurred())
; // let the latest Python exn pass through and ignore the current one
else
throw;
} catch (const std::bad_alloc& exn) {
PyErr_SetString(PyExc_MemoryError, exn.what());
} catch (const std::bad_cast& exn) {
PyErr_SetString(PyExc_TypeError, exn.what());
} catch (const std::bad_typeid& exn) {
PyErr_SetString(PyExc_TypeError, exn.what());
} catch (const std::domain_error& exn) {
PyErr_SetString(PyExc_ValueError, exn.what());
} catch (const std::invalid_argument& exn) {
PyErr_SetString(PyExc_ValueError, exn.what());
} catch (const std::ios_base::failure& exn) {
// Unfortunately, in standard C++ we have no way of distinguishing EOF
// from other errors here; be careful with the exception mask
PyErr_SetString(PyExc_IOError, exn.what());
} catch (const std::out_of_range& exn) {
// Change out_of_range to IndexError
PyErr_SetString(PyExc_IndexError, exn.what());
} catch (const std::overflow_error& exn) {
PyErr_SetString(PyExc_OverflowError, exn.what());
} catch (const std::range_error& exn) {
PyErr_SetString(PyExc_ArithmeticError, exn.what());
} catch (const std::underflow_error& exn) {
PyErr_SetString(PyExc_ArithmeticError, exn.what());
} catch (const std::exception& exn) {
PyErr_SetString(PyExc_RuntimeError, exn.what());
}
catch (...)
{
PyErr_SetString(PyExc_RuntimeError, "Unknown exception");
}
}
#endif
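// The #ifndef guard above allows a project to substitute its own translator:
// either define the macro __Pyx_CppExn2PyErr before this utility is emitted,
// or declare the extern C++ function with a custom handler, e.g. (sketch,
// `raise_py_error` is a user-supplied cdef function):
//     cdef int do_stuff() except +raise_py_error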
/////////////// PythranConversion.proto ///////////////
template <class T>
auto __Pyx_pythran_to_python(T &&value) -> decltype(to_python(
typename pythonic::returnable<typename std::remove_cv<typename std::remove_reference<T>::type>::type>::type{std::forward<T>(value)}))
{
using returnable_type = typename pythonic::returnable<typename std::remove_cv<typename std::remove_reference<T>::type>::type>::type;
return to_python(returnable_type{std::forward<T>(value)});
}
// Cython-0.29.28/Cython/Utility/CythonFunction.c
//////////////////// CythonFunctionShared.proto ////////////////////
#define __Pyx_CyFunction_USED 1
#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
#define __Pyx_CYFUNCTION_CCLASS 0x04
#define __Pyx_CyFunction_GetClosure(f) \
(((__pyx_CyFunctionObject *) (f))->func_closure)
#define __Pyx_CyFunction_GetClassObj(f) \
(((__pyx_CyFunctionObject *) (f))->func_classobj)
#define __Pyx_CyFunction_Defaults(type, f) \
((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
#define __Pyx_CyFunction_SetDefaultsGetter(f, g) \
((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
typedef struct {
PyCFunctionObject func;
#if PY_VERSION_HEX < 0x030500A0
PyObject *func_weakreflist;
#endif
PyObject *func_dict;
PyObject *func_name;
PyObject *func_qualname;
PyObject *func_doc;
PyObject *func_globals;
PyObject *func_code;
PyObject *func_closure;
// No-args super() class cell
PyObject *func_classobj;
// Dynamic default args and annotations
void *defaults;
int defaults_pyobjects;
size_t defaults_size; // used by FusedFunction for copying defaults
int flags;
// Defaults info
PyObject *defaults_tuple; /* Const defaults tuple */
PyObject *defaults_kwdict; /* Const kwonly defaults dict */
PyObject *(*defaults_getter)(PyObject *);
PyObject *func_annotations; /* function annotations dict */
} __pyx_CyFunctionObject;
static PyTypeObject *__pyx_CyFunctionType = 0;
#define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType))
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml,
int flags, PyObject* qualname,
PyObject *self,
PyObject *module, PyObject *globals,
PyObject* code);
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m,
size_t size,
int pyobjects);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m,
PyObject *tuple);
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m,
PyObject *dict);
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m,
PyObject *dict);
static int __pyx_CyFunction_init(void);
//////////////////// CythonFunctionShared ////////////////////
//@substitute: naming
//@requires: CommonStructures.c::FetchCommonType
////@requires: ObjectHandling.c::PyObjectGetAttrStr
#include <structmember.h>
static PyObject *
__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure)
{
if (unlikely(op->func_doc == NULL)) {
if (op->func.m_ml->ml_doc) {
#if PY_MAJOR_VERSION >= 3
op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc);
#else
op->func_doc = PyString_FromString(op->func.m_ml->ml_doc);
#endif
if (unlikely(op->func_doc == NULL))
return NULL;
} else {
Py_INCREF(Py_None);
return Py_None;
}
}
Py_INCREF(op->func_doc);
return op->func_doc;
}
static int
__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp = op->func_doc;
if (value == NULL) {
// Mark as deleted
value = Py_None;
}
Py_INCREF(value);
op->func_doc = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
if (unlikely(op->func_name == NULL)) {
#if PY_MAJOR_VERSION >= 3
op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name);
#else
op->func_name = PyString_InternFromString(op->func.m_ml->ml_name);
#endif
if (unlikely(op->func_name == NULL))
return NULL;
}
Py_INCREF(op->func_name);
return op->func_name;
}
static int
__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__name__ must be set to a string object");
return -1;
}
tmp = op->func_name;
Py_INCREF(value);
op->func_name = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
Py_INCREF(op->func_qualname);
return op->func_qualname;
}
static int
__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
#if PY_MAJOR_VERSION >= 3
if (unlikely(value == NULL || !PyUnicode_Check(value)))
#else
if (unlikely(value == NULL || !PyString_Check(value)))
#endif
{
PyErr_SetString(PyExc_TypeError,
"__qualname__ must be set to a string object");
return -1;
}
tmp = op->func_qualname;
Py_INCREF(value);
op->func_qualname = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure)
{
PyObject *self;
self = m->func_closure;
if (self == NULL)
self = Py_None;
Py_INCREF(self);
return self;
}
static PyObject *
__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
if (unlikely(op->func_dict == NULL)) {
op->func_dict = PyDict_New();
if (unlikely(op->func_dict == NULL))
return NULL;
}
Py_INCREF(op->func_dict);
return op->func_dict;
}
static int
__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context)
{
PyObject *tmp;
if (unlikely(value == NULL)) {
PyErr_SetString(PyExc_TypeError,
"function's dictionary may not be deleted");
return -1;
}
if (unlikely(!PyDict_Check(value))) {
PyErr_SetString(PyExc_TypeError,
"setting function's dictionary to a non-dict");
return -1;
}
tmp = op->func_dict;
Py_INCREF(value);
op->func_dict = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
Py_INCREF(op->func_globals);
return op->func_globals;
}
static PyObject *
__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
Py_INCREF(Py_None);
return Py_None;
}
static PyObject *
__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context)
{
PyObject* result = (op->func_code) ? op->func_code : Py_None;
Py_INCREF(result);
return result;
}
static int
__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) {
int result = 0;
PyObject *res = op->defaults_getter((PyObject *) op);
if (unlikely(!res))
return -1;
// Cache result
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
op->defaults_tuple = PyTuple_GET_ITEM(res, 0);
Py_INCREF(op->defaults_tuple);
op->defaults_kwdict = PyTuple_GET_ITEM(res, 1);
Py_INCREF(op->defaults_kwdict);
#else
op->defaults_tuple = PySequence_ITEM(res, 0);
if (unlikely(!op->defaults_tuple)) result = -1;
else {
op->defaults_kwdict = PySequence_ITEM(res, 1);
if (unlikely(!op->defaults_kwdict)) result = -1;
}
#endif
Py_DECREF(res);
return result;
}
static int
__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
PyObject* tmp;
if (!value) {
// del => explicit None to prevent rebuilding
value = Py_None;
} else if (value != Py_None && !PyTuple_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"__defaults__ must be set to a tuple object");
return -1;
}
Py_INCREF(value);
tmp = op->defaults_tuple;
op->defaults_tuple = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
PyObject* result = op->defaults_tuple;
if (unlikely(!result)) {
if (op->defaults_getter) {
if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
result = op->defaults_tuple;
} else {
result = Py_None;
}
}
Py_INCREF(result);
return result;
}
static int
__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
PyObject* tmp;
if (!value) {
// del => explicit None to prevent rebuilding
value = Py_None;
} else if (value != Py_None && !PyDict_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"__kwdefaults__ must be set to a dict object");
return -1;
}
Py_INCREF(value);
tmp = op->defaults_kwdict;
op->defaults_kwdict = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
PyObject* result = op->defaults_kwdict;
if (unlikely(!result)) {
if (op->defaults_getter) {
if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL;
result = op->defaults_kwdict;
} else {
result = Py_None;
}
}
Py_INCREF(result);
return result;
}
static int
__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) {
PyObject* tmp;
if (!value || value == Py_None) {
value = NULL;
} else if (!PyDict_Check(value)) {
PyErr_SetString(PyExc_TypeError,
"__annotations__ must be set to a dict object");
return -1;
}
Py_XINCREF(value);
tmp = op->func_annotations;
op->func_annotations = value;
Py_XDECREF(tmp);
return 0;
}
static PyObject *
__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
PyObject* result = op->func_annotations;
if (unlikely(!result)) {
result = PyDict_New();
if (unlikely(!result)) return NULL;
op->func_annotations = result;
}
Py_INCREF(result);
return result;
}
//#if PY_VERSION_HEX >= 0x030400C1
//static PyObject *
//__Pyx_CyFunction_get_signature(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) {
// PyObject *inspect_module, *signature_class, *signature;
// // from inspect import Signature
// inspect_module = PyImport_ImportModuleLevelObject(PYIDENT("inspect"), NULL, NULL, NULL, 0);
// if (unlikely(!inspect_module))
// goto bad;
// signature_class = __Pyx_PyObject_GetAttrStr(inspect_module, PYIDENT("Signature"));
// Py_DECREF(inspect_module);
// if (unlikely(!signature_class))
// goto bad;
// // return Signature.from_function(op)
// signature = PyObject_CallMethodObjArgs(signature_class, PYIDENT("from_function"), op, NULL);
// Py_DECREF(signature_class);
// if (likely(signature))
// return signature;
//bad:
// // make sure we raise an AttributeError from this property on any errors
// if (!PyErr_ExceptionMatches(PyExc_AttributeError))
// PyErr_SetString(PyExc_AttributeError, "failed to calculate __signature__");
// return NULL;
//}
//#endif
static PyGetSetDef __pyx_CyFunction_getsets[] = {
{(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
{(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0},
{(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
{(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0},
{(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0},
{(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0},
{(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
{(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0},
{(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
{(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0},
{(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
{(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0},
{(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
{(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0},
{(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
{(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0},
{(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0},
{(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0},
//#if PY_VERSION_HEX >= 0x030400C1
// {(char *) "__signature__", (getter)__Pyx_CyFunction_get_signature, 0, 0, 0},
//#endif
{0, 0, 0, 0, 0}
};
static PyMemberDef __pyx_CyFunction_members[] = {
{(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0},
{0, 0, 0, 0, 0}
};
static PyObject *
__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args)
{
#if PY_MAJOR_VERSION >= 3
Py_INCREF(m->func_qualname);
return m->func_qualname;
#else
return PyString_FromString(m->func.m_ml->ml_name);
#endif
}
static PyMethodDef __pyx_CyFunction_methods[] = {
{"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0},
{0, 0, 0, 0}
};
#if PY_VERSION_HEX < 0x030500A0
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist)
#else
#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist)
#endif
static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname,
PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
if (unlikely(op == NULL))
return NULL;
op->flags = flags;
__Pyx_CyFunction_weakreflist(op) = NULL;
op->func.m_ml = ml;
op->func.m_self = (PyObject *) op;
Py_XINCREF(closure);
op->func_closure = closure;
Py_XINCREF(module);
op->func.m_module = module;
op->func_dict = NULL;
op->func_name = NULL;
Py_INCREF(qualname);
op->func_qualname = qualname;
op->func_doc = NULL;
op->func_classobj = NULL;
op->func_globals = globals;
Py_INCREF(op->func_globals);
Py_XINCREF(code);
op->func_code = code;
// Dynamic Default args
op->defaults_pyobjects = 0;
op->defaults_size = 0;
op->defaults = NULL;
op->defaults_tuple = NULL;
op->defaults_kwdict = NULL;
op->defaults_getter = NULL;
op->func_annotations = NULL;
return (PyObject *) op;
}
static int
__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m)
{
Py_CLEAR(m->func_closure);
Py_CLEAR(m->func.m_module);
Py_CLEAR(m->func_dict);
Py_CLEAR(m->func_name);
Py_CLEAR(m->func_qualname);
Py_CLEAR(m->func_doc);
Py_CLEAR(m->func_globals);
Py_CLEAR(m->func_code);
Py_CLEAR(m->func_classobj);
Py_CLEAR(m->defaults_tuple);
Py_CLEAR(m->defaults_kwdict);
Py_CLEAR(m->func_annotations);
if (m->defaults) {
PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
int i;
for (i = 0; i < m->defaults_pyobjects; i++)
Py_XDECREF(pydefaults[i]);
PyObject_Free(m->defaults);
m->defaults = NULL;
}
return 0;
}
static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
if (__Pyx_CyFunction_weakreflist(m) != NULL)
PyObject_ClearWeakRefs((PyObject *) m);
__Pyx_CyFunction_clear(m);
PyObject_GC_Del(m);
}
static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m)
{
PyObject_GC_UnTrack(m);
__Pyx__CyFunction_dealloc(m);
}
static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg)
{
Py_VISIT(m->func_closure);
Py_VISIT(m->func.m_module);
Py_VISIT(m->func_dict);
Py_VISIT(m->func_name);
Py_VISIT(m->func_qualname);
Py_VISIT(m->func_doc);
Py_VISIT(m->func_globals);
Py_VISIT(m->func_code);
Py_VISIT(m->func_classobj);
Py_VISIT(m->defaults_tuple);
Py_VISIT(m->defaults_kwdict);
if (m->defaults) {
PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m);
int i;
for (i = 0; i < m->defaults_pyobjects; i++)
Py_VISIT(pydefaults[i]);
}
return 0;
}
static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type)
{
#if PY_MAJOR_VERSION < 3
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) {
Py_INCREF(func);
return func;
}
if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) {
if (type == NULL)
type = (PyObject *)(Py_TYPE(obj));
return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type)));
}
if (obj == Py_None)
obj = NULL;
#endif
return __Pyx_PyMethod_New(func, obj, type);
}
static PyObject*
__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op)
{
#if PY_MAJOR_VERSION >= 3
return PyUnicode_FromFormat("",
op->func_qualname, (void *)op);
#else
return PyString_FromFormat("",
PyString_AsString(op->func_qualname), (void *)op);
#endif
}
static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) {
// originally copied from PyCFunction_Call() in CPython's Objects/methodobject.c
PyCFunctionObject* f = (PyCFunctionObject*)func;
PyCFunction meth = f->m_ml->ml_meth;
Py_ssize_t size;
switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) {
case METH_VARARGS:
if (likely(kw == NULL || PyDict_Size(kw) == 0))
return (*meth)(self, arg);
break;
case METH_VARARGS | METH_KEYWORDS:
return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw);
case METH_NOARGS:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
size = PyTuple_GET_SIZE(arg);
if (likely(size == 0))
return (*meth)(self, NULL);
PyErr_Format(PyExc_TypeError,
"%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)",
f->m_ml->ml_name, size);
return NULL;
}
break;
case METH_O:
if (likely(kw == NULL || PyDict_Size(kw) == 0)) {
size = PyTuple_GET_SIZE(arg);
if (likely(size == 1)) {
PyObject *result, *arg0;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
arg0 = PyTuple_GET_ITEM(arg, 0);
#else
arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL;
#endif
result = (*meth)(self, arg0);
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_DECREF(arg0);
#endif
return result;
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)",
f->m_ml->ml_name, size);
return NULL;
}
break;
default:
PyErr_SetString(PyExc_SystemError, "Bad call flags in "
"__Pyx_CyFunction_Call. METH_OLDARGS is no "
"longer supported!");
return NULL;
}
PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments",
f->m_ml->ml_name);
return NULL;
}
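/* --- Illustrative sketch, not part of the original file: a METH_O method
 * definition of the kind dispatched by __Pyx_CyFunction_CallMethod above.
 * With kw == NULL and a 1-tuple of positional args, the METH_O branch
 * unpacks the single argument and calls (*meth)(self, arg0) directly. */
static PyObject *__Pyx_example_identity(PyObject *self, PyObject *arg) {
    (void)self;              /* m_self is unused by this example */
    Py_INCREF(arg);
    return arg;              /* return the argument unchanged */
}
static PyMethodDef __Pyx_example_identity_def = {
    "identity", (PyCFunction)__Pyx_example_identity, METH_O, "return the argument"
};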
static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) {
return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw);
}
static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) {
PyObject *result;
__pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) {
Py_ssize_t argc;
PyObject *new_args;
PyObject *self;
argc = PyTuple_GET_SIZE(args);
new_args = PyTuple_GetSlice(args, 1, argc);
if (unlikely(!new_args))
return NULL;
self = PyTuple_GetItem(args, 0);
if (unlikely(!self)) {
Py_DECREF(new_args);
return NULL;
}
result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw);
Py_DECREF(new_args);
} else {
result = __Pyx_CyFunction_Call(func, args, kw);
}
return result;
}
static PyTypeObject __pyx_CyFunctionType_type = {
PyVarObject_HEAD_INIT(0, 0)
"cython_function_or_method", /*tp_name*/
sizeof(__pyx_CyFunctionObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __Pyx_CyFunction_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
(reprfunc) __Pyx_CyFunction_repr, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
__Pyx_CyFunction_CallAsMethod, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
(traverseproc) __Pyx_CyFunction_traverse, /*tp_traverse*/
(inquiry) __Pyx_CyFunction_clear, /*tp_clear*/
0, /*tp_richcompare*/
#if PY_VERSION_HEX < 0x030500A0
offsetof(__pyx_CyFunctionObject, func_weakreflist), /*tp_weaklistoffset*/
#else
offsetof(PyCFunctionObject, m_weakreflist), /*tp_weaklistoffset*/
#endif
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_CyFunction_methods, /*tp_methods*/
__pyx_CyFunction_members, /*tp_members*/
__pyx_CyFunction_getsets, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
__Pyx_CyFunction_descr_get, /*tp_descr_get*/
0, /*tp_descr_set*/
offsetof(__pyx_CyFunctionObject, func_dict),/*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
0, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static int __pyx_CyFunction_init(void) {
__pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type);
if (unlikely(__pyx_CyFunctionType == NULL)) {
return -1;
}
return 0;
}
static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults = PyObject_Malloc(size);
if (unlikely(!m->defaults))
return PyErr_NoMemory();
memset(m->defaults, 0, size);
m->defaults_pyobjects = pyobjects;
m->defaults_size = size;
return m->defaults;
}
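/* --- Illustrative sketch, not part of the original file: how generated code
 * might use __Pyx_CyFunction_InitDefaults above. The defaults buffer is a raw
 * allocation whose first `pyobjects` slots are PyObject* pointers, which is
 * what __Pyx_CyFunction_traverse/clear scan. Struct and names are assumed. */
typedef struct {
    PyObject *arg0_default;  /* one PyObject* slot, visible to the GC hooks */
} __Pyx_example_defaults;
static int __Pyx_example_set_default(PyObject *func, PyObject *value) {
    __Pyx_example_defaults *d = (__Pyx_example_defaults *)
        __Pyx_CyFunction_InitDefaults(func, sizeof(__Pyx_example_defaults), 1);
    if (!d) return -1;       /* PyErr_NoMemory() has already been set */
    Py_INCREF(value);
    d->arg0_default = value; /* read back later via __Pyx_CyFunction_Defaults */
    return 0;
}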
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults_tuple = tuple;
Py_INCREF(tuple);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->defaults_kwdict = dict;
Py_INCREF(dict);
}
static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func;
m->func_annotations = dict;
Py_INCREF(dict);
}
//////////////////// CythonFunction.proto ////////////////////
static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml,
int flags, PyObject* qualname,
PyObject *closure,
PyObject *module, PyObject *globals,
PyObject* code);
//////////////////// CythonFunction ////////////////////
//@requires: CythonFunctionShared
static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname,
PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) {
PyObject *op = __Pyx_CyFunction_Init(
PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType),
ml, flags, qualname, closure, module, globals, code
);
if (likely(op)) {
PyObject_GC_Track(op);
}
return op;
}
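/* --- Illustrative sketch, not part of the original file: how module-init
 * code might create a plain CyFunction with __Pyx_CyFunction_New above.
 * Parameter names are assumed; error handling is left to the caller. */
static PyObject *__Pyx_example_make_cyfunction(PyMethodDef *ml, PyObject *module,
                                               PyObject *globals, PyObject *qualname) {
    return __Pyx_CyFunction_New(ml, 0 /* no flags */, qualname,
                                NULL /* closure */, module, globals,
                                NULL /* no code object */);
    /* returns NULL with a Python exception set on failure */
}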
//////////////////// CyFunctionClassCell.proto ////////////////////
static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *classobj);/*proto*/
//////////////////// CyFunctionClassCell ////////////////////
//@requires: CythonFunctionShared
static int __Pyx_CyFunction_InitClassCell(PyObject *cyfunctions, PyObject *classobj) {
Py_ssize_t i, count = PyList_GET_SIZE(cyfunctions);
for (i = 0; i < count; i++) {
__pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *)
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
PyList_GET_ITEM(cyfunctions, i);
#else
PySequence_ITEM(cyfunctions, i);
if (unlikely(!m))
return -1;
#endif
Py_INCREF(classobj);
m->func_classobj = classobj;
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_DECREF((PyObject*)m);
#endif
}
return 0;
}
//////////////////// FusedFunction.proto ////////////////////
typedef struct {
__pyx_CyFunctionObject func;
PyObject *__signatures__;
PyObject *type;
PyObject *self;
} __pyx_FusedFunctionObject;
static PyObject *__pyx_FusedFunction_New(PyMethodDef *ml, int flags,
PyObject *qualname, PyObject *closure,
PyObject *module, PyObject *globals,
PyObject *code);
static int __pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self);
static PyTypeObject *__pyx_FusedFunctionType = NULL;
static int __pyx_FusedFunction_init(void);
#define __Pyx_FusedFunction_USED
//////////////////// FusedFunction ////////////////////
//@requires: CythonFunctionShared
static PyObject *
__pyx_FusedFunction_New(PyMethodDef *ml, int flags,
PyObject *qualname, PyObject *closure,
PyObject *module, PyObject *globals,
PyObject *code)
{
PyObject *op = __Pyx_CyFunction_Init(
// __pyx_CyFunctionObject is correct below since that's the cast that we want.
PyObject_GC_New(__pyx_CyFunctionObject, __pyx_FusedFunctionType),
ml, flags, qualname, closure, module, globals, code
);
if (likely(op)) {
__pyx_FusedFunctionObject *fusedfunc = (__pyx_FusedFunctionObject *) op;
fusedfunc->__signatures__ = NULL;
fusedfunc->type = NULL;
fusedfunc->self = NULL;
PyObject_GC_Track(op);
}
return op;
}
static void
__pyx_FusedFunction_dealloc(__pyx_FusedFunctionObject *self)
{
PyObject_GC_UnTrack(self);
Py_CLEAR(self->self);
Py_CLEAR(self->type);
Py_CLEAR(self->__signatures__);
__Pyx__CyFunction_dealloc((__pyx_CyFunctionObject *) self);
}
static int
__pyx_FusedFunction_traverse(__pyx_FusedFunctionObject *self,
visitproc visit,
void *arg)
{
Py_VISIT(self->self);
Py_VISIT(self->type);
Py_VISIT(self->__signatures__);
return __Pyx_CyFunction_traverse((__pyx_CyFunctionObject *) self, visit, arg);
}
static int
__pyx_FusedFunction_clear(__pyx_FusedFunctionObject *self)
{
Py_CLEAR(self->self);
Py_CLEAR(self->type);
Py_CLEAR(self->__signatures__);
return __Pyx_CyFunction_clear((__pyx_CyFunctionObject *) self);
}
static PyObject *
__pyx_FusedFunction_descr_get(PyObject *self, PyObject *obj, PyObject *type)
{
__pyx_FusedFunctionObject *func, *meth;
func = (__pyx_FusedFunctionObject *) self;
if (func->self || func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD) {
// Do not allow rebinding and don't do anything for static methods
Py_INCREF(self);
return self;
}
if (obj == Py_None)
obj = NULL;
meth = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_New(
((PyCFunctionObject *) func)->m_ml,
((__pyx_CyFunctionObject *) func)->flags,
((__pyx_CyFunctionObject *) func)->func_qualname,
((__pyx_CyFunctionObject *) func)->func_closure,
((PyCFunctionObject *) func)->m_module,
((__pyx_CyFunctionObject *) func)->func_globals,
((__pyx_CyFunctionObject *) func)->func_code);
if (!meth)
return NULL;
// defaults needs copying fully rather than just copying the pointer
// since otherwise it will be freed on destruction of meth despite
// belonging to func rather than meth
if (func->func.defaults) {
PyObject **pydefaults;
int i;
if (!__Pyx_CyFunction_InitDefaults((PyObject*)meth,
func->func.defaults_size,
func->func.defaults_pyobjects)) {
Py_XDECREF((PyObject*)meth);
return NULL;
}
memcpy(meth->func.defaults, func->func.defaults, func->func.defaults_size);
pydefaults = __Pyx_CyFunction_Defaults(PyObject *, meth);
for (i = 0; i < meth->func.defaults_pyobjects; i++)
Py_XINCREF(pydefaults[i]);
}
Py_XINCREF(func->func.func_classobj);
meth->func.func_classobj = func->func.func_classobj;
Py_XINCREF(func->__signatures__);
meth->__signatures__ = func->__signatures__;
Py_XINCREF(type);
meth->type = type;
Py_XINCREF(func->func.defaults_tuple);
meth->func.defaults_tuple = func->func.defaults_tuple;
if (func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD)
obj = type;
Py_XINCREF(obj);
meth->self = obj;
return (PyObject *) meth;
}
static PyObject *
_obj_to_str(PyObject *obj)
{
if (PyType_Check(obj))
return PyObject_GetAttr(obj, PYIDENT("__name__"));
else
return PyObject_Str(obj);
}
static PyObject *
__pyx_FusedFunction_getitem(__pyx_FusedFunctionObject *self, PyObject *idx)
{
PyObject *signature = NULL;
PyObject *unbound_result_func;
PyObject *result_func = NULL;
if (self->__signatures__ == NULL) {
PyErr_SetString(PyExc_TypeError, "Function is not fused");
return NULL;
}
if (PyTuple_Check(idx)) {
PyObject *list = PyList_New(0);
Py_ssize_t n = PyTuple_GET_SIZE(idx);
PyObject *sep = NULL;
int i;
if (unlikely(!list))
return NULL;
for (i = 0; i < n; i++) {
int ret;
PyObject *string;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
PyObject *item = PyTuple_GET_ITEM(idx, i);
#else
PyObject *item = PySequence_ITEM(idx, i); if (unlikely(!item)) goto __pyx_err;
#endif
string = _obj_to_str(item);
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_DECREF(item);
#endif
if (unlikely(!string)) goto __pyx_err;
ret = PyList_Append(list, string);
Py_DECREF(string);
if (unlikely(ret < 0)) goto __pyx_err;
}
sep = PyUnicode_FromString("|");
if (likely(sep))
signature = PyUnicode_Join(sep, list);
__pyx_err:
;
Py_DECREF(list);
Py_XDECREF(sep);
} else {
signature = _obj_to_str(idx);
}
if (!signature)
return NULL;
unbound_result_func = PyObject_GetItem(self->__signatures__, signature);
if (unbound_result_func) {
if (self->self || self->type) {
__pyx_FusedFunctionObject *unbound = (__pyx_FusedFunctionObject *) unbound_result_func;
// TODO: move this to InitClassCell
Py_CLEAR(unbound->func.func_classobj);
Py_XINCREF(self->func.func_classobj);
unbound->func.func_classobj = self->func.func_classobj;
result_func = __pyx_FusedFunction_descr_get(unbound_result_func,
self->self, self->type);
} else {
result_func = unbound_result_func;
Py_INCREF(result_func);
}
}
Py_DECREF(signature);
Py_XDECREF(unbound_result_func);
return result_func;
}
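/* --- Illustrative sketch, not part of the original file: selecting a fused
 * specialization by signature string, the C-level analogue of the Python
 * expression fused_func["double"]. `fused` is an assumed fused function. */
static PyObject *__Pyx_example_pick_specialization(PyObject *fused) {
    PyObject *special, *key = PyUnicode_FromString("double");
    if (!key) return NULL;
    special = PyObject_GetItem(fused, key);  /* -> __pyx_FusedFunction_getitem */
    Py_DECREF(key);
    return special;  /* NULL with KeyError set if no such signature */
}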
static PyObject *
__pyx_FusedFunction_callfunction(PyObject *func, PyObject *args, PyObject *kw)
{
__pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func;
int static_specialized = (cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD &&
!((__pyx_FusedFunctionObject *) func)->__signatures__);
if (cyfunc->flags & __Pyx_CYFUNCTION_CCLASS && !static_specialized) {
return __Pyx_CyFunction_CallAsMethod(func, args, kw);
} else {
return __Pyx_CyFunction_Call(func, args, kw);
}
}
// Note: the 'self' from method binding is passed in in the args tuple,
// whereas PyCFunctionObject's m_self is passed in as the first
// argument to the C function. For extension methods we need
// to pass 'self' as 'm_self' and not as the first element of the
// args tuple.
static PyObject *
__pyx_FusedFunction_call(PyObject *func, PyObject *args, PyObject *kw)
{
__pyx_FusedFunctionObject *binding_func = (__pyx_FusedFunctionObject *) func;
Py_ssize_t argc = PyTuple_GET_SIZE(args);
PyObject *new_args = NULL;
__pyx_FusedFunctionObject *new_func = NULL;
PyObject *result = NULL;
PyObject *self = NULL;
int is_staticmethod = binding_func->func.flags & __Pyx_CYFUNCTION_STATICMETHOD;
int is_classmethod = binding_func->func.flags & __Pyx_CYFUNCTION_CLASSMETHOD;
if (binding_func->self) {
// Bound method call, put 'self' in the args tuple
Py_ssize_t i;
new_args = PyTuple_New(argc + 1);
if (!new_args)
return NULL;
self = binding_func->self;
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_INCREF(self);
#endif
Py_INCREF(self);
PyTuple_SET_ITEM(new_args, 0, self);
for (i = 0; i < argc; i++) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
PyObject *item = PyTuple_GET_ITEM(args, i);
Py_INCREF(item);
#else
PyObject *item = PySequence_ITEM(args, i); if (unlikely(!item)) goto bad;
#endif
PyTuple_SET_ITEM(new_args, i + 1, item);
}
args = new_args;
} else if (binding_func->type) {
// Unbound method call
if (argc < 1) {
PyErr_SetString(PyExc_TypeError, "Need at least one argument, 0 given.");
return NULL;
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
self = PyTuple_GET_ITEM(args, 0);
#else
self = PySequence_ITEM(args, 0); if (unlikely(!self)) return NULL;
#endif
}
if (self && !is_classmethod && !is_staticmethod) {
int is_instance = PyObject_IsInstance(self, binding_func->type);
if (unlikely(!is_instance)) {
PyErr_Format(PyExc_TypeError,
"First argument should be of type %.200s, got %.200s.",
((PyTypeObject *) binding_func->type)->tp_name,
Py_TYPE(self)->tp_name);
goto bad;
} else if (unlikely(is_instance == -1)) {
goto bad;
}
}
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_XDECREF(self);
self = NULL;
#endif
if (binding_func->__signatures__) {
PyObject *tup;
if (is_staticmethod && binding_func->func.flags & __Pyx_CYFUNCTION_CCLASS) {
// FIXME: this seems wrong, but we must currently pass the signatures dict as 'self' argument
tup = PyTuple_Pack(3, args,
kw == NULL ? Py_None : kw,
binding_func->func.defaults_tuple);
if (unlikely(!tup)) goto bad;
new_func = (__pyx_FusedFunctionObject *) __Pyx_CyFunction_CallMethod(
func, binding_func->__signatures__, tup, NULL);
} else {
tup = PyTuple_Pack(4, binding_func->__signatures__, args,
kw == NULL ? Py_None : kw,
binding_func->func.defaults_tuple);
if (unlikely(!tup)) goto bad;
new_func = (__pyx_FusedFunctionObject *) __pyx_FusedFunction_callfunction(func, tup, NULL);
}
Py_DECREF(tup);
if (unlikely(!new_func))
goto bad;
Py_XINCREF(binding_func->func.func_classobj);
Py_CLEAR(new_func->func.func_classobj);
new_func->func.func_classobj = binding_func->func.func_classobj;
func = (PyObject *) new_func;
}
result = __pyx_FusedFunction_callfunction(func, args, kw);
bad:
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_XDECREF(self);
#endif
Py_XDECREF(new_args);
Py_XDECREF((PyObject *) new_func);
return result;
}
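/* --- Illustrative sketch, not part of the original file: the args repacking
 * described in the note above __pyx_FusedFunction_call, shown in isolation.
 * For a bound method, 'self' is pushed into a fresh args tuple rather than
 * being passed as m_self. Only allocation failure is handled. */
static PyObject *__Pyx_example_prepend_self(PyObject *self, PyObject *args) {
    Py_ssize_t i, argc = PyTuple_GET_SIZE(args);
    PyObject *new_args = PyTuple_New(argc + 1);
    if (!new_args) return NULL;
    Py_INCREF(self);
    PyTuple_SET_ITEM(new_args, 0, self);        /* steals the new reference */
    for (i = 0; i < argc; i++) {
        PyObject *item = PyTuple_GET_ITEM(args, i);
        Py_INCREF(item);
        PyTuple_SET_ITEM(new_args, i + 1, item);
    }
    return new_args;
}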
static PyMemberDef __pyx_FusedFunction_members[] = {
{(char *) "__signatures__",
T_OBJECT,
offsetof(__pyx_FusedFunctionObject, __signatures__),
READONLY,
0},
{0, 0, 0, 0, 0},
};
static PyMappingMethods __pyx_FusedFunction_mapping_methods = {
0,
(binaryfunc) __pyx_FusedFunction_getitem,
0,
};
static PyTypeObject __pyx_FusedFunctionType_type = {
PyVarObject_HEAD_INIT(0, 0)
"fused_cython_function", /*tp_name*/
sizeof(__pyx_FusedFunctionObject), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor) __pyx_FusedFunction_dealloc, /*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#else
0, /*reserved*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
&__pyx_FusedFunction_mapping_methods, /*tp_as_mapping*/
0, /*tp_hash*/
(ternaryfunc) __pyx_FusedFunction_call, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
(traverseproc) __pyx_FusedFunction_traverse, /*tp_traverse*/
(inquiry) __pyx_FusedFunction_clear,/*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
0, /*tp_methods*/
__pyx_FusedFunction_members, /*tp_members*/
// __doc__ is None for the fused function type, but we need it to be
// a descriptor for the instance's __doc__, so rebuild descriptors in our subclass
__pyx_CyFunction_getsets, /*tp_getset*/
// NOTE: tp_base may be changed later during module initialisation when importing CyFunction across modules.
&__pyx_CyFunctionType_type, /*tp_base*/
0, /*tp_dict*/
__pyx_FusedFunction_descr_get, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
0, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800)
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000
0, /*tp_pypy_flags*/
#endif
};
static int __pyx_FusedFunction_init(void) {
// Set base from __Pyx_FetchCommonTypeFromSpec, in case it's different from the local static value.
__pyx_FusedFunctionType_type.tp_base = __pyx_CyFunctionType;
__pyx_FusedFunctionType = __Pyx_FetchCommonType(&__pyx_FusedFunctionType_type);
if (__pyx_FusedFunctionType == NULL) {
return -1;
}
return 0;
}
//////////////////// ClassMethod.proto ////////////////////
#include "descrobject.h"
static CYTHON_UNUSED PyObject* __Pyx_Method_ClassMethod(PyObject *method); /*proto*/
//////////////////// ClassMethod ////////////////////
static PyObject* __Pyx_Method_ClassMethod(PyObject *method) {
#if CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM <= 0x05080000
if (PyObject_TypeCheck(method, &PyWrapperDescr_Type)) {
// cdef classes
return PyClassMethod_New(method);
}
#else
#if CYTHON_COMPILING_IN_PYSTON || CYTHON_COMPILING_IN_PYPY
// special C-API function only in Pyston and PyPy >= 5.9
if (PyMethodDescr_Check(method))
#else
#if PY_MAJOR_VERSION == 2
// PyMethodDescr_Type is not exposed in the CPython C-API in Py2.
static PyTypeObject *methoddescr_type = NULL;
if (methoddescr_type == NULL) {
PyObject *meth = PyObject_GetAttrString((PyObject*)&PyList_Type, "append");
if (!meth) return NULL;
methoddescr_type = Py_TYPE(meth);
Py_DECREF(meth);
}
#else
PyTypeObject *methoddescr_type = &PyMethodDescr_Type;
#endif
if (__Pyx_TypeCheck(method, methoddescr_type))
#endif
{
// cdef classes
PyMethodDescrObject *descr = (PyMethodDescrObject *)method;
#if PY_VERSION_HEX < 0x03020000
PyTypeObject *d_type = descr->d_type;
#else
PyTypeObject *d_type = descr->d_common.d_type;
#endif
return PyDescr_NewClassMethod(d_type, descr->d_method);
}
#endif
else if (PyMethod_Check(method)) {
// python classes
return PyClassMethod_New(PyMethod_GET_FUNCTION(method));
}
else {
return PyClassMethod_New(method);
}
}
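/* --- Illustrative sketch, not part of the original file: applying
 * __Pyx_Method_ClassMethod above to a method looked up in a type's dict,
 * roughly what classmethod() does for cdef class methods. Names assumed. */
static int __Pyx_example_make_classmethod(PyObject *type_dict, const char *name) {
    PyObject *cm, *meth = PyMapping_GetItemString(type_dict, (char *)name);
    int ret;
    if (!meth) return -1;
    cm = __Pyx_Method_ClassMethod(meth);
    Py_DECREF(meth);
    if (!cm) return -1;
    ret = PyMapping_SetItemString(type_dict, (char *)name, cm);
    Py_DECREF(cm);
    return ret;
}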
/* ========== Cython-0.29.28/Cython/Utility/Embed.c ========== */
//////////////////// MainFunction ////////////////////
#ifdef __FreeBSD__
#include <floatingpoint.h>
#endif
#if PY_MAJOR_VERSION < 3
int %(main_method)s(int argc, char** argv) {
#elif defined(WIN32) || defined(MS_WINDOWS)
int %(wmain_method)s(int argc, wchar_t **argv) {
#else
static int __Pyx_main(int argc, wchar_t **argv) {
#endif
/* 754 requires that FP exceptions run in "no stop" mode by default,
* and until C vendors implement C99's ways to control FP exceptions,
* Python requires non-stop mode. Alas, some platforms enable FP
* exceptions by default. Here we disable them.
*/
#ifdef __FreeBSD__
fp_except_t m;
m = fpgetmask();
fpsetmask(m & ~FP_X_OFL);
#endif
if (argc && argv)
Py_SetProgramName(argv[0]);
Py_Initialize();
if (argc && argv)
PySys_SetArgv(argc, argv);
{ /* init module '%(module_name)s' as '__main__' */
PyObject* m = NULL;
%(module_is_main)s = 1;
#if PY_MAJOR_VERSION < 3
init%(module_name)s();
#elif CYTHON_PEP489_MULTI_PHASE_INIT
m = PyInit_%(module_name)s();
if (!PyModule_Check(m)) {
PyModuleDef *mdef = (PyModuleDef *) m;
PyObject *modname = PyUnicode_FromString("__main__");
m = NULL;
if (modname) {
// FIXME: not currently calling PyModule_FromDefAndSpec() here because we do not have a module spec!
// FIXME: not currently setting __file__, __path__, __spec__, ...
m = PyModule_NewObject(modname);
Py_DECREF(modname);
if (m) PyModule_ExecDef(m, mdef);
}
}
#else
m = PyInit_%(module_name)s();
#endif
if (PyErr_Occurred()) {
PyErr_Print(); /* This exits with the right code if SystemExit. */
#if PY_MAJOR_VERSION < 3
if (Py_FlushLine()) PyErr_Clear();
#endif
return 1;
}
Py_XDECREF(m);
}
#if PY_VERSION_HEX < 0x03060000
Py_Finalize();
#else
if (Py_FinalizeEx() < 0)
return 2;
#endif
return 0;
}
#if PY_MAJOR_VERSION >= 3 && !defined(WIN32) && !defined(MS_WINDOWS)
#include <locale.h>
static wchar_t*
__Pyx_char2wchar(char* arg)
{
wchar_t *res;
#ifdef HAVE_BROKEN_MBSTOWCS
/* Some platforms have a broken implementation of
* mbstowcs which does not count the characters that
* would result from conversion. Use an upper bound.
*/
size_t argsize = strlen(arg);
#else
size_t argsize = mbstowcs(NULL, arg, 0);
#endif
size_t count;
unsigned char *in;
wchar_t *out;
#ifdef HAVE_MBRTOWC
mbstate_t mbs;
#endif
if (argsize != (size_t)-1) {
res = (wchar_t *)malloc((argsize+1)*sizeof(wchar_t));
if (!res)
goto oom;
count = mbstowcs(res, arg, argsize+1);
if (count != (size_t)-1) {
wchar_t *tmp;
/* Only use the result if it contains no
surrogate characters. */
for (tmp = res; *tmp != 0 &&
(*tmp < 0xd800 || *tmp > 0xdfff); tmp++)
;
if (*tmp == 0)
return res;
}
free(res);
}
/* Conversion failed. Fall back to escaping with surrogateescape. */
#ifdef HAVE_MBRTOWC
/* Try conversion with mbrtowc (C99), and escape non-decodable bytes. */
/* Overallocate; as multi-byte characters are in the argument, the
actual output could use less memory. */
argsize = strlen(arg) + 1;
res = (wchar_t *)malloc(argsize*sizeof(wchar_t));
if (!res) goto oom;
in = (unsigned char*)arg;
out = res;
memset(&mbs, 0, sizeof mbs);
while (argsize) {
size_t converted = mbrtowc(out, (char*)in, argsize, &mbs);
if (converted == 0)
/* Reached end of string; null char stored. */
break;
if (converted == (size_t)-2) {
/* Incomplete character. This should never happen,
since we provide everything that we have -
unless there is a bug in the C library, or I
misunderstood how mbrtowc works. */
fprintf(stderr, "unexpected mbrtowc result -2\\n");
free(res);
return NULL;
}
if (converted == (size_t)-1) {
/* Conversion error. Escape as UTF-8b, and start over
in the initial shift state. */
*out++ = 0xdc00 + *in++;
argsize--;
memset(&mbs, 0, sizeof mbs);
continue;
}
if (*out >= 0xd800 && *out <= 0xdfff) {
/* Surrogate character. Escape the original
byte sequence with surrogateescape. */
argsize -= converted;
while (converted--)
*out++ = 0xdc00 + *in++;
continue;
}
/* successfully converted some bytes */
in += converted;
argsize -= converted;
out++;
}
#else
/* Cannot use C locale for escaping; manually escape as if charset
is ASCII (i.e. escape all bytes > 128. This will still roundtrip
correctly in the locale's charset, which must be an ASCII superset. */
res = (wchar_t *)malloc((strlen(arg)+1)*sizeof(wchar_t));
if (!res) goto oom;
in = (unsigned char*)arg;
out = res;
while(*in)
if(*in < 128)
*out++ = *in++;
else
*out++ = 0xdc00 + *in++;
*out = 0;
#endif
return res;
oom:
fprintf(stderr, "out of memory\\n");
return NULL;
}
int
%(main_method)s(int argc, char **argv)
{
if (!argc) {
return __Pyx_main(0, NULL);
}
else {
int i, res;
wchar_t **argv_copy = (wchar_t **)malloc(sizeof(wchar_t*)*argc);
/* We need a second copy, as Python might modify the first one. */
wchar_t **argv_copy2 = (wchar_t **)malloc(sizeof(wchar_t*)*argc);
char *oldloc = strdup(setlocale(LC_ALL, NULL));
if (!argv_copy || !argv_copy2 || !oldloc) {
fprintf(stderr, "out of memory\\n");
free(argv_copy);
free(argv_copy2);
free(oldloc);
return 1;
}
res = 0;
setlocale(LC_ALL, "");
for (i = 0; i < argc; i++) {
argv_copy2[i] = argv_copy[i] = __Pyx_char2wchar(argv[i]);
if (!argv_copy[i]) res = 1; /* failure, but continue to simplify cleanup */
}
setlocale(LC_ALL, oldloc);
free(oldloc);
if (res == 0)
res = __Pyx_main(argc, argv_copy);
for (i = 0; i < argc; i++) {
#if PY_VERSION_HEX < 0x03050000
free(argv_copy2[i]);
#else
PyMem_RawFree(argv_copy2[i]);
#endif
}
free(argv_copy);
free(argv_copy2);
return res;
}
}
#endif
/* ========== Cython-0.29.28/Cython/Utility/Exceptions.c ========== */
// Exception raising code
//
// Exceptions are raised by __Pyx_Raise() and stored as plain
// type/value/tb in PyThreadState->curexc_*. When being caught by an
// 'except' statement, curexc_* is moved over to exc_* by
// __Pyx_GetException()
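// --- Illustrative sketch, not part of the original file: the storage model
// described above, using only public C-API calls. PyErr_Fetch drains the
// curexc_* slots (taking ownership); PyErr_Restore refills them.
static void __Pyx_example_peek_current_exception(void) {
    PyObject *type, *value, *tb;
    PyErr_Fetch(&type, &value, &tb);     /* clears curexc_*, we own the refs */
    if (type) {
        /* ... inspect type/value/tb here ... */
        PyErr_Restore(type, value, tb);  /* put it back; references stolen */
    }
}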
/////////////// PyThreadStateGet.proto ///////////////
//@substitute: naming
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *$local_tstate_cname;
#define __Pyx_PyThreadState_assign $local_tstate_cname = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() $local_tstate_cname->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/////////////// PyErrExceptionMatches.proto ///////////////
//@substitute: naming
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState($local_tstate_cname, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/////////////// PyErrExceptionMatches ///////////////
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
// the tighter subtype checking in Py3 allows faster out-of-order comparison
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/////////////// PyErrFetchRestore.proto ///////////////
//@substitute: naming
//@requires: PyThreadStateGet
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState($local_tstate_cname, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState($local_tstate_cname, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); /*proto*/
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/////////////// PyErrFetchRestore ///////////////
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/////////////// RaiseException.proto ///////////////
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /*proto*/
/////////////// RaiseException ///////////////
//@requires: PyErrFetchRestore
//@requires: PyThreadStateGet
// The following function is based on do_raise() from ceval.c. There
// are separate versions for Python2 and Python3 as exception handling
// has changed quite a lot between the two versions.
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
/* 'cause' is only used in Py3 */
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
/* instantiate the type now (we don't know when and how it will be caught) */
#if CYTHON_COMPILING_IN_PYPY
/* PyPy can't handle value == NULL */
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
/* Raising an instance. The value should be a dummy. */
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
/* Normalize to raise <class>, <instance> */
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else /* Python 3+ */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
// make sure value is an exception instance of type
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
// error on subclass test
goto bad;
} else {
// believe the instance
type = instance_class;
}
}
}
if (!instance_class) {
// instantiate the type now (we don't know when and how it will be caught)
// assuming that 'value' is an argument to the type's constructor
// not using PyErr_NormalizeException() to avoid ref-counting problems
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
// raise ... from None
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/////////////// GetTopmostException.proto ///////////////
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/////////////// GetTopmostException ///////////////
#if CYTHON_USE_EXC_INFO_STACK
// Copied from errors.c in CPython.
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/////////////// GetException.proto ///////////////
//@substitute: naming
//@requires: PyThreadStateGet
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException($local_tstate_cname, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#endif
/////////////// GetException ///////////////
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
// traceback may be NULL for freshly raised exceptions
Py_XINCREF(local_tb);
// exception state may be temporarily empty in parallel loops (race condition)
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
// Make sure tstate is in a consistent state when we XDECREF
// these objects (DECREF may run arbitrary code).
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
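/* --- Illustrative sketch, not part of the original file: how generated code
 * for an `except ... as e:` block might drive __Pyx_GetException above. Under
 * CYTHON_FAST_THREAD_STATE it is a macro that needs the local tstate variable
 * declared via __Pyx_PyThreadState_declare/assign; all names here are assumed
 * and the post-handler exception-state reset is elided. */
static int __Pyx_example_except_block(void) {
    PyObject *etype, *evalue, *etb;
    __Pyx_PyThreadState_declare
    __Pyx_PyThreadState_assign
    if (__Pyx_GetException(&etype, &evalue, &etb) < 0)
        return -1;                       /* normalization itself failed */
    /* ... run the handler body with evalue bound to `e` ... */
    Py_XDECREF(etype); Py_XDECREF(evalue); Py_XDECREF(etb);
    return 0;
}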
/////////////// ReRaiseException.proto ///////////////
static CYTHON_INLINE void __Pyx_ReraiseException(void); /*proto*/
/////////////// ReRaiseException ///////////////
//@requires: GetTopmostException
static CYTHON_INLINE void __Pyx_ReraiseException(void) {
PyObject *type = NULL, *value = NULL, *tb = NULL;
#if CYTHON_FAST_THREAD_STATE
PyThreadState *tstate = PyThreadState_GET();
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
type = exc_info->exc_type;
value = exc_info->exc_value;
tb = exc_info->exc_traceback;
#else
type = tstate->exc_type;
value = tstate->exc_value;
tb = tstate->exc_traceback;
#endif
#else
PyErr_GetExcInfo(&type, &value, &tb);
#endif
if (!type || type == Py_None) {
#if !CYTHON_FAST_THREAD_STATE
Py_XDECREF(type);
Py_XDECREF(value);
Py_XDECREF(tb);
#endif
// message copied from Py3
PyErr_SetString(PyExc_RuntimeError,
"No active exception to reraise");
} else {
#if CYTHON_FAST_THREAD_STATE
Py_INCREF(type);
Py_XINCREF(value);
Py_XINCREF(tb);
#endif
PyErr_Restore(type, value, tb);
}
}
/////////////// SaveResetException.proto ///////////////
//@substitute: naming
//@requires: PyThreadStateGet
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave($local_tstate_cname, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset($local_tstate_cname, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); /*proto*/
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/////////////// SaveResetException ///////////////
//@requires: GetTopmostException
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
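/* A minimal sketch of the save/reset pairing: generated `except` handlers
   save the outer exception state on entry and restore it when the handler is
   left, roughly as below. The local names are hypothetical. */
#if 0 /* illustrative only, not compiled */
{
    PyObject *saved_type, *saved_value, *saved_tb;
    __Pyx_ExceptionSave(&saved_type, &saved_value, &saved_tb);
    /* ... execute the handler body; it may raise and catch further ... */
    __Pyx_ExceptionReset(saved_type, saved_value, saved_tb);
}
#endif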
/////////////// SwapException.proto ///////////////
//@substitute: naming
//@requires: PyThreadStateGet
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap($local_tstate_cname, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); /*proto*/
#endif
/////////////// SwapException ///////////////
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
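/* A minimal sketch, assuming hypothetical generator struct fields: generators
   and coroutines keep their own exception state and swap it into and out of
   the thread state around each resume. */
#if 0 /* illustrative only, not compiled */
__Pyx_ExceptionSwap(&gen->exc_type, &gen->exc_value, &gen->exc_traceback);
/* ... run one step of the generator body ... */
__Pyx_ExceptionSwap(&gen->exc_type, &gen->exc_value, &gen->exc_traceback);
#endif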
/////////////// WriteUnraisableException.proto ///////////////
static void __Pyx_WriteUnraisable(const char *name, int clineno,
int lineno, const char *filename,
int full_traceback, int nogil); /*proto*/
/////////////// WriteUnraisableException ///////////////
//@requires: PyErrFetchRestore
//@requires: PyThreadStateGet
static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
int full_traceback, CYTHON_UNUSED int nogil) {
PyObject *old_exc, *old_val, *old_tb;
PyObject *ctx;
__Pyx_PyThreadState_declare
#ifdef WITH_THREAD
PyGILState_STATE state;
if (nogil)
state = PyGILState_Ensure();
#ifdef _MSC_VER
/* arbitrary, to suppress warning */
else state = (PyGILState_STATE)-1;
#endif
#endif
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
if (full_traceback) {
Py_XINCREF(old_exc);
Py_XINCREF(old_val);
Py_XINCREF(old_tb);
__Pyx_ErrRestore(old_exc, old_val, old_tb);
PyErr_PrintEx(1);
}
#if PY_MAJOR_VERSION < 3
ctx = PyString_FromString(name);
#else
ctx = PyUnicode_FromString(name);
#endif
__Pyx_ErrRestore(old_exc, old_val, old_tb);
if (!ctx) {
PyErr_WriteUnraisable(Py_None);
} else {
PyErr_WriteUnraisable(ctx);
Py_DECREF(ctx);
}
#ifdef WITH_THREAD
if (nogil)
PyGILState_Release(state);
#endif
}
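/* A minimal sketch: functions that cannot propagate an exception (e.g. a
   `cdef void` function or `__dealloc__`) report it through
   __Pyx_WriteUnraisable instead of returning an error. The function name,
   line numbers and file name below are hypothetical. */
#if 0 /* illustrative only, not compiled */
__Pyx_WriteUnraisable("demo.cleanup", __LINE__, /* py_line */ 3, "demo.pyx",
                      /* full_traceback */ 0, /* nogil */ 1);
#endif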
/////////////// CLineInTraceback.proto ///////////////
#ifdef CYTHON_CLINE_IN_TRACEBACK /* 0 or 1 to disable/enable C line display in tracebacks at C compile time */
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);/*proto*/
#endif
/////////////// CLineInTraceback ///////////////
//@requires: ObjectHandling.c::PyObjectGetAttrStr
//@requires: ObjectHandling.c::PyDictVersioning
//@requires: PyErrFetchRestore
//@substitute: naming
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!${cython_runtime_cname})) {
// Very early error where the runtime module is not set up yet.
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(${cython_runtime_cname});
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, PYIDENT("cline_in_traceback")))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(${cython_runtime_cname}, PYIDENT("cline_in_traceback"));
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
// No need to handle errors here when we reset the exception state just afterwards.
(void) PyObject_SetAttr(${cython_runtime_cname}, PYIDENT("cline_in_traceback"), Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/////////////// AddTraceback.proto ///////////////
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename); /*proto*/
/////////////// AddTraceback ///////////////
//@requires: ModuleSetupCode.c::CodeObjectCache
//@requires: CLineInTraceback
//@substitute: naming
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = NULL;
PyObject *py_funcname = NULL;
#if PY_MAJOR_VERSION < 3
PyObject *py_srcfile = NULL;
py_srcfile = PyString_FromString(filename);
if (!py_srcfile) goto bad;
#endif
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, $cfilenm_cname, c_line);
if (!py_funcname) goto bad;
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, $cfilenm_cname, c_line);
if (!py_funcname) goto bad;
funcname = PyUnicode_AsUTF8(py_funcname);
if (!funcname) goto bad;
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
if (!py_funcname) goto bad;
#endif
}
#if PY_MAJOR_VERSION < 3
py_code = __Pyx_PyCode_New(
0, /*int argcount,*/
0, /*int kwonlyargcount,*/
0, /*int nlocals,*/
0, /*int stacksize,*/
0, /*int flags,*/
$empty_bytes, /*PyObject *code,*/
$empty_tuple, /*PyObject *consts,*/
$empty_tuple, /*PyObject *names,*/
$empty_tuple, /*PyObject *varnames,*/
$empty_tuple, /*PyObject *freevars,*/
$empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line, /*int firstlineno,*/
$empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
#else
py_code = PyCode_NewEmpty(filename, funcname, py_line);
#endif
Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline
return py_code;
bad:
Py_XDECREF(py_funcname);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_srcfile);
#endif
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
// Negate to avoid collisions between py and c lines.
py_code = $global_code_object_cache_find(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
$global_code_object_cache_insert(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
$moddict_cname, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
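/* A minimal sketch of the call site: after a failing C-API call, generated
   code records a synthetic frame before jumping to its error label. The
   names below are hypothetical. */
#if 0 /* illustrative only, not compiled */
result = PyObject_GetAttr(obj, attr_name);
if (!result) {
    __Pyx_AddTraceback("demo.get_it", __LINE__, /* py_line */ 7, "demo.pyx");
    goto error;
}
#endif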
/* Cython-0.29.28/Cython/Utility/ExtensionTypes.c */
/////////////// PyType_Ready.proto ///////////////
static int __Pyx_PyType_Ready(PyTypeObject *t);
/////////////// PyType_Ready ///////////////
// Wrapper around PyType_Ready() with some runtime checks and fixes
// to deal with multiple inheritance.
static int __Pyx_PyType_Ready(PyTypeObject *t) {
// Loop over all bases (except the first) and check that those
// really are heap types. Otherwise, it would not be safe to
// subclass them.
//
// We also check tp_dictoffset: it is unsafe to inherit
// tp_dictoffset from a base class because the object structures
// would not be compatible. So, if our extension type doesn't set
// tp_dictoffset (i.e. there is no __dict__ attribute in the object
// structure), we need to check that none of the base classes sets
// it either.
int r;
PyObject *bases = t->tp_bases;
if (bases)
{
Py_ssize_t i, n = PyTuple_GET_SIZE(bases);
for (i = 1; i < n; i++) /* Skip first base */
{
PyObject *b0 = PyTuple_GET_ITEM(bases, i);
PyTypeObject *b;
#if PY_MAJOR_VERSION < 3
/* Disallow old-style classes */
if (PyClass_Check(b0))
{
PyErr_Format(PyExc_TypeError, "base class '%.200s' is an old-style class",
PyString_AS_STRING(((PyClassObject*)b0)->cl_name));
return -1;
}
#endif
b = (PyTypeObject*)b0;
if (!PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE))
{
PyErr_Format(PyExc_TypeError, "base class '%.200s' is not a heap type",
b->tp_name);
return -1;
}
if (t->tp_dictoffset == 0 && b->tp_dictoffset)
{
PyErr_Format(PyExc_TypeError,
"extension type '%.200s' has no __dict__ slot, but base type '%.200s' has: "
"either add 'cdef dict __dict__' to the extension type "
"or add '__slots__ = [...]' to the base type",
t->tp_name, b->tp_name);
return -1;
}
}
}
#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION)
{
// Make sure GC does not pick up our non-heap type as heap type with this hack!
// For details, see https://github.com/cython/cython/issues/3603
PyObject *ret, *py_status;
int gc_was_enabled;
PyObject *gc = PyImport_Import(PYUNICODE("gc"));
if (unlikely(!gc)) return -1;
py_status = PyObject_CallMethodObjArgs(gc, PYUNICODE("isenabled"), NULL);
if (unlikely(!py_status)) {
Py_DECREF(gc);
return -1;
}
gc_was_enabled = __Pyx_PyObject_IsTrue(py_status);
Py_DECREF(py_status);
if (gc_was_enabled > 0) {
ret = PyObject_CallMethodObjArgs(gc, PYUNICODE("disable"), NULL);
if (unlikely(!ret)) {
Py_DECREF(gc);
return -1;
}
Py_DECREF(ret);
} else if (unlikely(gc_was_enabled == -1)) {
Py_DECREF(gc);
return -1;
}
// As of https://bugs.python.org/issue22079
// PyType_Ready enforces that all bases of a non-heap type are
// non-heap. We know that this is the case for the solid base but
// other bases are heap allocated and are kept alive through the
// tp_bases reference.
// Other than this check, the Py_TPFLAGS_HEAPTYPE flag is unused
// in PyType_Ready().
t->tp_flags |= Py_TPFLAGS_HEAPTYPE;
#endif
r = PyType_Ready(t);
#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION)
t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE;
if (gc_was_enabled) {
PyObject *t, *v, *tb;
PyErr_Fetch(&t, &v, &tb);
ret = PyObject_CallMethodObjArgs(gc, PYUNICODE("enable"), NULL);
if (likely(ret || r == -1)) {
Py_XDECREF(ret);
// do not overwrite exceptions raised by PyType_Ready() above
PyErr_Restore(t, v, tb);
} else {
// PyType_Ready() succeeded, but gc.enable() failed.
Py_XDECREF(t);
Py_XDECREF(v);
Py_XDECREF(tb);
r = -1;
}
}
Py_DECREF(gc);
}
#endif
return r;
}
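/* A minimal sketch: module initialisation readies every extension type
   through this wrapper and aborts on failure. The type object name and
   label are hypothetical. */
#if 0 /* illustrative only, not compiled */
if (__Pyx_PyType_Ready(&__pyx_type_demo_MyClass) < 0) goto module_init_error;
#endif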
/////////////// CallNextTpDealloc.proto ///////////////
static void __Pyx_call_next_tp_dealloc(PyObject* obj, destructor current_tp_dealloc);
/////////////// CallNextTpDealloc ///////////////
static void __Pyx_call_next_tp_dealloc(PyObject* obj, destructor current_tp_dealloc) {
PyTypeObject* type = Py_TYPE(obj);
/* try to find the first parent type that has a different tp_dealloc() function */
while (type && type->tp_dealloc != current_tp_dealloc)
type = type->tp_base;
while (type && type->tp_dealloc == current_tp_dealloc)
type = type->tp_base;
if (type)
type->tp_dealloc(obj);
}
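/* A minimal sketch: a subclass dealloc function frees its own fields, then
   delegates to the next distinct tp_dealloc up the base chain. The names
   are hypothetical. */
#if 0 /* illustrative only, not compiled */
static void __pyx_tp_dealloc_Derived(PyObject *o) {
    /* ... release the fields that Derived itself added ... */
    __Pyx_call_next_tp_dealloc(o, __pyx_tp_dealloc_Derived);
}
#endif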
/////////////// CallNextTpTraverse.proto ///////////////
static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse);
/////////////// CallNextTpTraverse ///////////////
static int __Pyx_call_next_tp_traverse(PyObject* obj, visitproc v, void *a, traverseproc current_tp_traverse) {
PyTypeObject* type = Py_TYPE(obj);
/* try to find the first parent type that has a different tp_traverse() function */
while (type && type->tp_traverse != current_tp_traverse)
type = type->tp_base;
while (type && type->tp_traverse == current_tp_traverse)
type = type->tp_base;
if (type && type->tp_traverse)
return type->tp_traverse(obj, v, a);
// FIXME: really ignore?
return 0;
}
/////////////// CallNextTpClear.proto ///////////////
static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_clear);
/////////////// CallNextTpClear ///////////////
static void __Pyx_call_next_tp_clear(PyObject* obj, inquiry current_tp_clear) {
PyTypeObject* type = Py_TYPE(obj);
/* try to find the first parent type that has a different tp_clear() function */
while (type && type->tp_clear != current_tp_clear)
type = type->tp_base;
while (type && type->tp_clear == current_tp_clear)
type = type->tp_base;
if (type && type->tp_clear)
type->tp_clear(obj);
}
/////////////// SetupReduce.proto ///////////////
static int __Pyx_setup_reduce(PyObject* type_obj);
/////////////// SetupReduce ///////////////
//@requires: ObjectHandling.c::PyObjectGetAttrStrNoError
//@requires: ObjectHandling.c::PyObjectGetAttrStr
//@substitute: naming
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, PYIDENT("__name__"));
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, PYIDENT("__getstate__"))) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, PYIDENT("__getstate__"))) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, PYIDENT("__reduce_ex__")); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, PYIDENT("__reduce_ex__")); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, PYIDENT("__reduce_ex__")); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, PYIDENT("__reduce__")); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, PYIDENT("__reduce__")); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, PYIDENT("__reduce__")); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, PYIDENT("__reduce_cython__"))) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__reduce_cython__"));
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__reduce__"), reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__reduce_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
// Ignore if we're done, i.e. if 'reduce' already has the right name and the original is gone.
// Otherwise: error.
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, PYIDENT("__setstate__"));
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, PYIDENT("__setstate_cython__"))) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, PYIDENT("__setstate_cython__"));
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__setstate__"), setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, PYIDENT("__setstate_cython__")); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
// Ignore if we're done, i.e. if 'setstate' already has the right name and the original is gone.
// Otherwise: error.
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* Cython-0.29.28/Cython/Utility/FunctionArguments.c */
//////////////////// ArgTypeTest.proto ////////////////////
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact) \
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 : \
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /*proto*/
//////////////////// ArgTypeTest ////////////////////
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
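/* A minimal sketch: generated argument unpacking guards each typed parameter
   with the macro above; `none_allowed` reflects an `or None` annotation in
   the signature. Names are hypothetical. */
#if 0 /* illustrative only, not compiled */
if (unlikely(!__Pyx_ArgTypeTest(arg, &PyUnicode_Type, /* none_allowed */ 1,
                                "text", /* exact */ 0)))
    goto arg_error;
#endif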
//////////////////// RaiseArgTupleInvalid.proto ////////////////////
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /*proto*/
//////////////////// RaiseArgTupleInvalid ////////////////////
// __Pyx_RaiseArgtupleInvalid raises the correct exception when too
// many or too few positional arguments were found. This handles
// Py_ssize_t formatting correctly.
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
//////////////////// RaiseKeywordRequired.proto ////////////////////
static void __Pyx_RaiseKeywordRequired(const char* func_name, PyObject* kw_name); /*proto*/
//////////////////// RaiseKeywordRequired ////////////////////
static void __Pyx_RaiseKeywordRequired(const char* func_name, PyObject* kw_name) {
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() needs keyword-only argument %U", func_name, kw_name);
#else
"%s() needs keyword-only argument %s", func_name,
PyString_AS_STRING(kw_name));
#endif
}
//////////////////// RaiseDoubleKeywords.proto ////////////////////
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /*proto*/
//////////////////// RaiseDoubleKeywords ////////////////////
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
//////////////////// RaiseMappingExpected.proto ////////////////////
static void __Pyx_RaiseMappingExpectedError(PyObject* arg); /*proto*/
//////////////////// RaiseMappingExpected ////////////////////
static void __Pyx_RaiseMappingExpectedError(PyObject* arg) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not a mapping", Py_TYPE(arg)->tp_name);
}
//////////////////// KeywordStringCheck.proto ////////////////////
static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); /*proto*/
//////////////////// KeywordStringCheck ////////////////////
// __Pyx_CheckKeywordStrings raises an error if non-string keywords
// were passed to a function, or if any keywords were passed to a
// function that does not accept them.
static int __Pyx_CheckKeywordStrings(
PyObject *kwdict,
const char* function_name,
int kw_allowed)
{
PyObject* key = 0;
Py_ssize_t pos = 0;
#if CYTHON_COMPILING_IN_PYPY
/* PyPy appears to check keywords at call time, not at unpacking time => not much to do here */
if (!kw_allowed && PyDict_Next(kwdict, &pos, &key, 0))
goto invalid_keyword;
return 1;
#else
while (PyDict_Next(kwdict, &pos, &key, 0)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyString_Check(key)))
#endif
if (unlikely(!PyUnicode_Check(key)))
goto invalid_keyword_type;
}
if ((!kw_allowed) && unlikely(key))
goto invalid_keyword;
return 1;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
return 0;
#endif
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
return 0;
}
//////////////////// ParseKeywords.proto ////////////////////
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[], \
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, \
const char* function_name); /*proto*/
//////////////////// ParseKeywords ////////////////////
//@requires: RaiseDoubleKeywords
// __Pyx_ParseOptionalKeywords copies the optional/unknown keyword
// arguments from the kwds dict into kwds2. If kwds2 is NULL, unknown
// keywords will raise an invalid keyword error.
//
// Three kinds of errors are checked: 1) non-string keywords, 2)
// unexpected keywords and 3) overlap with positional arguments.
//
// If num_pos_args is greater than 0, it denotes the number of positional
// arguments that were passed and that must therefore not appear
// amongst the keywords as well.
//
// This method does not check for required keyword arguments.
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
// not found after positional args, check for duplicate
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
// In Py2, we may need to convert the argument name from str to unicode for comparison.
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
// not found after positional args, check for duplicate
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
// need to convert argument name from bytes to unicode for comparison
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
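/* A minimal sketch, with hypothetical identifiers: for `def f(a, b=1, **kw)`
   the generated code builds a NULL-terminated table of interned argument
   names and lets the parser fill `values` and the extra-keywords dict. */
#if 0 /* illustrative only, not compiled */
PyObject **argnames[] = { &__pyx_n_s_a, &__pyx_n_s_b, NULL };
PyObject *values[2] = { NULL, NULL };
if (__Pyx_ParseOptionalKeywords(kwds, argnames, extra_kw, values,
                                num_pos_args, "f") < 0)
    goto arg_error;
#endif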
//////////////////// MergeKeywords.proto ////////////////////
static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping); /*proto*/
//////////////////// MergeKeywords ////////////////////
//@requires: RaiseDoubleKeywords
//@requires: Optimize.c::dict_iter
static int __Pyx_MergeKeywords(PyObject *kwdict, PyObject *source_mapping) {
PyObject *iter, *key = NULL, *value = NULL;
int source_is_dict, result;
Py_ssize_t orig_length, ppos = 0;
iter = __Pyx_dict_iterator(source_mapping, 0, PYIDENT("items"), &orig_length, &source_is_dict);
if (unlikely(!iter)) {
// slow fallback: try converting to dict, then iterate
PyObject *args;
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
PyErr_Clear();
args = PyTuple_Pack(1, source_mapping);
if (likely(args)) {
PyObject *fallback = PyObject_Call((PyObject*)&PyDict_Type, args, NULL);
Py_DECREF(args);
if (likely(fallback)) {
iter = __Pyx_dict_iterator(fallback, 1, PYIDENT("items"), &orig_length, &source_is_dict);
Py_DECREF(fallback);
}
}
if (unlikely(!iter)) goto bad;
}
while (1) {
result = __Pyx_dict_iter_next(iter, orig_length, &ppos, &key, &value, NULL, source_is_dict);
if (unlikely(result < 0)) goto bad;
if (!result) break;
if (unlikely(PyDict_Contains(kwdict, key))) {
__Pyx_RaiseDoubleKeywordsError("function", key);
result = -1;
} else {
result = PyDict_SetItem(kwdict, key, value);
}
Py_DECREF(key);
Py_DECREF(value);
if (unlikely(result < 0)) goto bad;
}
Py_XDECREF(iter);
return 0;
bad:
Py_XDECREF(iter);
return -1;
}
/* Cython-0.29.28/Cython/Utility/ImportExport.c */
/////////////// PyIdentifierFromString.proto ///////////////
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
/////////////// Import.proto ///////////////
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /*proto*/
/////////////// Import ///////////////
//@requires: ObjectHandling.c::PyObjectGetAttrStr
//@substitute: naming
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr($builtins_cname, PYIDENT("__import__"));
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict($module_cname);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
// Avoid C compiler warning if strchr() evaluates to false at compile time.
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
/* try package relative import first */
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0; /* try absolute import on failure */
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
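/* A minimal sketch of the call site: `import numpy` compiles to roughly the
   following (see __Pyx__ImportNumPyArray() further below for a real use of
   this helper). The error label is hypothetical. */
#if 0 /* illustrative only, not compiled */
module = __Pyx_Import(PYIDENT("numpy"), /* from_list */ NULL, /* level */ 0);
if (!module) goto import_error;
#endif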
/////////////// ImportFrom.proto ///////////////
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /*proto*/
/////////////// ImportFrom ///////////////
//@requires: ObjectHandling.c::PyObjectGetAttrStr
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/////////////// ImportStar ///////////////
//@substitute: naming
/* import_all_from is an unexposed function from ceval.c */
static int
__Pyx_import_all_from(PyObject *locals, PyObject *v)
{
PyObject *all = PyObject_GetAttrString(v, "__all__");
PyObject *dict, *name, *value;
int skip_leading_underscores = 0;
int pos, err;
if (all == NULL) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return -1; /* Unexpected error */
PyErr_Clear();
dict = PyObject_GetAttrString(v, "__dict__");
if (dict == NULL) {
if (!PyErr_ExceptionMatches(PyExc_AttributeError))
return -1;
PyErr_SetString(PyExc_ImportError,
"from-import-* object has no __dict__ and no __all__");
return -1;
}
#if PY_MAJOR_VERSION < 3
all = PyObject_CallMethod(dict, (char *)"keys", NULL);
#else
all = PyMapping_Keys(dict);
#endif
Py_DECREF(dict);
if (all == NULL)
return -1;
skip_leading_underscores = 1;
}
for (pos = 0, err = 0; ; pos++) {
name = PySequence_GetItem(all, pos);
if (name == NULL) {
if (!PyErr_ExceptionMatches(PyExc_IndexError))
err = -1;
else
PyErr_Clear();
break;
}
if (skip_leading_underscores &&
#if PY_MAJOR_VERSION < 3
likely(PyString_Check(name)) &&
PyString_AS_STRING(name)[0] == '_')
#else
likely(PyUnicode_Check(name)) &&
likely(__Pyx_PyUnicode_GET_LENGTH(name)) &&
__Pyx_PyUnicode_READ_CHAR(name, 0) == '_')
#endif
{
Py_DECREF(name);
continue;
}
value = PyObject_GetAttr(v, name);
if (value == NULL)
err = -1;
else if (PyDict_CheckExact(locals))
err = PyDict_SetItem(locals, name, value);
else
err = PyObject_SetItem(locals, name, value);
Py_DECREF(name);
Py_XDECREF(value);
if (err != 0)
break;
}
Py_DECREF(all);
return err;
}
static int ${import_star}(PyObject* m) {
int i;
int ret = -1;
char* s;
PyObject *locals = 0;
PyObject *list = 0;
#if PY_MAJOR_VERSION >= 3
PyObject *utf8_name = 0;
#endif
PyObject *name;
PyObject *item;
locals = PyDict_New(); if (!locals) goto bad;
if (__Pyx_import_all_from(locals, m) < 0) goto bad;
list = PyDict_Items(locals); if (!list) goto bad;
for(i=0; i<PyList_GET_SIZE(list); i++) {
name = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 0);
item = PyTuple_GET_ITEM(PyList_GET_ITEM(list, i), 1);
#if PY_MAJOR_VERSION >= 3
utf8_name = PyUnicode_AsUTF8String(name);
if (!utf8_name) goto bad;
s = PyBytes_AS_STRING(utf8_name);
if (${import_star_set}(item, name, s) < 0) goto bad;
Py_DECREF(utf8_name); utf8_name = 0;
#else
s = PyString_AsString(name);
if (!s) goto bad;
if (${import_star_set}(item, name, s) < 0) goto bad;
#endif
}
ret = 0;
bad:
Py_XDECREF(locals);
Py_XDECREF(list);
#if PY_MAJOR_VERSION >= 3
Py_XDECREF(utf8_name);
#endif
return ret;
}
/////////////// SetPackagePathFromImportLib.proto ///////////////
// PY_VERSION_HEX >= 0x03030000
#if PY_MAJOR_VERSION >= 3 && !CYTHON_PEP489_MULTI_PHASE_INIT
static int __Pyx_SetPackagePathFromImportLib(const char* parent_package_name, PyObject *module_name);
#else
#define __Pyx_SetPackagePathFromImportLib(a, b) 0
#endif
/////////////// SetPackagePathFromImportLib ///////////////
//@requires: ObjectHandling.c::PyObjectGetAttrStr
//@substitute: naming
// PY_VERSION_HEX >= 0x03030000
#if PY_MAJOR_VERSION >= 3 && !CYTHON_PEP489_MULTI_PHASE_INIT
static int __Pyx_SetPackagePathFromImportLib(const char* parent_package_name, PyObject *module_name) {
PyObject *importlib, *loader, *osmod, *ossep, *parts, *package_path;
PyObject *path = NULL, *file_path = NULL;
int result;
if (parent_package_name) {
PyObject *package = PyImport_ImportModule(parent_package_name);
if (unlikely(!package))
goto bad;
path = PyObject_GetAttrString(package, "__path__");
Py_DECREF(package);
if (unlikely(!path) || unlikely(path == Py_None))
goto bad;
} else {
path = Py_None; Py_INCREF(Py_None);
}
// package_path = [importlib.find_loader(module_name, path).path.rsplit(os.sep, 1)[0]]
importlib = PyImport_ImportModule("importlib");
if (unlikely(!importlib))
goto bad;
loader = PyObject_CallMethod(importlib, "find_loader", "(OO)", module_name, path);
Py_DECREF(importlib);
Py_DECREF(path); path = NULL;
if (unlikely(!loader))
goto bad;
file_path = PyObject_GetAttrString(loader, "path");
Py_DECREF(loader);
if (unlikely(!file_path))
goto bad;
if (unlikely(PyObject_SetAttrString($module_cname, "__file__", file_path) < 0))
goto bad;
osmod = PyImport_ImportModule("os");
if (unlikely(!osmod))
goto bad;
ossep = PyObject_GetAttrString(osmod, "sep");
Py_DECREF(osmod);
if (unlikely(!ossep))
goto bad;
parts = PyObject_CallMethod(file_path, "rsplit", "(Oi)", ossep, 1);
Py_DECREF(file_path); file_path = NULL;
Py_DECREF(ossep);
if (unlikely(!parts))
goto bad;
package_path = Py_BuildValue("[O]", PyList_GET_ITEM(parts, 0));
Py_DECREF(parts);
if (unlikely(!package_path))
goto bad;
goto set_path;
bad:
PyErr_WriteUnraisable(module_name);
Py_XDECREF(path);
Py_XDECREF(file_path);
// set an empty path list on failure
PyErr_Clear();
package_path = PyList_New(0);
if (unlikely(!package_path))
return -1;
set_path:
result = PyObject_SetAttrString($module_cname, "__path__", package_path);
Py_DECREF(package_path);
return result;
}
#endif
/////////////// TypeImport.proto ///////////////
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
__Pyx_ImportType_CheckSize_Error = 0,
__Pyx_ImportType_CheckSize_Warn = 1,
__Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); /*proto*/
#endif
/////////////// TypeImport ///////////////
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
PyObject *result = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
result = PyObject_GetAttrString(module, class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if ((size_t)basicsize < size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
/* check_size == __Pyx_ImportType_CheckSize_Ignore does not warn nor error */
return (PyTypeObject *)result;
bad:
Py_XDECREF(result);
return NULL;
}
#endif
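/* A minimal sketch, with hypothetical names: a cimported extension type is
   resolved at module init time, and check_size selects whether a basicsize
   mismatch is an error, a warning, or ignored. */
#if 0 /* illustrative only, not compiled */
__pyx_ptype_demo_Point = __Pyx_ImportType(module, "demo", "Point",
    sizeof(struct __pyx_obj_demo_Point), __Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_demo_Point) goto import_error;
#endif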
/////////////// FunctionImport.proto ///////////////
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig); /*proto*/
/////////////// FunctionImport ///////////////
//@substitute: naming
#ifndef __PYX_HAVE_RT_ImportFunction
#define __PYX_HAVE_RT_ImportFunction
static int __Pyx_ImportFunction(PyObject *module, const char *funcname, void (**f)(void), const char *sig) {
PyObject *d = 0;
PyObject *cobj = 0;
union {
void (*fp)(void);
void *p;
} tmp;
d = PyObject_GetAttrString(module, (char *)"$api_name");
if (!d)
goto bad;
cobj = PyDict_GetItemString(d, funcname);
if (!cobj) {
PyErr_Format(PyExc_ImportError,
"%.200s does not export expected C function %.200s",
PyModule_GetName(module), funcname);
goto bad;
}
#if PY_VERSION_HEX >= 0x02070000
if (!PyCapsule_IsValid(cobj, sig)) {
PyErr_Format(PyExc_TypeError,
"C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), funcname, sig, PyCapsule_GetName(cobj));
goto bad;
}
tmp.p = PyCapsule_GetPointer(cobj, sig);
#else
{const char *desc, *s1, *s2;
desc = (const char *)PyCObject_GetDesc(cobj);
if (!desc)
goto bad;
s1 = desc; s2 = sig;
while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
if (*s1 != *s2) {
PyErr_Format(PyExc_TypeError,
"C function %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), funcname, sig, desc);
goto bad;
}
tmp.p = PyCObject_AsVoidPtr(cobj);}
#endif
*f = tmp.fp;
if (!(*f))
goto bad;
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(d);
return -1;
}
#endif
/////////////// FunctionExport.proto ///////////////
static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig); /*proto*/
/////////////// FunctionExport ///////////////
//@substitute: naming
static int __Pyx_ExportFunction(const char *name, void (*f)(void), const char *sig) {
PyObject *d = 0;
PyObject *cobj = 0;
union {
void (*fp)(void);
void *p;
} tmp;
d = PyObject_GetAttrString($module_cname, (char *)"$api_name");
if (!d) {
PyErr_Clear();
d = PyDict_New();
if (!d)
goto bad;
Py_INCREF(d);
if (PyModule_AddObject($module_cname, (char *)"$api_name", d) < 0)
goto bad;
}
tmp.fp = f;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(tmp.p, sig, 0);
#else
cobj = PyCObject_FromVoidPtrAndDesc(tmp.p, (void *)sig, 0);
#endif
if (!cobj)
goto bad;
if (PyDict_SetItemString(d, name, cobj) < 0)
goto bad;
Py_DECREF(cobj);
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(cobj);
Py_XDECREF(d);
return -1;
}
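/* A minimal sketch of the export/import round trip for a
   `cdef api double f(double)` function; the signature strings must match
   exactly on both sides. Names are hypothetical. */
#if 0 /* illustrative only, not compiled */
/* in the exporting module: */
if (__Pyx_ExportFunction("f", (void (*)(void))f, "double (double)") < 0) goto bad;
/* in the importing module: */
static double (*fp)(double);
if (__Pyx_ImportFunction(mod, "f", (void (**)(void))&fp, "double (double)") < 0) goto bad;
#endif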
/////////////// VoidPtrImport.proto ///////////////
static int __Pyx_ImportVoidPtr(PyObject *module, const char *name, void **p, const char *sig); /*proto*/
/////////////// VoidPtrImport ///////////////
//@substitute: naming
#ifndef __PYX_HAVE_RT_ImportVoidPtr
#define __PYX_HAVE_RT_ImportVoidPtr
static int __Pyx_ImportVoidPtr(PyObject *module, const char *name, void **p, const char *sig) {
PyObject *d = 0;
PyObject *cobj = 0;
d = PyObject_GetAttrString(module, (char *)"$api_name");
if (!d)
goto bad;
cobj = PyDict_GetItemString(d, name);
if (!cobj) {
PyErr_Format(PyExc_ImportError,
"%.200s does not export expected C variable %.200s",
PyModule_GetName(module), name);
goto bad;
}
#if PY_VERSION_HEX >= 0x02070000
if (!PyCapsule_IsValid(cobj, sig)) {
PyErr_Format(PyExc_TypeError,
"C variable %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), name, sig, PyCapsule_GetName(cobj));
goto bad;
}
*p = PyCapsule_GetPointer(cobj, sig);
#else
{const char *desc, *s1, *s2;
desc = (const char *)PyCObject_GetDesc(cobj);
if (!desc)
goto bad;
s1 = desc; s2 = sig;
while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
if (*s1 != *s2) {
PyErr_Format(PyExc_TypeError,
"C variable %.200s.%.200s has wrong signature (expected %.500s, got %.500s)",
PyModule_GetName(module), name, sig, desc);
goto bad;
}
*p = PyCObject_AsVoidPtr(cobj);}
#endif
if (!(*p))
goto bad;
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(d);
return -1;
}
#endif
/////////////// VoidPtrExport.proto ///////////////
static int __Pyx_ExportVoidPtr(PyObject *name, void *p, const char *sig); /*proto*/
/////////////// VoidPtrExport ///////////////
//@substitute: naming
//@requires: ObjectHandling.c::PyObjectSetAttrStr
static int __Pyx_ExportVoidPtr(PyObject *name, void *p, const char *sig) {
PyObject *d;
PyObject *cobj = 0;
d = PyDict_GetItem($moddict_cname, PYIDENT("$api_name"));
Py_XINCREF(d);
if (!d) {
d = PyDict_New();
if (!d)
goto bad;
if (__Pyx_PyObject_SetAttrStr($module_cname, PYIDENT("$api_name"), d) < 0)
goto bad;
}
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, 0);
#else
cobj = PyCObject_FromVoidPtrAndDesc(p, (void *)sig, 0);
#endif
if (!cobj)
goto bad;
if (PyDict_SetItem(d, name, cobj) < 0)
goto bad;
Py_DECREF(cobj);
Py_DECREF(d);
return 0;
bad:
Py_XDECREF(cobj);
Py_XDECREF(d);
return -1;
}
/////////////// SetVTable.proto ///////////////
static int __Pyx_SetVtable(PyObject *dict, void *vtable); /*proto*/
/////////////// SetVTable ///////////////
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, PYIDENT("__pyx_vtable__"), ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/////////////// GetVTable.proto ///////////////
static void* __Pyx_GetVtable(PyObject *dict); /*proto*/
/////////////// GetVTable ///////////////
static void* __Pyx_GetVtable(PyObject *dict) {
void* ptr;
PyObject *ob = PyObject_GetItem(dict, PYIDENT("__pyx_vtable__"));
if (!ob)
goto bad;
#if PY_VERSION_HEX >= 0x02070000
ptr = PyCapsule_GetPointer(ob, 0);
#else
ptr = PyCObject_AsVoidPtr(ob);
#endif
if (!ptr && !PyErr_Occurred())
PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type");
Py_DECREF(ob);
return ptr;
bad:
Py_XDECREF(ob);
return NULL;
}
/////////////// MergeVTables.proto ///////////////
//@requires: GetVTable
static int __Pyx_MergeVtables(PyTypeObject *type); /*proto*/
/////////////// MergeVTables ///////////////
static int __Pyx_MergeVtables(PyTypeObject *type) {
int i;
void** base_vtables;
void* unknown = (void*)-1;
PyObject* bases = type->tp_bases;
int base_depth = 0;
{
PyTypeObject* base = type->tp_base;
while (base) {
base_depth += 1;
base = base->tp_base;
}
}
base_vtables = (void**) malloc(sizeof(void*) * (size_t)(base_depth + 1));
base_vtables[0] = unknown;
// Could do MRO resolution of individual methods in the future, assuming
// compatible vtables, but for now simply require a common vtable base.
// Note that if the vtables of various bases are extended separately,
// resolution isn't possible and we must reject it just as when the
// instance struct is so extended. (It would be good to also do this
// check when a multiple-base class is created in pure Python as well.)
for (i = 1; i < PyTuple_GET_SIZE(bases); i++) {
void* base_vtable = __Pyx_GetVtable(((PyTypeObject*)PyTuple_GET_ITEM(bases, i))->tp_dict);
if (base_vtable != NULL) {
int j;
PyTypeObject* base = type->tp_base;
for (j = 0; j < base_depth; j++) {
if (base_vtables[j] == unknown) {
base_vtables[j] = __Pyx_GetVtable(base->tp_dict);
base_vtables[j + 1] = unknown;
}
if (base_vtables[j] == base_vtable) {
break;
} else if (base_vtables[j] == NULL) {
// No more potential matching bases (with vtables).
goto bad;
}
base = base->tp_base;
}
}
}
PyErr_Clear();
free(base_vtables);
return 0;
bad:
PyErr_Format(
PyExc_TypeError,
"multiple bases have vtable conflict: '%s' and '%s'",
type->tp_base->tp_name, ((PyTypeObject*)PyTuple_GET_ITEM(bases, i))->tp_name);
free(base_vtables);
return -1;
}
/////////////// ImportNumPyArray.proto ///////////////
static PyObject *__pyx_numpy_ndarray = NULL;
static PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void); /*proto*/
/////////////// ImportNumPyArray.cleanup ///////////////
Py_CLEAR(__pyx_numpy_ndarray);
/////////////// ImportNumPyArray ///////////////
//@requires: ImportExport.c::Import
static PyObject* __Pyx__ImportNumPyArray(void) {
PyObject *numpy_module, *ndarray_object = NULL;
numpy_module = __Pyx_Import(PYIDENT("numpy"), NULL, 0);
if (likely(numpy_module)) {
ndarray_object = PyObject_GetAttrString(numpy_module, "ndarray");
Py_DECREF(numpy_module);
}
if (unlikely(!ndarray_object)) {
// ImportError, AttributeError, ...
PyErr_Clear();
}
if (unlikely(!ndarray_object || !PyObject_TypeCheck(ndarray_object, &PyType_Type))) {
Py_XDECREF(ndarray_object);
Py_INCREF(Py_None);
ndarray_object = Py_None;
}
return ndarray_object;
}
static CYTHON_INLINE PyObject* __Pyx_ImportNumPyArrayTypeIfAvailable(void) {
if (unlikely(!__pyx_numpy_ndarray)) {
__pyx_numpy_ndarray = __Pyx__ImportNumPyArray();
}
Py_INCREF(__pyx_numpy_ndarray);
return __pyx_numpy_ndarray;
}
# Cython-0.29.28/Cython/Utility/MemoryView.pyx
#################### View.MemoryView ####################
# This utility provides cython.array and cython.view.memoryview
from __future__ import absolute_import
cimport cython
# from cpython cimport ...
cdef extern from "Python.h":
int PyIndex_Check(object)
object PyLong_FromVoidPtr(void *)
cdef extern from "pythread.h":
ctypedef void *PyThread_type_lock
PyThread_type_lock PyThread_allocate_lock()
void PyThread_free_lock(PyThread_type_lock)
int PyThread_acquire_lock(PyThread_type_lock, int mode) nogil
void PyThread_release_lock(PyThread_type_lock) nogil
cdef extern from "":
void *memset(void *b, int c, size_t len)
cdef extern from *:
int __Pyx_GetBuffer(object, Py_buffer *, int) except -1
void __Pyx_ReleaseBuffer(Py_buffer *)
ctypedef struct PyObject
ctypedef Py_ssize_t Py_intptr_t
void Py_INCREF(PyObject *)
void Py_DECREF(PyObject *)
void* PyMem_Malloc(size_t n)
void PyMem_Free(void *p)
void* PyObject_Malloc(size_t n)
void PyObject_Free(void *p)
cdef struct __pyx_memoryview "__pyx_memoryview_obj":
Py_buffer view
PyObject *obj
__Pyx_TypeInfo *typeinfo
ctypedef struct {{memviewslice_name}}:
__pyx_memoryview *memview
char *data
Py_ssize_t shape[{{max_dims}}]
Py_ssize_t strides[{{max_dims}}]
Py_ssize_t suboffsets[{{max_dims}}]
void __PYX_INC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil)
void __PYX_XDEC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil)
ctypedef struct __pyx_buffer "Py_buffer":
PyObject *obj
PyObject *Py_None
cdef enum:
PyBUF_C_CONTIGUOUS,
PyBUF_F_CONTIGUOUS,
PyBUF_ANY_CONTIGUOUS
PyBUF_FORMAT
PyBUF_WRITABLE
PyBUF_STRIDES
PyBUF_INDIRECT
PyBUF_ND
PyBUF_RECORDS
PyBUF_RECORDS_RO
ctypedef struct __Pyx_TypeInfo:
pass
cdef object capsule "__pyx_capsule_create" (void *p, char *sig)
cdef int __pyx_array_getbuffer(PyObject *obj, Py_buffer view, int flags)
cdef int __pyx_memoryview_getbuffer(PyObject *obj, Py_buffer view, int flags)
cdef extern from *:
ctypedef int __pyx_atomic_int
{{memviewslice_name}} slice_copy_contig "__pyx_memoryview_copy_new_contig"(
__Pyx_memviewslice *from_mvs,
char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
bint dtype_is_object) nogil except *
bint slice_is_contig "__pyx_memviewslice_is_contig" (
{{memviewslice_name}} mvs, char order, int ndim) nogil
bint slices_overlap "__pyx_slices_overlap" ({{memviewslice_name}} *slice1,
{{memviewslice_name}} *slice2,
int ndim, size_t itemsize) nogil
cdef extern from "":
void *malloc(size_t) nogil
void free(void *) nogil
void *memcpy(void *dest, void *src, size_t n) nogil
#
### cython.array class
#
@cname("__pyx_array")
cdef class array:
cdef:
char *data
Py_ssize_t len
char *format
int ndim
Py_ssize_t *_shape
Py_ssize_t *_strides
Py_ssize_t itemsize
unicode mode # FIXME: this should have been a simple 'char'
bytes _format
void (*callback_free_data)(void *data)
# cdef object _memview
cdef bint free_data
cdef bint dtype_is_object
def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
mode="c", bint allocate_buffer=True):
cdef int idx
cdef Py_ssize_t i, dim
cdef PyObject **p
self.ndim = len(shape)
self.itemsize = itemsize
if not self.ndim:
raise ValueError("Empty shape tuple for cython.array")
if itemsize <= 0:
raise ValueError("itemsize <= 0 for cython.array")
if not isinstance(format, bytes):
format = format.encode('ASCII')
self._format = format # keep a reference to the byte string
self.format = self._format
# use single malloc() for both shape and strides
self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
self._strides = self._shape + self.ndim
if not self._shape:
raise MemoryError("unable to allocate shape and strides.")
# cdef Py_ssize_t dim, stride
for idx, dim in enumerate(shape):
if dim <= 0:
raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
self._shape[idx] = dim
cdef char order
if mode == 'fortran':
order = b'F'
self.mode = u'fortran'
elif mode == 'c':
order = b'C'
self.mode = u'c'
else:
raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
self.len = fill_contig_strides_array(self._shape, self._strides,
itemsize, self.ndim, order)
self.free_data = allocate_buffer
self.dtype_is_object = format == b'O'
if allocate_buffer:
# use malloc() for backwards compatibility
# in case external code wants to change the data pointer
self.data = <char *>malloc(self.len)
if not self.data:
raise MemoryError("unable to allocate array data.")
if self.dtype_is_object:
p = <PyObject **> self.data
for i in range(self.len / itemsize):
p[i] = Py_None
Py_INCREF(Py_None)
@cname('getbuffer')
def __getbuffer__(self, Py_buffer *info, int flags):
cdef int bufmode = -1
if self.mode == u"c":
bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
elif self.mode == u"fortran":
bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
if not (flags & bufmode):
raise ValueError("Can only create a buffer that is contiguous in memory.")
info.buf = self.data
info.len = self.len
info.ndim = self.ndim
info.shape = self._shape
info.strides = self._strides
info.suboffsets = NULL
info.itemsize = self.itemsize
info.readonly = 0
if flags & PyBUF_FORMAT:
info.format = self.format
else:
info.format = NULL
info.obj = self
__pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
def __dealloc__(array self):
if self.callback_free_data != NULL:
self.callback_free_data(self.data)
elif self.free_data:
if self.dtype_is_object:
refcount_objects_in_slice(self.data, self._shape,
self._strides, self.ndim, False)
free(self.data)
PyObject_Free(self._shape)
@property
def memview(self):
return self.get_memview()
@cname('get_memview')
cdef get_memview(self):
flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
return memoryview(self, flags, self.dtype_is_object)
def __len__(self):
return self._shape[0]
def __getattr__(self, attr):
return getattr(self.memview, attr)
def __getitem__(self, item):
return self.memview[item]
def __setitem__(self, item, value):
self.memview[item] = value
@cname("__pyx_array_new")
cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format,
char *mode, char *buf):
cdef array result
if buf == NULL:
result = array(shape, itemsize, format, mode.decode('ASCII'))
else:
result = array(shape, itemsize, format, mode.decode('ASCII'),
allocate_buffer=False)
result.data = buf
return result
#
### Memoryview constants and cython.view.memoryview class
#
# Disable generic_contiguous, as it makes trouble verifying contiguity:
# - 'contiguous' or '::1' means the dimension is contiguous with dtype
# - 'indirect_contiguous' means a contiguous list of pointers
# - dtype contiguous must be contiguous in the first or last dimension
# from the start, or from the dimension following the last indirect dimension
#
# e.g.
# int[::indirect_contiguous, ::contiguous, :]
#
# is valid (list of pointers to 2d fortran-contiguous array), but
#
# int[::generic_contiguous, ::contiguous, :]
#
# would mean you'd have to assert dimension 0 to be indirect (and pointer contiguous) at runtime.
# So it doesn't bring any performance benefit, and it's only confusing.
@cname('__pyx_MemviewEnum')
cdef class Enum(object):
cdef object name
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
cdef generic = Enum("")
cdef strided = Enum("") # default
cdef indirect = Enum("")
# Disable generic_contiguous, as it is a troublemaker
#cdef generic_contiguous = Enum("")
cdef contiguous = Enum("")
cdef indirect_contiguous = Enum("")
# 'follow' is implied when the first or last axis is ::1
@cname('__pyx_align_pointer')
cdef void *align_pointer(void *memory, size_t alignment) nogil:
"Align pointer memory on a given boundary"
cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
cdef size_t offset
with cython.cdivision(True):
offset = aligned_p % alignment
if offset > 0:
aligned_p += alignment - offset
return <void *> aligned_p
# pre-allocate thread locks for reuse
## note that this could be implemented in a more beautiful way in "normal" Cython,
## but this code gets merged into the user module and not everything works there.
DEF THREAD_LOCKS_PREALLOCATED = 8
cdef int __pyx_memoryview_thread_locks_used = 0
cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
PyThread_allocate_lock(),
PyThread_allocate_lock(),
PyThread_allocate_lock(),
PyThread_allocate_lock(),
PyThread_allocate_lock(),
PyThread_allocate_lock(),
PyThread_allocate_lock(),
PyThread_allocate_lock(),
]
@cname('__pyx_memoryview')
cdef class memoryview(object):
cdef object obj
cdef object _size
cdef object _array_interface
cdef PyThread_type_lock lock
# the following array will contain a single __pyx_atomic int with
# suitable alignment
cdef __pyx_atomic_int acquisition_count[2]
cdef __pyx_atomic_int *acquisition_count_aligned_p
cdef Py_buffer view
cdef int flags
cdef bint dtype_is_object
cdef __Pyx_TypeInfo *typeinfo
def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
self.obj = obj
self.flags = flags
if type(self) is memoryview or obj is not None:
__Pyx_GetBuffer(obj, &self.view, flags)
if self.view.obj == NULL:
(<__pyx_buffer *> &self.view).obj = Py_None
Py_INCREF(Py_None)
global __pyx_memoryview_thread_locks_used
if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
__pyx_memoryview_thread_locks_used += 1
if self.lock is NULL:
self.lock = PyThread_allocate_lock()
if self.lock is NULL:
raise MemoryError
if flags & PyBUF_FORMAT:
self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
else:
self.dtype_is_object = dtype_is_object
self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
&self.acquisition_count[0], sizeof(__pyx_atomic_int))
self.typeinfo = NULL
def __dealloc__(memoryview self):
if self.obj is not None:
__Pyx_ReleaseBuffer(&self.view)
elif (<__pyx_buffer *> &self.view).obj == Py_None:
# Undo the incref in __cinit__() above.
(<__pyx_buffer *> &self.view).obj = NULL
Py_DECREF(Py_None)
cdef int i
global __pyx_memoryview_thread_locks_used
if self.lock != NULL:
for i in range(__pyx_memoryview_thread_locks_used):
if __pyx_memoryview_thread_locks[i] is self.lock:
__pyx_memoryview_thread_locks_used -= 1
if i != __pyx_memoryview_thread_locks_used:
__pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
break
else:
PyThread_free_lock(self.lock)
cdef char *get_item_pointer(memoryview self, object index) except NULL:
cdef Py_ssize_t dim
        cdef char *itemp = <char *> self.view.buf
for dim, idx in enumerate(index):
itemp = pybuffer_index(&self.view, itemp, idx, dim)
return itemp
#@cname('__pyx_memoryview_getitem')
def __getitem__(memoryview self, object index):
if index is Ellipsis:
return self
have_slices, indices = _unellipsify(index, self.view.ndim)
cdef char *itemp
if have_slices:
return memview_slice(self, indices)
else:
itemp = self.get_item_pointer(indices)
return self.convert_item_to_object(itemp)
def __setitem__(memoryview self, object index, object value):
if self.view.readonly:
raise TypeError("Cannot assign to read-only memoryview")
have_slices, index = _unellipsify(index, self.view.ndim)
if have_slices:
obj = self.is_slice(value)
if obj:
self.setitem_slice_assignment(self[index], obj)
else:
self.setitem_slice_assign_scalar(self[index], value)
else:
self.setitem_indexed(index, value)
cdef is_slice(self, obj):
if not isinstance(obj, memoryview):
try:
obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
self.dtype_is_object)
except TypeError:
return None
return obj
cdef setitem_slice_assignment(self, dst, src):
cdef {{memviewslice_name}} dst_slice
cdef {{memviewslice_name}} src_slice
memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
get_slice_from_memview(dst, &dst_slice)[0],
src.ndim, dst.ndim, self.dtype_is_object)
cdef setitem_slice_assign_scalar(self, memoryview dst, value):
cdef int array[128]
cdef void *tmp = NULL
cdef void *item
cdef {{memviewslice_name}} *dst_slice
cdef {{memviewslice_name}} tmp_slice
dst_slice = get_slice_from_memview(dst, &tmp_slice)
        if <size_t> self.view.itemsize > sizeof(array):
tmp = PyMem_Malloc(self.view.itemsize)
if tmp == NULL:
raise MemoryError
item = tmp
else:
            item = <void *> array
try:
if self.dtype_is_object:
                (<PyObject **> item)[0] = <PyObject *> value
            else:
                self.assign_item_from_object(<char *> item, value)
# It would be easy to support indirect dimensions, but it's easier
# to disallow :)
if self.view.suboffsets != NULL:
assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
item, self.dtype_is_object)
finally:
PyMem_Free(tmp)
cdef setitem_indexed(self, index, value):
cdef char *itemp = self.get_item_pointer(index)
self.assign_item_from_object(itemp, value)
cdef convert_item_to_object(self, char *itemp):
"""Only used if instantiated manually by the user, or if Cython doesn't
know how to convert the type"""
import struct
cdef bytes bytesitem
# Do a manual and complete check here instead of this easy hack
bytesitem = itemp[:self.view.itemsize]
try:
result = struct.unpack(self.view.format, bytesitem)
except struct.error:
raise ValueError("Unable to convert item to object")
else:
if len(self.view.format) == 1:
return result[0]
return result
cdef assign_item_from_object(self, char *itemp, object value):
"""Only used if instantiated manually by the user, or if Cython doesn't
know how to convert the type"""
import struct
cdef char c
cdef bytes bytesvalue
cdef Py_ssize_t i
if isinstance(value, tuple):
bytesvalue = struct.pack(self.view.format, *value)
else:
bytesvalue = struct.pack(self.view.format, value)
for i, c in enumerate(bytesvalue):
itemp[i] = c
@cname('getbuffer')
def __getbuffer__(self, Py_buffer *info, int flags):
if flags & PyBUF_WRITABLE and self.view.readonly:
raise ValueError("Cannot create writable memory view from read-only memoryview")
if flags & PyBUF_ND:
info.shape = self.view.shape
else:
info.shape = NULL
if flags & PyBUF_STRIDES:
info.strides = self.view.strides
else:
info.strides = NULL
if flags & PyBUF_INDIRECT:
info.suboffsets = self.view.suboffsets
else:
info.suboffsets = NULL
if flags & PyBUF_FORMAT:
info.format = self.view.format
else:
info.format = NULL
info.buf = self.view.buf
info.ndim = self.view.ndim
info.itemsize = self.view.itemsize
info.len = self.view.len
info.readonly = self.view.readonly
info.obj = self
    __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
# Some properties that have the same semantics as in NumPy
@property
def T(self):
cdef _memoryviewslice result = memoryview_copy(self)
transpose_memslice(&result.from_slice)
return result
@property
def base(self):
return self.obj
@property
def shape(self):
return tuple([length for length in self.view.shape[:self.view.ndim]])
@property
def strides(self):
if self.view.strides == NULL:
# Note: we always ask for strides, so if this is not set it's a bug
raise ValueError("Buffer view does not expose strides")
return tuple([stride for stride in self.view.strides[:self.view.ndim]])
@property
def suboffsets(self):
if self.view.suboffsets == NULL:
return (-1,) * self.view.ndim
return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
@property
def ndim(self):
return self.view.ndim
@property
def itemsize(self):
return self.view.itemsize
@property
def nbytes(self):
return self.size * self.view.itemsize
@property
def size(self):
if self._size is None:
result = 1
for length in self.view.shape[:self.view.ndim]:
result *= length
self._size = result
return self._size
def __len__(self):
if self.view.ndim >= 1:
return self.view.shape[0]
return 0
    def __repr__(self):
        return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
                                               id(self))
    def __str__(self):
        return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
# Support the same attributes as memoryview slices
def is_c_contig(self):
cdef {{memviewslice_name}} *mslice
cdef {{memviewslice_name}} tmp
mslice = get_slice_from_memview(self, &tmp)
return slice_is_contig(mslice[0], 'C', self.view.ndim)
def is_f_contig(self):
cdef {{memviewslice_name}} *mslice
cdef {{memviewslice_name}} tmp
mslice = get_slice_from_memview(self, &tmp)
return slice_is_contig(mslice[0], 'F', self.view.ndim)
def copy(self):
cdef {{memviewslice_name}} mslice
cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
slice_copy(self, &mslice)
mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
self.view.itemsize,
flags|PyBUF_C_CONTIGUOUS,
self.dtype_is_object)
return memoryview_copy_from_slice(self, &mslice)
def copy_fortran(self):
cdef {{memviewslice_name}} src, dst
cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
slice_copy(self, &src)
dst = slice_copy_contig(&src, "fortran", self.view.ndim,
self.view.itemsize,
flags|PyBUF_F_CONTIGUOUS,
self.dtype_is_object)
return memoryview_copy_from_slice(self, &dst)
@cname('__pyx_memoryview_new')
cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
cdef memoryview result = memoryview(o, flags, dtype_is_object)
result.typeinfo = typeinfo
return result
@cname('__pyx_memoryview_check')
cdef inline bint memoryview_check(object o):
return isinstance(o, memoryview)
cdef tuple _unellipsify(object index, int ndim):
"""
Replace all ellipses with full slices and fill incomplete indices with
full slices.
"""
if not isinstance(index, tuple):
tup = (index,)
else:
tup = index
result = []
have_slices = False
seen_ellipsis = False
for idx, item in enumerate(tup):
if item is Ellipsis:
if not seen_ellipsis:
result.extend([slice(None)] * (ndim - len(tup) + 1))
seen_ellipsis = True
else:
result.append(slice(None))
have_slices = True
else:
if not isinstance(item, slice) and not PyIndex_Check(item):
raise TypeError("Cannot index with type '%s'" % type(item))
have_slices = have_slices or isinstance(item, slice)
result.append(item)
nslices = ndim - len(result)
if nslices:
result.extend([slice(None)] * nslices)
return have_slices or nslices, tuple(result)
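# e.g. _unellipsify((Ellipsis, 0), ndim=3) returns
# (True, (slice(None), slice(None), 0)): the Ellipsis expands into two full
# slices and the trailing integer index is kept as-is.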
cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
for suboffset in suboffsets[:ndim]:
if suboffset >= 0:
raise ValueError("Indirect dimensions not supported")
#
### Slicing a memoryview
#
@cname('__pyx_memview_slice')
cdef memoryview memview_slice(memoryview memview, object indices):
cdef int new_ndim = 0, suboffset_dim = -1, dim
cdef bint negative_step
cdef {{memviewslice_name}} src, dst
cdef {{memviewslice_name}} *p_src
# dst is copied by value in memoryview_fromslice -- initialize it
# src is never copied
memset(&dst, 0, sizeof(dst))
cdef _memoryviewslice memviewsliceobj
assert memview.view.ndim > 0
if isinstance(memview, _memoryviewslice):
memviewsliceobj = memview
p_src = &memviewsliceobj.from_slice
else:
slice_copy(memview, &src)
p_src = &src
# Note: don't use variable src at this point
# SubNote: we should be able to declare variables in blocks...
# memoryview_fromslice() will inc our dst slice
dst.memview = p_src.memview
dst.data = p_src.data
# Put everything in temps to avoid this bloody warning:
# "Argument evaluation order in C function call is undefined and
# may not be as expected"
cdef {{memviewslice_name}} *p_dst = &dst
cdef int *p_suboffset_dim = &suboffset_dim
cdef Py_ssize_t start, stop, step
cdef bint have_start, have_stop, have_step
for dim, index in enumerate(indices):
if PyIndex_Check(index):
slice_memviewslice(
p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
dim, new_ndim, p_suboffset_dim,
index, 0, 0, # start, stop, step
0, 0, 0, # have_{start,stop,step}
False)
elif index is None:
p_dst.shape[new_ndim] = 1
p_dst.strides[new_ndim] = 0
p_dst.suboffsets[new_ndim] = -1
new_ndim += 1
else:
start = index.start or 0
stop = index.stop or 0
step = index.step or 0
have_start = index.start is not None
have_stop = index.stop is not None
have_step = index.step is not None
slice_memviewslice(
p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
dim, new_ndim, p_suboffset_dim,
start, stop, step,
have_start, have_stop, have_step,
True)
new_ndim += 1
if isinstance(memview, _memoryviewslice):
return memoryview_fromslice(dst, new_ndim,
memviewsliceobj.to_object_func,
memviewsliceobj.to_dtype_func,
memview.dtype_is_object)
else:
return memoryview_fromslice(dst, new_ndim, NULL, NULL,
memview.dtype_is_object)
#
### Slicing in a single dimension of a memoryviewslice
#
cdef extern from "":
void abort() nogil
void printf(char *s, ...) nogil
cdef extern from "":
ctypedef struct FILE
FILE *stderr
int fputs(char *s, FILE *stream)
cdef extern from "pystate.h":
void PyThreadState_Get() nogil
# These are not actually nogil, but we check for the GIL before calling them
void PyErr_SetString(PyObject *type, char *msg) nogil
PyObject *PyErr_Format(PyObject *exc, char *msg, ...) nogil
@cname('__pyx_memoryview_slice_memviewslice')
cdef int slice_memviewslice(
{{memviewslice_name}} *dst,
Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
int dim, int new_ndim, int *suboffset_dim,
Py_ssize_t start, Py_ssize_t stop, Py_ssize_t step,
int have_start, int have_stop, int have_step,
bint is_slice) nogil except -1:
"""
Create a new slice dst given slice src.
dim - the current src dimension (indexing will make dimensions
disappear)
    new_ndim - the new dst dimension
suboffset_dim - pointer to a single int initialized to -1 to keep track of
where slicing offsets should be added
"""
cdef Py_ssize_t new_shape
cdef bint negative_step
if not is_slice:
# index is a normal integer-like index
if start < 0:
start += shape
if not 0 <= start < shape:
_err_dim(IndexError, "Index out of bounds (axis %d)", dim)
else:
# index is a slice
negative_step = have_step != 0 and step < 0
if have_step and step == 0:
_err_dim(ValueError, "Step may not be zero (axis %d)", dim)
# check our bounds and set defaults
if have_start:
if start < 0:
start += shape
if start < 0:
start = 0
elif start >= shape:
if negative_step:
start = shape - 1
else:
start = shape
else:
if negative_step:
start = shape - 1
else:
start = 0
if have_stop:
if stop < 0:
stop += shape
if stop < 0:
stop = 0
elif stop > shape:
stop = shape
else:
if negative_step:
stop = -1
else:
stop = shape
if not have_step:
step = 1
# len = ceil( (stop - start) / step )
with cython.cdivision(True):
new_shape = (stop - start) // step
if (stop - start) - step * new_shape:
new_shape += 1
if new_shape < 0:
new_shape = 0
# shape/strides/suboffsets
dst.strides[new_ndim] = stride * step
dst.shape[new_ndim] = new_shape
dst.suboffsets[new_ndim] = suboffset
    # Add the slicing or indexing offsets to the right suboffset or base data pointer
if suboffset_dim[0] < 0:
dst.data += start * stride
else:
dst.suboffsets[suboffset_dim[0]] += start * stride
if suboffset >= 0:
if not is_slice:
if new_ndim == 0:
                dst.data = (<char **> dst.data)[0] + suboffset
else:
_err_dim(IndexError, "All dimensions preceding dimension %d "
"must be indexed and not sliced", dim)
else:
suboffset_dim[0] = new_ndim
return 0
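# Worked example for the slice path above: with shape == 10, the Python slice
# [-3:] arrives with start == -3 and no stop/step; start wraps around to 7,
# stop defaults to shape (10), step defaults to 1, and
# new_shape == (10 - 7) // 1 == 3.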
#
### Index a memoryview
#
@cname('__pyx_pybuffer_index')
cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
Py_ssize_t dim) except NULL:
cdef Py_ssize_t shape, stride, suboffset = -1
cdef Py_ssize_t itemsize = view.itemsize
cdef char *resultp
if view.ndim == 0:
shape = view.len / itemsize
stride = itemsize
else:
shape = view.shape[dim]
stride = view.strides[dim]
if view.suboffsets != NULL:
suboffset = view.suboffsets[dim]
if index < 0:
index += view.shape[dim]
if index < 0:
raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
if index >= shape:
raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
resultp = bufp + index * stride
if suboffset >= 0:
        resultp = (<char **> resultp)[0] + suboffset
return resultp
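# e.g. for a 1D buffer of shape (5,), pybuffer_index(view, bufp, -1, 0) wraps
# the index to 4 and returns bufp + 4 * stride, following the suboffset
# indirection afterwards if the dimension is indirect.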
#
### Transposing a memoryviewslice
#
@cname('__pyx_memslice_transpose')
cdef int transpose_memslice({{memviewslice_name}} *memslice) nogil except 0:
cdef int ndim = memslice.memview.view.ndim
cdef Py_ssize_t *shape = memslice.shape
cdef Py_ssize_t *strides = memslice.strides
# reverse strides and shape
cdef int i, j
for i in range(ndim / 2):
j = ndim - 1 - i
strides[i], strides[j] = strides[j], strides[i]
shape[i], shape[j] = shape[j], shape[i]
if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
_err(ValueError, "Cannot transpose memoryview with indirect dimensions")
return 1
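# e.g. a C-contiguous 2D slice with shape (2, 3) and strides (24, 8) becomes
# shape (3, 2) with strides (8, 24): the transposed, F-contiguous view of the
# same data.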
#
### Creating new memoryview objects from slices and memoryviews
#
@cname('__pyx_memoryviewslice')
cdef class _memoryviewslice(memoryview):
"Internal class for passing memoryview slices to Python"
# We need this to keep our shape/strides/suboffset pointers valid
cdef {{memviewslice_name}} from_slice
    # We need this only to print its class's name
cdef object from_object
cdef object (*to_object_func)(char *)
cdef int (*to_dtype_func)(char *, object) except 0
def __dealloc__(self):
__PYX_XDEC_MEMVIEW(&self.from_slice, 1)
cdef convert_item_to_object(self, char *itemp):
if self.to_object_func != NULL:
return self.to_object_func(itemp)
else:
return memoryview.convert_item_to_object(self, itemp)
cdef assign_item_from_object(self, char *itemp, object value):
if self.to_dtype_func != NULL:
self.to_dtype_func(itemp, value)
else:
memoryview.assign_item_from_object(self, itemp, value)
@property
def base(self):
return self.from_object
    __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
@cname('__pyx_memoryview_fromslice')
cdef memoryview_fromslice({{memviewslice_name}} memviewslice,
int ndim,
object (*to_object_func)(char *),
int (*to_dtype_func)(char *, object) except 0,
bint dtype_is_object):
cdef _memoryviewslice result
if memviewslice.memview == Py_None:
return None
# assert 0 < ndim <= memviewslice.memview.view.ndim, (
# ndim, memviewslice.memview.view.ndim)
result = _memoryviewslice(None, 0, dtype_is_object)
result.from_slice = memviewslice
__PYX_INC_MEMVIEW(&memviewslice, 1)
    result.from_object = (<memoryview> memviewslice.memview).base
result.typeinfo = memviewslice.memview.typeinfo
result.view = memviewslice.memview.view
result.view.buf = memviewslice.data
result.view.ndim = ndim
(<__pyx_buffer *> &result.view).obj = Py_None
Py_INCREF(Py_None)
    if (<memoryview> memviewslice.memview).flags & PyBUF_WRITABLE:
result.flags = PyBUF_RECORDS
else:
result.flags = PyBUF_RECORDS_RO
result.view.shape = result.from_slice.shape
result.view.strides = result.from_slice.strides
# only set suboffsets if actually used, otherwise set to NULL to improve compatibility
result.view.suboffsets = NULL
for suboffset in result.from_slice.suboffsets[:ndim]:
if suboffset >= 0:
result.view.suboffsets = result.from_slice.suboffsets
break
result.view.len = result.view.itemsize
for length in result.view.shape[:ndim]:
result.view.len *= length
result.to_object_func = to_object_func
result.to_dtype_func = to_dtype_func
return result
@cname('__pyx_memoryview_get_slice_from_memoryview')
cdef {{memviewslice_name}} *get_slice_from_memview(memoryview memview,
{{memviewslice_name}} *mslice) except NULL:
cdef _memoryviewslice obj
if isinstance(memview, _memoryviewslice):
obj = memview
return &obj.from_slice
else:
slice_copy(memview, mslice)
return mslice
@cname('__pyx_memoryview_slice_copy')
cdef void slice_copy(memoryview memview, {{memviewslice_name}} *dst):
cdef int dim
cdef (Py_ssize_t*) shape, strides, suboffsets
shape = memview.view.shape
strides = memview.view.strides
suboffsets = memview.view.suboffsets
dst.memview = <__pyx_memoryview *> memview
    dst.data = <char *> memview.view.buf
for dim in range(memview.view.ndim):
dst.shape[dim] = shape[dim]
dst.strides[dim] = strides[dim]
dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
@cname('__pyx_memoryview_copy_object')
cdef memoryview_copy(memoryview memview):
"Create a new memoryview object"
cdef {{memviewslice_name}} memviewslice
slice_copy(memview, &memviewslice)
return memoryview_copy_from_slice(memview, &memviewslice)
@cname('__pyx_memoryview_copy_object_from_slice')
cdef memoryview_copy_from_slice(memoryview memview, {{memviewslice_name}} *memviewslice):
"""
Create a new memoryview object from a given memoryview object and slice.
"""
cdef object (*to_object_func)(char *)
cdef int (*to_dtype_func)(char *, object) except 0
if isinstance(memview, _memoryviewslice):
to_object_func = (<_memoryviewslice> memview).to_object_func
to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
else:
to_object_func = NULL
to_dtype_func = NULL
return memoryview_fromslice(memviewslice[0], memview.view.ndim,
to_object_func, to_dtype_func,
memview.dtype_is_object)
#
### Copy the contents of memoryview slices
#
cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
if arg < 0:
return -arg
else:
return arg
@cname('__pyx_get_best_slice_order')
cdef char get_best_order({{memviewslice_name}} *mslice, int ndim) nogil:
"""
Figure out the best memory access order for a given slice.
"""
cdef int i
cdef Py_ssize_t c_stride = 0
cdef Py_ssize_t f_stride = 0
for i in range(ndim - 1, -1, -1):
if mslice.shape[i] > 1:
c_stride = mslice.strides[i]
break
for i in range(ndim):
if mslice.shape[i] > 1:
f_stride = mslice.strides[i]
break
if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
return 'C'
else:
return 'F'
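# e.g. a C-contiguous slice with shape (2, 3) and itemsize 8 has strides
# (24, 8): c_stride == 8 (last non-degenerate dimension), f_stride == 24
# (first), so 'C' is returned.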
@cython.cdivision(True)
cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides,
char *dst_data, Py_ssize_t *dst_strides,
Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
int ndim, size_t itemsize) nogil:
# Note: src_extent is 1 if we're broadcasting
# dst_extent always >= src_extent as we don't do reductions
cdef Py_ssize_t i
cdef Py_ssize_t src_extent = src_shape[0]
cdef Py_ssize_t dst_extent = dst_shape[0]
cdef Py_ssize_t src_stride = src_strides[0]
cdef Py_ssize_t dst_stride = dst_strides[0]
if ndim == 1:
if (src_stride > 0 and dst_stride > 0 and
src_stride == itemsize == dst_stride):
memcpy(dst_data, src_data, itemsize * dst_extent)
else:
for i in range(dst_extent):
memcpy(dst_data, src_data, itemsize)
src_data += src_stride
dst_data += dst_stride
else:
for i in range(dst_extent):
_copy_strided_to_strided(src_data, src_strides + 1,
dst_data, dst_strides + 1,
src_shape + 1, dst_shape + 1,
ndim - 1, itemsize)
src_data += src_stride
dst_data += dst_stride
cdef void copy_strided_to_strided({{memviewslice_name}} *src,
{{memviewslice_name}} *dst,
int ndim, size_t itemsize) nogil:
_copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides,
src.shape, dst.shape, ndim, itemsize)
@cname('__pyx_memoryview_slice_get_size')
cdef Py_ssize_t slice_get_size({{memviewslice_name}} *src, int ndim) nogil:
"Return the size of the memory occupied by the slice in number of bytes"
cdef Py_ssize_t shape, size = src.memview.view.itemsize
for shape in src.shape[:ndim]:
size *= shape
return size
@cname('__pyx_fill_contig_strides_array')
cdef Py_ssize_t fill_contig_strides_array(
Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
int ndim, char order) nogil:
"""
Fill the strides array for a slice with C or F contiguous strides.
This is like PyBuffer_FillContiguousStrides, but compatible with py < 2.6
"""
cdef int idx
if order == 'F':
for idx in range(ndim):
strides[idx] = stride
stride *= shape[idx]
else:
for idx in range(ndim - 1, -1, -1):
strides[idx] = stride
stride *= shape[idx]
return stride
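# e.g. shape (2, 3) with stride == itemsize == 8 yields strides (24, 8) for
# order 'C' and strides (8, 16) for order 'F'; the return value is the total
# extent in bytes (48) in both cases.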
@cname('__pyx_memoryview_copy_data_to_temp')
cdef void *copy_data_to_temp({{memviewslice_name}} *src,
{{memviewslice_name}} *tmpslice,
char order,
int ndim) nogil except NULL:
"""
Copy a direct slice to temporary contiguous memory. The caller should free
the result when done.
"""
cdef int i
cdef void *result
cdef size_t itemsize = src.memview.view.itemsize
cdef size_t size = slice_get_size(src, ndim)
result = malloc(size)
if not result:
_err(MemoryError, NULL)
# tmpslice[0] = src
tmpslice.data = result
tmpslice.memview = src.memview
for i in range(ndim):
tmpslice.shape[i] = src.shape[i]
tmpslice.suboffsets[i] = -1
fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
ndim, order)
# We need to broadcast strides again
for i in range(ndim):
if tmpslice.shape[i] == 1:
tmpslice.strides[i] = 0
if slice_is_contig(src[0], order, ndim):
memcpy(result, src.data, size)
else:
copy_strided_to_strided(src, tmpslice, ndim, itemsize)
return result
# Use 'with gil' functions and avoid 'with gil' blocks, as the code within the blocks
# has temporaries that need the GIL to clean up
@cname('__pyx_memoryview_err_extents')
cdef int _err_extents(int i, Py_ssize_t extent1,
Py_ssize_t extent2) except -1 with gil:
raise ValueError("got differing extents in dimension %d (got %d and %d)" %
(i, extent1, extent2))
@cname('__pyx_memoryview_err_dim')
cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
raise error(msg.decode('ascii') % dim)
@cname('__pyx_memoryview_err')
cdef int _err(object error, char *msg) except -1 with gil:
if msg != NULL:
raise error(msg.decode('ascii'))
else:
raise error
@cname('__pyx_memoryview_copy_contents')
cdef int memoryview_copy_contents({{memviewslice_name}} src,
{{memviewslice_name}} dst,
int src_ndim, int dst_ndim,
bint dtype_is_object) nogil except -1:
"""
Copy memory from slice src to slice dst.
Check for overlapping memory and verify the shapes.
"""
cdef void *tmpdata = NULL
cdef size_t itemsize = src.memview.view.itemsize
cdef int i
cdef char order = get_best_order(&src, src_ndim)
cdef bint broadcasting = False
cdef bint direct_copy = False
cdef {{memviewslice_name}} tmp
if src_ndim < dst_ndim:
broadcast_leading(&src, src_ndim, dst_ndim)
elif dst_ndim < src_ndim:
broadcast_leading(&dst, dst_ndim, src_ndim)
cdef int ndim = max(src_ndim, dst_ndim)
for i in range(ndim):
if src.shape[i] != dst.shape[i]:
if src.shape[i] == 1:
broadcasting = True
src.strides[i] = 0
else:
_err_extents(i, dst.shape[i], src.shape[i])
if src.suboffsets[i] >= 0:
_err_dim(ValueError, "Dimension %d is not direct", i)
if slices_overlap(&src, &dst, ndim, itemsize):
# slices overlap, copy to temp, copy temp to dst
if not slice_is_contig(src, order, ndim):
order = get_best_order(&dst, ndim)
tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
src = tmp
if not broadcasting:
# See if both slices have equal contiguity, in that case perform a
# direct copy. This only works when we are not broadcasting.
if slice_is_contig(src, 'C', ndim):
direct_copy = slice_is_contig(dst, 'C', ndim)
elif slice_is_contig(src, 'F', ndim):
direct_copy = slice_is_contig(dst, 'F', ndim)
if direct_copy:
# Contiguous slices with same order
refcount_copying(&dst, dtype_is_object, ndim, False)
memcpy(dst.data, src.data, slice_get_size(&src, ndim))
refcount_copying(&dst, dtype_is_object, ndim, True)
free(tmpdata)
return 0
if order == 'F' == get_best_order(&dst, ndim):
# see if both slices have Fortran order, transpose them to match our
# C-style indexing order
transpose_memslice(&src)
transpose_memslice(&dst)
refcount_copying(&dst, dtype_is_object, ndim, False)
copy_strided_to_strided(&src, &dst, ndim, itemsize)
refcount_copying(&dst, dtype_is_object, ndim, True)
free(tmpdata)
return 0
@cname('__pyx_memoryview_broadcast_leading')
cdef void broadcast_leading({{memviewslice_name}} *mslice,
int ndim,
int ndim_other) nogil:
cdef int i
cdef int offset = ndim_other - ndim
for i in range(ndim - 1, -1, -1):
mslice.shape[i + offset] = mslice.shape[i]
mslice.strides[i + offset] = mslice.strides[i]
mslice.suboffsets[i + offset] = mslice.suboffsets[i]
for i in range(offset):
mslice.shape[i] = 1
mslice.strides[i] = mslice.strides[0]
mslice.suboffsets[i] = -1
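# e.g. broadcasting a 1D slice of shape (5,) against a 3D destination yields
# shape (1, 1, 5); the two new leading dimensions get extent 1, suboffset -1
# and a dummy stride, so they never contribute to address computations.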
#
### Take care of refcounting the objects in slices. Do this separately from any copying,
### to minimize acquiring the GIL
#
@cname('__pyx_memoryview_refcount_copying')
cdef void refcount_copying({{memviewslice_name}} *dst, bint dtype_is_object,
int ndim, bint inc) nogil:
# incref or decref the objects in the destination slice if the dtype is
# object
if dtype_is_object:
refcount_objects_in_slice_with_gil(dst.data, dst.shape,
dst.strides, ndim, inc)
@cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape,
Py_ssize_t *strides, int ndim,
bint inc) with gil:
refcount_objects_in_slice(data, shape, strides, ndim, inc)
@cname('__pyx_memoryview_refcount_objects_in_slice')
cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape,
Py_ssize_t *strides, int ndim, bint inc):
cdef Py_ssize_t i
for i in range(shape[0]):
if ndim == 1:
if inc:
                Py_INCREF((<PyObject **> data)[0])
            else:
                Py_DECREF((<PyObject **> data)[0])
else:
refcount_objects_in_slice(data, shape + 1, strides + 1,
ndim - 1, inc)
data += strides[0]
#
### Scalar to slice assignment
#
@cname('__pyx_memoryview_slice_assign_scalar')
cdef void slice_assign_scalar({{memviewslice_name}} *dst, int ndim,
size_t itemsize, void *item,
bint dtype_is_object) nogil:
refcount_copying(dst, dtype_is_object, ndim, False)
_slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
itemsize, item)
refcount_copying(dst, dtype_is_object, ndim, True)
@cname('__pyx_memoryview__slice_assign_scalar')
cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape,
Py_ssize_t *strides, int ndim,
size_t itemsize, void *item) nogil:
cdef Py_ssize_t i
cdef Py_ssize_t stride = strides[0]
cdef Py_ssize_t extent = shape[0]
if ndim == 1:
for i in range(extent):
memcpy(data, item, itemsize)
data += stride
else:
for i in range(extent):
_slice_assign_scalar(data, shape + 1, strides + 1,
ndim - 1, itemsize, item)
data += stride
############### BufferFormatFromTypeInfo ###############
cdef extern from *:
ctypedef struct __Pyx_StructField
cdef enum:
__PYX_BUF_FLAGS_PACKED_STRUCT
__PYX_BUF_FLAGS_INTEGER_COMPLEX
ctypedef struct __Pyx_TypeInfo:
char* name
__Pyx_StructField* fields
size_t size
size_t arraysize[8]
int ndim
char typegroup
char is_unsigned
int flags
ctypedef struct __Pyx_StructField:
__Pyx_TypeInfo* type
char* name
size_t offset
ctypedef struct __Pyx_BufFmt_StackElem:
__Pyx_StructField* field
size_t parent_offset
#ctypedef struct __Pyx_BufFmt_Context:
# __Pyx_StructField root
    #  __Pyx_BufFmt_StackElem* head
struct __pyx_typeinfo_string:
char string[3]
__pyx_typeinfo_string __Pyx_TypeInfoToFormat(__Pyx_TypeInfo *)
@cname('__pyx_format_from_typeinfo')
cdef bytes format_from_typeinfo(__Pyx_TypeInfo *type):
cdef __Pyx_StructField *field
cdef __pyx_typeinfo_string fmt
cdef bytes part, result
if type.typegroup == 'S':
assert type.fields != NULL
assert type.fields.type != NULL
if type.flags & __PYX_BUF_FLAGS_PACKED_STRUCT:
alignment = b'^'
else:
alignment = b''
parts = [b"T{"]
field = type.fields
while field.type:
part = format_from_typeinfo(field.type)
parts.append(part + b':' + field.name + b':')
field += 1
result = alignment.join(parts) + b'}'
else:
fmt = __Pyx_TypeInfoToFormat(type)
if type.arraysize[0]:
extents = [unicode(type.arraysize[i]) for i in range(type.ndim)]
result = (u"(%s)" % u','.join(extents)).encode('ascii') + fmt.string
else:
result = fmt.string
return result
Cython-0.29.28/Cython/Utility/MemoryView_C.c
////////// MemviewSliceStruct.proto //////////
//@proto_block: utility_code_proto_before_types
/* memoryview slice struct */
struct {{memview_struct_name}};
typedef struct {
struct {{memview_struct_name}} *memview;
char *data;
Py_ssize_t shape[{{max_dims}}];
Py_ssize_t strides[{{max_dims}}];
Py_ssize_t suboffsets[{{max_dims}}];
} {{memviewslice_name}};
// used for "len(memviewslice)"
#define __Pyx_MemoryView_Len(m) (m.shape[0])
/////////// Atomics.proto /////////////
//@proto_block: utility_code_proto_before_types
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
// todo: Portland pgcc, maybe OS X's OSAtomicIncrement32,
// libatomic + autotools-like distutils support? Such a pain...
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 || \
                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2)) && \
!defined(__i386__)
/* gcc >= 4.1.2 */
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
/* msvc */
    #include <intrin.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview) \
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview) \
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview) \
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
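// Both the atomic and the lock-based helpers return the counter value from
// *before* the update, so "__pyx_add_acquisition_count(mv) == 0" detects the
// first acquisition and "__pyx_sub_acquisition_count(mv) == 1" the last
// release (see __Pyx_INC_MEMVIEW / __Pyx_XDEC_MEMVIEW below).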
/////////////// ObjectToMemviewSlice.proto ///////////////
static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *, int writable_flag);
////////// MemviewSliceInit.proto //////////
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW({{memviewslice_name}} *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *, int, int);
/////////////// MemviewSliceIndex.proto ///////////////
static CYTHON_INLINE char *__pyx_memviewslice_index_full(
const char *bufp, Py_ssize_t idx, Py_ssize_t stride, Py_ssize_t suboffset);
/////////////// ObjectToMemviewSlice ///////////////
//@requires: MemviewSliceValidateAndInit
static CYTHON_INLINE {{memviewslice_name}} {{funcname}}(PyObject *obj, int writable_flag) {
{{memviewslice_name}} result = {{memslice_init}};
__Pyx_BufFmt_StackElem stack[{{struct_nesting_depth}}];
int axes_specs[] = { {{axes_specs}} };
int retcode;
if (obj == Py_None) {
/* We don't bother to refcount None */
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, {{c_or_f_flag}},
{{buf_flag}} | writable_flag, {{ndim}},
&{{dtype_typeinfo}}, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/////////////// MemviewSliceValidateAndInit.proto ///////////////
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/////////////// MemviewSliceValidateAndInit ///////////////
//@requires: Buffer.c::TypeInfoCompare
//@requires: Buffer.c::BufferFormatStructs
//@requires: Buffer.c::BufferFormatCheck
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (unlikely(buf->strides[dim] != sizeof(void *))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (unlikely(buf->strides[dim] != buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (unlikely(stride < buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (unlikely(buf->suboffsets)) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
// Todo: without PyBUF_INDIRECT we may not have suboffset information, i.e., the
// ptr may not be set to NULL but may be uninitialized?
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
        for (i = ndim - 1; i > -1; i--) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
/* We have a matching dtype, skip format parsing */
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (unlikely(buf->ndim != ndim)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
}
if (unlikely((unsigned) buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
/* Check axes */
if (buf->len > 0) {
// 0-sized arrays do not undergo these checks since their strides are
// irrelevant and they are always both C- and F-contiguous.
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
goto fail;
if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
goto fail;
}
/* Check contiguity */
if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
goto fail;
}
/* Initialize */
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
////////// MemviewSliceInit //////////
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
{{memviewslice_name}} *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (unlikely(memviewslice->memview || memviewslice->data)) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
/* Don't decref, the memoryview may be borrowed. Let the caller do the cleanup */
/* __Pyx_XDECREF(memviewslice->memview); */
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
#ifndef Py_NO_RETURN
// available since Py3.3
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW({{memviewslice_name}} *memslice, int have_gil, int lineno)
{
int first_time;
struct {{memview_struct_name}} *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None))
return; /* allow uninitialized memoryview assignment */
if (unlikely(__pyx_get_slice_count(memview) < 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (unlikely(first_time)) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW({{memviewslice_name}} *memslice,
int have_gil, int lineno) {
int last_time;
struct {{memview_struct_name}} *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None)) {
// we do not ref-count None
memslice->memview = NULL;
return;
}
if (unlikely(__pyx_get_slice_count(memview) <= 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (unlikely(last_time)) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
////////// MemviewSliceCopyTemplate.proto //////////
static {{memviewslice_name}}
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
////////// MemviewSliceCopyTemplate //////////
static {{memviewslice_name}}
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = {{memslice_init}};
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (unlikely(from_mvs->suboffsets[i] >= 0)) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
/* initialize new_mvs */
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
////////// CopyContentsUtility.proto /////////
#define {{func_cname}}(slice) \
__pyx_memoryview_copy_new_contig(&slice, "{{mode}}", {{ndim}}, \
sizeof({{dtype_decl}}), {{contig_flag}}, \
{{dtype_is_object}})
////////// OverlappingSlices.proto //////////
static int __pyx_slices_overlap({{memviewslice_name}} *slice1,
{{memviewslice_name}} *slice2,
int ndim, size_t itemsize);
////////// OverlappingSlices //////////
/* Based on numpy's core/src/multiarray/array_assign.c */
/* Gets a half-open range [start, end) which contains the array data */
static void
__pyx_get_array_memory_extents({{memviewslice_name}} *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
/* Return a half-open range */
*out_start = start;
*out_end = end + itemsize;
}
/* Returns 1 if the arrays have overlapping data, 0 otherwise */
static int
__pyx_slices_overlap({{memviewslice_name}} *slice1,
{{memviewslice_name}} *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
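/* e.g. two 1D slices over the same base pointer p with itemsize 4 and
   extents [p, p+40) and [p+20, p+60) satisfy start1 < end2 && start2 < end1
   and are therefore reported as overlapping. */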
////////// MemviewSliceCheckContig.proto //////////
#define __pyx_memviewslice_is_contig_{{contig_type}}{{ndim}}(slice) \
__pyx_memviewslice_is_contig(slice, '{{contig_type}}', {{ndim}})
////////// MemviewSliceIsContig.proto //////////
static int __pyx_memviewslice_is_contig(const {{memviewslice_name}} mvs, char order, int ndim);/*proto*/
////////// MemviewSliceIsContig //////////
static int
__pyx_memviewslice_is_contig(const {{memviewslice_name}} mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
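// The loop walks the dimensions innermost-first ('C': last to first, 'F':
// first to last) and requires each stride to equal the running product of
// itemsize and the extents seen so far; e.g. a C-contiguous (2, 3) slice of
// 4-byte items must have strides (12, 4).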
/////////////// MemviewSliceIndex ///////////////
static CYTHON_INLINE char *
__pyx_memviewslice_index_full(const char *bufp, Py_ssize_t idx,
Py_ssize_t stride, Py_ssize_t suboffset)
{
bufp = bufp + idx * stride;
if (suboffset >= 0) {
bufp = *((char **) bufp) + suboffset;
}
return (char *) bufp;
}
/////////////// MemviewDtypeToObject.proto ///////////////
{{if to_py_function}}
static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp); /* proto */
{{endif}}
{{if from_py_function}}
static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj); /* proto */
{{endif}}
/////////////// MemviewDtypeToObject ///////////////
{{#__pyx_memview_<dtype_name>_to_object}}
/* Convert a dtype to or from a Python object */
{{if to_py_function}}
static CYTHON_INLINE PyObject *{{get_function}}(const char *itemp) {
return (PyObject *) {{to_py_function}}(*({{dtype}} *) itemp);
}
{{endif}}
{{if from_py_function}}
static CYTHON_INLINE int {{set_function}}(const char *itemp, PyObject *obj) {
{{dtype}} value = {{from_py_function}}(obj);
if ({{error_condition}})
return 0;
*({{dtype}} *) itemp = value;
return 1;
}
{{endif}}
/////////////// MemviewObjectToObject.proto ///////////////
/* Function callbacks (for memoryview object) for dtype object */
static PyObject *{{get_function}}(const char *itemp); /* proto */
static int {{set_function}}(const char *itemp, PyObject *obj); /* proto */
/////////////// MemviewObjectToObject ///////////////
static PyObject *{{get_function}}(const char *itemp) {
PyObject *result = *(PyObject **) itemp;
Py_INCREF(result);
return result;
}
static int {{set_function}}(const char *itemp, PyObject *obj) {
Py_INCREF(obj);
Py_DECREF(*(PyObject **) itemp);
*(PyObject **) itemp = obj;
return 1;
}
/////////// ToughSlice //////////
/* Dimension is indexed with 'start:stop:step' */
if (unlikely(__pyx_memoryview_slice_memviewslice(
&{{dst}},
{{src}}.shape[{{dim}}], {{src}}.strides[{{dim}}], {{src}}.suboffsets[{{dim}}],
{{dim}},
{{new_ndim}},
&{{get_suboffset_dim()}},
{{start}},
{{stop}},
{{step}},
{{int(have_start)}},
{{int(have_stop)}},
{{int(have_step)}},
1) < 0))
{
{{error_goto}}
}
////////// SimpleSlice //////////
/* Dimension is indexed with ':' only */
{{dst}}.shape[{{new_ndim}}] = {{src}}.shape[{{dim}}];
{{dst}}.strides[{{new_ndim}}] = {{src}}.strides[{{dim}}];
{{if access == 'direct'}}
{{dst}}.suboffsets[{{new_ndim}}] = -1;
{{else}}
{{dst}}.suboffsets[{{new_ndim}}] = {{src}}.suboffsets[{{dim}}];
if ({{src}}.suboffsets[{{dim}}] >= 0)
{{get_suboffset_dim()}} = {{new_ndim}};
{{endif}}
////////// SliceIndex //////////
// Dimension is indexed with an integer, we could use the ToughSlice
// approach, but this is faster
{
Py_ssize_t __pyx_tmp_idx = {{idx}};
{{if wraparound or boundscheck}}
Py_ssize_t __pyx_tmp_shape = {{src}}.shape[{{dim}}];
{{endif}}
Py_ssize_t __pyx_tmp_stride = {{src}}.strides[{{dim}}];
{{if wraparound}}
if (__pyx_tmp_idx < 0)
__pyx_tmp_idx += __pyx_tmp_shape;
{{endif}}
{{if boundscheck}}
if (unlikely(!__Pyx_is_valid_index(__pyx_tmp_idx, __pyx_tmp_shape))) {
{{if not have_gil}}
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();
#endif
{{endif}}
PyErr_SetString(PyExc_IndexError,
"Index out of bounds (axis {{dim}})");
{{if not have_gil}}
#ifdef WITH_THREAD
PyGILState_Release(__pyx_gilstate_save);
#endif
{{endif}}
{{error_goto}}
}
{{endif}}
{{if all_dimensions_direct}}
{{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride;
{{else}}
if ({{get_suboffset_dim()}} < 0) {
{{dst}}.data += __pyx_tmp_idx * __pyx_tmp_stride;
/* This dimension is the first dimension, or is preceded by */
/* direct or indirect dimensions that are indexed away. */
/* Hence suboffset_dim must be less than zero, and we can have */
/* our data pointer refer to another block by dereferencing. */
/* slice.data -> B -> C becomes slice.data -> C */
{{if indirect}}
{
Py_ssize_t __pyx_tmp_suboffset = {{src}}.suboffsets[{{dim}}];
{{if generic}}
if (__pyx_tmp_suboffset >= 0)
{{endif}}
{{dst}}.data = *((char **) {{dst}}.data) + __pyx_tmp_suboffset;
}
{{endif}}
} else {
{{dst}}.suboffsets[{{get_suboffset_dim()}}] += __pyx_tmp_idx * __pyx_tmp_stride;
/* Note: dimension can not be indirect, the compiler will have */
/* issued an error */
}
{{endif}}
}
////////// FillStrided1DScalar.proto //////////
static void
__pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride,
size_t itemsize, void *itemp);
////////// FillStrided1DScalar //////////
/* Fill a slice with a scalar value. The dimension is direct and strided or contiguous */
/* This can be used as a callback for the memoryview object to efficiently assign a scalar */
/* Currently unused */
static void
__pyx_fill_slice_{{dtype_name}}({{type_decl}} *p, Py_ssize_t extent, Py_ssize_t stride,
size_t itemsize, void *itemp)
{
Py_ssize_t i;
{{type_decl}} item = *(({{type_decl}} *) itemp);
{{type_decl}} *endp;
stride /= sizeof({{type_decl}});
endp = p + stride * extent;
while (p < endp) {
*p = item;
p += stride;
}
}
Cython-0.29.28/Cython/Utility/ModuleSetupCode.c
/////////////// CModulePreamble ///////////////
#include <stddef.h>  /* For offsetof */
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
// For use in DL_IMPORT/DL_EXPORT macros.
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
// CPython has required PY_LONG_LONG support for years, even if HAVE_LONG_LONG is not defined for us
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
// looks like calling _PyType_Lookup() isn't safe in Py<=2.6/3.1
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2
// Python 3.11a2 hid _PyLong_FormatAdvancedWriter and _PyFloat_FormatAdvancedWriter
// therefore disable unicode writer until a better alternative appears
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#if PY_VERSION_HEX >= 0x030B00A4
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#elif !defined(CYTHON_FAST_THREAD_STATE)
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
// Python 3.11 deleted the localsplus argument from the frame object,
// which is used in our fast_pycall code.
#define CYTHON_FAST_PYCALL (PY_VERSION_HEX < 0x030B00A1)
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#if PY_VERSION_HEX >= 0x030B00A4
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif !defined(CYTHON_USE_EXC_INFO_STACK)
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#if PY_MAJOR_VERSION < 3
#include "longintrepr.h"
#endif
/* These short defines can easily conflict with other code */
#undef SHIFT
#undef BASE
#undef MASK
/* Compile-time sanity check that these are indeed equal. Github issue #2670. */
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
// restrict
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
// unused attribute
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000 /* Xcode < 7.0 */
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
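// Illustrative sketch, not from the original file: how code is expected to use
// CYTHON_FALLTHROUGH to silence implicit-fallthrough warnings in a switch.
// The function below is a made-up example and is compiled out via "#if 0".
#if 0
static int __pyx_example_digit_class(int c) {
    switch (c) {
        case '0':
        case '1':
            return 1;            /* adjacent case labels need no marker */
        case '2':
            c += 1;
            CYTHON_FALLTHROUGH;  /* marks the fallthrough as intentional */
        default:
            return c;
    }
}
#endif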
/////////////// CInitCode ///////////////
// inline attribute
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
/////////////// CppInitCode ///////////////
#ifndef __cplusplus
#error "Cython files generated with the C++ option must be compiled with a C++ compiler."
#endif
// inline attribute
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#else
#define CYTHON_INLINE inline
#endif
#endif
// Work around clang bug http://stackoverflow.com/questions/21847816/c-invoke-nested-template-class-destructor
template<typename T>
void __Pyx_call_destructor(T& x) {
x.~T();
}
// Used for temporary variables of "reference" type.
template<typename T>
class __Pyx_FakeReference {
public:
__Pyx_FakeReference() : ptr(NULL) { }
// __Pyx_FakeReference(T& ref) : ptr(&ref) { }
// Const version needed as Cython doesn't know about const overloads (e.g. for stl containers).
__Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { }
T *operator->() { return ptr; }
T *operator&() { return ptr; }
operator T&() { return *ptr; }
// TODO(robertwb): Delegate all operators (or auto-generate unwrapping code where needed).
template<typename U> bool operator ==(U other) { return *ptr == other; }
template<typename U> bool operator !=(U other) { return *ptr != other; }
private:
T *ptr;
};
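// Illustrative sketch, not from the original file: __Pyx_FakeReference stores a
// plain pointer, so unlike a real C++ reference it can be reseated after
// construction, which is what Cython needs for "reference"-typed temporaries.
// Hypothetical usage, compiled out:
#if 0
void __pyx_example_fake_reference(int& a, int& b) {
    __Pyx_FakeReference<int> r(a);    /* stores &a, behaves like "int&" */
    int value = r;                    /* operator T&() yields *ptr */
    r = __Pyx_FakeReference<int>(b);  /* reseating; impossible with "int&" */
    (void)value;
}
#endif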
/////////////// PythonCompatibility ///////////////
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_DefaultClassType PyType_Type
#if PY_VERSION_HEX >= 0x030B00A1
static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int k, int l, int s, int f,
PyObject *code, PyObject *c, PyObject* n, PyObject *v,
PyObject *fv, PyObject *cell, PyObject* fn,
PyObject *name, int fline, PyObject *lnos) {
// TODO - currently written to be simple and work in limited API etc.
// A more optimized version would be good
PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL;
PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL;
const char *fn_cstr=NULL;
const char *name_cstr=NULL;
PyCodeObject* co=NULL;
PyObject *type, *value, *traceback;
// we must be able to call this while an exception is happening - thus clear then restore the state
PyErr_Fetch(&type, &value, &traceback);
if (!(kwds=PyDict_New())) goto end;
if (!(argcount=PyLong_FromLong(a))) goto end;
if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end;
if (!(posonlyargcount=PyLong_FromLong(0))) goto end;
if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end;
if (!(kwonlyargcount=PyLong_FromLong(k))) goto end;
if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end;
if (!(nlocals=PyLong_FromLong(l))) goto end;
if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end;
if (!(stacksize=PyLong_FromLong(s))) goto end;
if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end;
if (!(flags=PyLong_FromLong(f))) goto end;
if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end;
if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end;
if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end;
if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end;
if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end;
if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too;
if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here
if (!(call_result = PyObject_Call(replace, empty, kwds))) goto cleanup_code_too;
Py_XDECREF((PyObject*)co);
co = (PyCodeObject*)call_result;
call_result = NULL;
if (0) {
cleanup_code_too:
Py_XDECREF((PyObject*)co);
co = NULL;
}
end:
Py_XDECREF(kwds);
Py_XDECREF(argcount);
Py_XDECREF(posonlyargcount);
Py_XDECREF(kwonlyargcount);
Py_XDECREF(nlocals);
Py_XDECREF(stacksize);
Py_XDECREF(replace);
Py_XDECREF(call_result);
Py_XDECREF(empty);
if (type) {
PyErr_Restore(type, value, traceback);
}
return co;
}
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
// already defined for Stackless Python (all versions) and C-Python >= 3.7
// if defined, the value is 0x80 for Stackless Python < 3.6 and 0x100 otherwise
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
// new in CPython 3.6, but changed in 3.7 - see
// positional-only parameters:
// https://bugs.python.org/issue29464
// const args:
// https://bugs.python.org/issue32240
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
// new in CPython 3.7; in 3.6 this was the signature of _PyCFunctionFast()
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func) \
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
// special C-API functions only in Pyston
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
//#elif PY_VERSION_HEX >= 0x03050200
// Actually added in 3.5.2, but compiling against that does not guarantee that we get imported there.
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
// TSS (Thread Specific Storage) API
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0; /* PyThread_create_key reports success always */
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
// PyThread_delete_key_value(key) is equivalent to PyThread_set_key_value(key, NULL)
// PyThread_ReInitTLS() is a no-op
#endif /* TSS (Thread Specific Storage) API */
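// Illustrative sketch, not from the original file: with the shim above, code
// written against the Py3.7 TSS API also runs on older CPythons. All
// __pyx_example_* names are hypothetical; compiled out via "#if 0".
#if 0
static Py_tss_t __pyx_example_key = Py_tss_NEEDS_INIT;
static void __pyx_example_tss(void *value) {
    if (!PyThread_tss_is_created(&__pyx_example_key))
        (void) PyThread_tss_create(&__pyx_example_key);
    (void) PyThread_tss_set(&__pyx_example_key, value);  /* per-thread slot */
    value = PyThread_tss_get(&__pyx_example_key);        /* read it back */
}
#endif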
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
/* new Py3.3 unicode type (PEP 393) */
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#if defined(PyUnicode_IS_READY)
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \
0 : _PyUnicode_Ready((PyObject *)(op)))
#else
// Py3.12 / PEP-623 will remove wstr type unicode strings and all of the PyUnicode_READY() machinery.
#define __Pyx_PyUnicode_READY(op) (0)
#endif
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000
// Avoid calling deprecated C-API functions in Py3.9+ that PEP-623 schedules for removal in Py3.12.
// https://www.python.org/dev/peps/pep-0623/
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#endif
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
/* (void)(k) => avoid unused variable warning due to macro: */
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
// ("..." % x) must call PyNumber_Remainder() if x is a string subclass that implements "__rmod__()".
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
// PyPy3 used to define "PyObject_Unicode"
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
// NOTE: might fail with exception => check for -1
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
// backport of PyAsyncMethods from Py3.5 to older Py3.x versions
// (mis-)using the "tp_reserved" type slot which is re-activated as "tp_as_async" in Py3.5
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
/////////////// SmallCodeConfig.proto ///////////////
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
/////////////// PyModInitFuncType.proto ///////////////
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
// Py2: define this to void manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
// Py3+: define this to PyObject * manually because PyMODINIT_FUNC adds __declspec(dllexport) to its definition.
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
/////////////// FastTypeChecks.proto ///////////////
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);/*proto*/
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);/*proto*/
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);/*proto*/
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
/////////////// FastTypeChecks ///////////////
//@requires: Exceptions.c::PyThreadStateGet
//@requires: Exceptions.c::PyErrFetchRestore
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
// should only get here for incompletely initialised types, i.e. never under normal usage patterns
return __Pyx_InBases(a, b);
}
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
// PyObject_IsSubclass() can recurse and therefore is not safe
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
// This function must not fail, so print the error here (which also clears it)
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
// This function must not fail, so print the error here (which also clears it)
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
// so far, we only call PyErr_GivenExceptionMatches() with an exception type (not instance) as first argument
// => optimise for that case
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
// the tighter subtype checking in Py3 allows faster out-of-order comparison
for (i=0; i<n; i++) {
    if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
    PyObject *t = PyTuple_GET_ITEM(tuple, i);
    #if PY_MAJOR_VERSION < 3
    if (likely(exc_type == t)) return 1;
    #endif
    if (likely(PyExceptionClass_Check(t))) {
        if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
    }
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
    if (likely(PyExceptionClass_Check(exc_type))) {
        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type, NULL);
    } else if (likely(PyTuple_Check(exc_type))) {
        return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
    }
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
// Only used internally with known exception types => pure safety check assertions.
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
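// Illustrative sketch, not from the original file: the two-type matcher above
// is the natural backend for an "except (TypeError, ValueError):" clause.
// Hypothetical usage, compiled out via "#if 0":
#if 0
static int __pyx_example_match_current_error(void) {
    PyObject *exc_type = PyErr_Occurred();  /* borrowed reference, may be NULL */
    if (!exc_type) return 0;
    return __Pyx_PyErr_GivenExceptionMatches2(exc_type, PyExc_TypeError, PyExc_ValueError);
}
#endif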
/////////////// MathInitCode ///////////////
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
// Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
// a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
// a quiet NaN.
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
/////////////// UtilityFunctionPredeclarations.proto ///////////////
typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; /*proto*/
/////////////// ForceInitThreads.proto ///////////////
//@proto_block: utility_code_proto_before_types
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/////////////// InitThreads.init ///////////////
#if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0
PyEval_InitThreads();
#endif
/////////////// ModuleCreationPEP489 ///////////////
//@substitute: naming
//#if CYTHON_PEP489_MULTI_PHASE_INIT
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* ${pymodule_create_func_cname}(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
// For now, we only have exactly one module instance.
if (__Pyx_check_single_interpreter())
return NULL;
if (${module_cname})
return __Pyx_NewRef(${module_cname});
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
// moddict is a borrowed reference
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
//#endif
/////////////// CodeObjectCache.proto ///////////////
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/////////////// CodeObjectCache ///////////////
// Note that errors are simply ignored in the code below.
// This is just a cache: if a lookup or insertion fails, so what?
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
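// Illustrative sketch, not from the original file: the bisection returns the
// index of the matching entry, or the insertion point that keeps the array
// sorted. Worked example, compiled out via "#if 0":
#if 0
static void __pyx_example_bisect(void) {
    __Pyx_CodeObjectCacheEntry e[] = {{NULL, 10}, {NULL, 20}, {NULL, 30}};
    assert(__pyx_bisect_code_objects(e, 3, 20) == 1);  /* exact hit */
    assert(__pyx_bisect_code_objects(e, 3, 25) == 2);  /* insertion point */
    assert(__pyx_bisect_code_objects(e, 3, 35) == 3);  /* append at the end */
}
#endif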
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/////////////// CodeObjectCache.cleanup ///////////////
if (__pyx_code_cache.entries) {
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
int i, count = __pyx_code_cache.count;
__pyx_code_cache.count = 0;
__pyx_code_cache.max_count = 0;
__pyx_code_cache.entries = NULL;
for (i=0; i<count; i++) {
    Py_DECREF(entries[i].code_object);
}
PyMem_Free(entries);
}
/////////////// Refnanny.proto ///////////////
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
    void (*INCREF)(void*, PyObject*, int);
    void (*DECREF)(void*, PyObject*, int);
    void (*GOTREF)(void*, PyObject*, int);
    void (*GIVEREF)(void*, PyObject*, int);
    void* (*SetupContext)(const char*, int, const char*);
    void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); /*proto*/
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
    if (acquire_gil) { \
        PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
        __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
PyGILState_Release(__pyx_gilstate_save); \
} else { \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext() \
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif /* CYTHON_REFNANNY */
#define __Pyx_XDECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_XDECREF(tmp); \
} while (0)
#define __Pyx_DECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_DECREF(tmp); \
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
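// Illustrative sketch, not from the original file: the shape of a generated
// function body using the refnanny macros; when CYTHON_REFNANNY is off they
// collapse to plain Py_INCREF/no-ops. Hypothetical, compiled out:
#if 0
static PyObject *__pyx_example_wrap(PyObject *obj) {
    __Pyx_RefNannyDeclarations
    __Pyx_RefNannySetupContext("example_wrap", 0);
    __Pyx_INCREF(obj);  /* logged with file/line when refnanny is enabled */
    __Pyx_RefNannyFinishContext();
    return obj;
}
#endif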
/////////////// Refnanny ///////////////
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif /* CYTHON_REFNANNY */
/////////////// ImportRefnannyAPI ///////////////
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
/////////////// RegisterModuleCleanup.proto ///////////////
//@substitute: naming
static void ${cleanup_cname}(PyObject *self); /*proto*/
#if PY_MAJOR_VERSION < 3 || CYTHON_COMPILING_IN_PYPY
static int __Pyx_RegisterCleanup(void); /*proto*/
#else
#define __Pyx_RegisterCleanup() (0)
#endif
/////////////// RegisterModuleCleanup ///////////////
//@substitute: naming
#if PY_MAJOR_VERSION < 3 || CYTHON_COMPILING_IN_PYPY
static PyObject* ${cleanup_cname}_atexit(PyObject *module, CYTHON_UNUSED PyObject *unused) {
${cleanup_cname}(module);
Py_INCREF(Py_None); return Py_None;
}
static int __Pyx_RegisterCleanup(void) {
// Don't use Py_AtExit because that has a 32-call limit and is called
// after Python finalization.
// Also, we try to prepend the cleanup function to "atexit._exithandlers"
// in Py2 because CPython runs them last-to-first. Being run last means that
// user exit code which may depend on the globals and cached objects we are
// about to clean up still gets to run before us.
static PyMethodDef cleanup_def = {
"__cleanup", (PyCFunction)${cleanup_cname}_atexit, METH_NOARGS, 0};
PyObject *cleanup_func = 0;
PyObject *atexit = 0;
PyObject *reg = 0;
PyObject *args = 0;
PyObject *res = 0;
int ret = -1;
cleanup_func = PyCFunction_New(&cleanup_def, 0);
if (!cleanup_func)
goto bad;
atexit = PyImport_ImportModule("atexit");
if (!atexit)
goto bad;
reg = PyObject_GetAttrString(atexit, "_exithandlers");
if (reg && PyList_Check(reg)) {
PyObject *a, *kw;
a = PyTuple_New(0);
kw = PyDict_New();
if (!a || !kw) {
Py_XDECREF(a);
Py_XDECREF(kw);
goto bad;
}
args = PyTuple_Pack(3, cleanup_func, a, kw);
Py_DECREF(a);
Py_DECREF(kw);
if (!args)
goto bad;
ret = PyList_Insert(reg, 0, args);
} else {
if (!reg)
PyErr_Clear();
Py_XDECREF(reg);
reg = PyObject_GetAttrString(atexit, "register");
if (!reg)
goto bad;
args = PyTuple_Pack(1, cleanup_func);
if (!args)
goto bad;
res = PyObject_CallObject(reg, args);
if (!res)
goto bad;
ret = 0;
}
bad:
Py_XDECREF(cleanup_func);
Py_XDECREF(atexit);
Py_XDECREF(reg);
Py_XDECREF(args);
Py_XDECREF(res);
return ret;
}
#endif
/////////////// FastGil.init ///////////////
#ifdef WITH_THREAD
__Pyx_FastGilFuncInit();
#endif
/////////////// NoFastGil.proto ///////////////
//@proto_block: utility_code_proto_before_types
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/////////////// FastGil.proto ///////////////
//@proto_block: utility_code_proto_before_types
struct __Pyx_FastGilVtab {
PyGILState_STATE (*Fast_PyGILState_Ensure)(void);
void (*Fast_PyGILState_Release)(PyGILState_STATE oldstate);
void (*FastGIL_Remember)(void);
void (*FastGIL_Forget)(void);
};
static void __Pyx_FastGIL_Noop(void) {}
static struct __Pyx_FastGilVtab __Pyx_FastGilFuncs = {
PyGILState_Ensure,
PyGILState_Release,
__Pyx_FastGIL_Noop,
__Pyx_FastGIL_Noop
};
static void __Pyx_FastGilFuncInit(void);
#define __Pyx_PyGILState_Ensure __Pyx_FastGilFuncs.Fast_PyGILState_Ensure
#define __Pyx_PyGILState_Release __Pyx_FastGilFuncs.Fast_PyGILState_Release
#define __Pyx_FastGIL_Remember __Pyx_FastGilFuncs.FastGIL_Remember
#define __Pyx_FastGIL_Forget __Pyx_FastGilFuncs.FastGIL_Forget
#ifdef WITH_THREAD
#ifndef CYTHON_THREAD_LOCAL
#if __STDC_VERSION__ >= 201112
#define CYTHON_THREAD_LOCAL _Thread_local
#elif defined(__GNUC__)
#define CYTHON_THREAD_LOCAL __thread
#elif defined(_MSC_VER)
#define CYTHON_THREAD_LOCAL __declspec(thread)
#endif
#endif
#endif
/////////////// FastGil ///////////////
//@requires: CommonStructures.c::FetchCommonPointer
// The implementations of PyGILState_Ensure/Release call PyThread_get_key_value
// several times, which turns out to be quite slow (slower, in fact, than
// acquiring the GIL itself). Simply storing it in a thread local for the
// common case is much faster.
// To make optimal use of this thread local, we attempt to share it between
// modules.
#define __Pyx_FastGIL_ABI_module "_cython_" CYTHON_ABI
#define __Pyx_FastGIL_PyCapsuleName "FastGilFuncs"
#define __Pyx_FastGIL_PyCapsule \
__Pyx_FastGIL_ABI_module "." __Pyx_FastGIL_PyCapsuleName
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_THREAD_LOCAL
#endif
#ifdef CYTHON_THREAD_LOCAL
#include "pythread.h"
#include "pystate.h"
static CYTHON_THREAD_LOCAL PyThreadState *__Pyx_FastGil_tcur = NULL;
static CYTHON_THREAD_LOCAL int __Pyx_FastGil_tcur_depth = 0;
static int __Pyx_FastGil_autoTLSkey = -1;
static CYTHON_INLINE void __Pyx_FastGIL_Remember0(void) {
++__Pyx_FastGil_tcur_depth;
}
static CYTHON_INLINE void __Pyx_FastGIL_Forget0(void) {
if (--__Pyx_FastGil_tcur_depth == 0) {
__Pyx_FastGil_tcur = NULL;
}
}
static CYTHON_INLINE PyThreadState *__Pyx_FastGil_get_tcur(void) {
PyThreadState *tcur = __Pyx_FastGil_tcur;
if (tcur == NULL) {
tcur = __Pyx_FastGil_tcur = (PyThreadState*)PyThread_get_key_value(__Pyx_FastGil_autoTLSkey);
}
return tcur;
}
static PyGILState_STATE __Pyx_FastGil_PyGILState_Ensure(void) {
int current;
PyThreadState *tcur;
__Pyx_FastGIL_Remember0();
tcur = __Pyx_FastGil_get_tcur();
if (tcur == NULL) {
// Uninitialized, need to initialize now.
return PyGILState_Ensure();
}
current = tcur == __Pyx_PyThreadState_Current;
if (current == 0) {
PyEval_RestoreThread(tcur);
}
++tcur->gilstate_counter;
return current ? PyGILState_LOCKED : PyGILState_UNLOCKED;
}
static void __Pyx_FastGil_PyGILState_Release(PyGILState_STATE oldstate) {
PyThreadState *tcur = __Pyx_FastGil_get_tcur();
__Pyx_FastGIL_Forget0();
if (tcur->gilstate_counter == 1) {
// This is the last lock, do all the cleanup as well.
PyGILState_Release(oldstate);
} else {
--tcur->gilstate_counter;
if (oldstate == PyGILState_UNLOCKED) {
PyEval_SaveThread();
}
}
}
static void __Pyx_FastGilFuncInit0(void) {
/* Try to detect autoTLSkey. */
int key;
void* this_thread_state = (void*) PyGILState_GetThisThreadState();
for (key = 0; key < 100; key++) {
if (PyThread_get_key_value(key) == this_thread_state) {
__Pyx_FastGil_autoTLSkey = key;
break;
}
}
if (__Pyx_FastGil_autoTLSkey != -1) {
PyObject* capsule = NULL;
PyObject* abi_module = NULL;
__Pyx_PyGILState_Ensure = __Pyx_FastGil_PyGILState_Ensure;
__Pyx_PyGILState_Release = __Pyx_FastGil_PyGILState_Release;
__Pyx_FastGIL_Remember = __Pyx_FastGIL_Remember0;
__Pyx_FastGIL_Forget = __Pyx_FastGIL_Forget0;
capsule = PyCapsule_New(&__Pyx_FastGilFuncs, __Pyx_FastGIL_PyCapsule, NULL);
abi_module = PyImport_AddModule(__Pyx_FastGIL_ABI_module);
if (capsule && abi_module) {
PyObject_SetAttrString(abi_module, __Pyx_FastGIL_PyCapsuleName, capsule);
}
Py_XDECREF(capsule);
}
}
#else
static void __Pyx_FastGilFuncInit0(void) {
CYTHON_UNUSED void* force_use = (void*)&__Pyx_FetchCommonPointer;
}
#endif
static void __Pyx_FastGilFuncInit(void) {
#if PY_VERSION_HEX >= 0x02070000
struct __Pyx_FastGilVtab* shared = (struct __Pyx_FastGilVtab*)PyCapsule_Import(__Pyx_FastGIL_PyCapsule, 1);
#else
struct __Pyx_FastGilVtab* shared = NULL;
#endif
if (shared) {
__Pyx_FastGilFuncs = *shared;
} else {
PyErr_Clear();
__Pyx_FastGilFuncInit0();
}
}
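// Design note, an editor's summary rather than original text: the first module
// to initialise publishes its vtable in the capsule
// "_cython_<CYTHON_ABI>.FastGilFuncs"; every later module finds that capsule
// via PyCapsule_Import() above and copies the shared function pointers, so all
// Cython modules in the process go through the same thread-local fast path.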
/*
* General object operations and protocol implementations,
* including their specialisations for certain builtins.
*
* Optional optimisations for builtins are in Optimize.c.
*
* Required replacements of builtins are in Builtins.c.
*/
/////////////// RaiseNoneIterError.proto ///////////////
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/////////////// RaiseNoneIterError ///////////////
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/////////////// RaiseTooManyValuesToUnpack.proto ///////////////
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/////////////// RaiseTooManyValuesToUnpack ///////////////
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/////////////// RaiseNeedMoreValuesToUnpack.proto ///////////////
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/////////////// RaiseNeedMoreValuesToUnpack ///////////////
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/////////////// UnpackTupleError.proto ///////////////
static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); /*proto*/
/////////////// UnpackTupleError ///////////////
//@requires: RaiseNoneIterError
//@requires: RaiseNeedMoreValuesToUnpack
//@requires: RaiseTooManyValuesToUnpack
static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
if (t == Py_None) {
__Pyx_RaiseNoneNotIterableError();
} else if (PyTuple_GET_SIZE(t) < index) {
__Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t));
} else {
__Pyx_RaiseTooManyValuesError(index);
}
}
/////////////// UnpackItemEndCheck.proto ///////////////
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); /*proto*/
/////////////// UnpackItemEndCheck ///////////////
//@requires: RaiseTooManyValuesToUnpack
//@requires: IterFinish
static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
if (unlikely(retval)) {
Py_DECREF(retval);
__Pyx_RaiseTooManyValuesError(expected);
return -1;
} else {
return __Pyx_IterFinish();
}
return 0;
}
/////////////// UnpackTuple2.proto ///////////////
#define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple) \
(likely(is_tuple || PyTuple_Check(tuple)) ? \
(likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ? \
__Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) : \
(__Pyx_UnpackTupleError(tuple, 2), -1)) : \
__Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple))
static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple);
static int __Pyx_unpack_tuple2_generic(
PyObject* tuple, PyObject** value1, PyObject** value2, int has_known_size, int decref_tuple);
/////////////// UnpackTuple2 ///////////////
//@requires: UnpackItemEndCheck
//@requires: UnpackTupleError
//@requires: RaiseNeedMoreValuesToUnpack
static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) {
PyObject *value1 = NULL, *value2 = NULL;
#if CYTHON_COMPILING_IN_PYPY
value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad;
value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad;
#else
value1 = PyTuple_GET_ITEM(tuple, 0); Py_INCREF(value1);
value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value2);
#endif
if (decref_tuple) {
Py_DECREF(tuple);
}
*pvalue1 = value1;
*pvalue2 = value2;
return 0;
#if CYTHON_COMPILING_IN_PYPY
bad:
Py_XDECREF(value1);
Py_XDECREF(value2);
if (decref_tuple) { Py_XDECREF(tuple); }
return -1;
#endif
}
static int __Pyx_unpack_tuple2_generic(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2,
int has_known_size, int decref_tuple) {
Py_ssize_t index;
PyObject *value1 = NULL, *value2 = NULL, *iter = NULL;
iternextfunc iternext;
iter = PyObject_GetIter(tuple);
if (unlikely(!iter)) goto bad;
if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; }
iternext = Py_TYPE(iter)->tp_iternext;
value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; }
value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; }
if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad;
Py_DECREF(iter);
*pvalue1 = value1;
*pvalue2 = value2;
return 0;
unpacking_failed:
if (!has_known_size && __Pyx_IterFinish() == 0)
__Pyx_RaiseNeedMoreValuesError(index);
bad:
Py_XDECREF(iter);
Py_XDECREF(value1);
Py_XDECREF(value2);
if (decref_tuple) { Py_XDECREF(tuple); }
return -1;
}
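// Illustrative sketch, not from the original file: how the unpack helper might
// be invoked for "a, b = pair" when the RHS is known to be a 2-tuple.
// Hypothetical, compiled out via "#if 0":
#if 0
static int __pyx_example_unpack(PyObject *pair, PyObject **a, PyObject **b) {
    /* is_tuple=1 and has_known_size=1 skip the runtime checks;
       decref_tuple=0 leaves the caller's reference to "pair" alone */
    return __Pyx_unpack_tuple2(pair, a, b, 1, 1, 0);
}
#endif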
/////////////// IterNext.proto ///////////////
#define __Pyx_PyIter_Next(obj) __Pyx_PyIter_Next2(obj, NULL)
static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject *, PyObject *); /*proto*/
/////////////// IterNext ///////////////
//@requires: Exceptions.c::PyThreadStateGet
//@requires: Exceptions.c::PyErrFetchRestore
static PyObject *__Pyx_PyIter_Next2Default(PyObject* defval) {
PyObject* exc_type;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
exc_type = __Pyx_PyErr_Occurred();
if (unlikely(exc_type)) {
if (!defval || unlikely(!__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(defval);
return defval;
}
if (defval) {
Py_INCREF(defval);
return defval;
}
__Pyx_PyErr_SetNone(PyExc_StopIteration);
return NULL;
}
static void __Pyx_PyIter_Next_ErrorNoIterator(PyObject *iterator) {
PyErr_Format(PyExc_TypeError,
"%.200s object is not an iterator", Py_TYPE(iterator)->tp_name);
}
// originally copied from Py3's builtin_next()
static CYTHON_INLINE PyObject *__Pyx_PyIter_Next2(PyObject* iterator, PyObject* defval) {
PyObject* next;
// We always do a quick slot check because calling PyIter_Check() is so wasteful.
iternextfunc iternext = Py_TYPE(iterator)->tp_iternext;
if (likely(iternext)) {
#if CYTHON_USE_TYPE_SLOTS
next = iternext(iterator);
if (likely(next))
return next;
#if PY_VERSION_HEX >= 0x02070000
if (unlikely(iternext == &_PyObject_NextNotImplemented))
return NULL;
#endif
#else
// Since the slot was set, assume that PyIter_Next() will likely succeed, and properly fail otherwise.
// Note: PyIter_Next() crashes in CPython if "tp_iternext" is NULL.
next = PyIter_Next(iterator);
if (likely(next))
return next;
#endif
} else if (CYTHON_USE_TYPE_SLOTS || unlikely(!PyIter_Check(iterator))) {
// If CYTHON_USE_TYPE_SLOTS, then the slot was not set and we don't have an iterable.
// Otherwise, don't trust "tp_iternext" and rely on PyIter_Check().
__Pyx_PyIter_Next_ErrorNoIterator(iterator);
return NULL;
}
#if !CYTHON_USE_TYPE_SLOTS
else {
// We have an iterator with an empty "tp_iternext", but didn't call next() on it yet.
next = PyIter_Next(iterator);
if (likely(next))
return next;
}
#endif
return __Pyx_PyIter_Next2Default(defval);
}
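// Illustrative sketch, not from the original file: __Pyx_PyIter_Next2 backs
// "next(it, default)"; with a default it swallows StopIteration, without one
// it raises. Hypothetical, compiled out:
#if 0
static PyObject *__pyx_example_next_or_none(PyObject *iterator) {
    return __Pyx_PyIter_Next2(iterator, Py_None);  /* new ref: item or Py_None */
}
#endif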
/////////////// IterFinish.proto ///////////////
static CYTHON_INLINE int __Pyx_IterFinish(void); /*proto*/
/////////////// IterFinish ///////////////
// When PyIter_Next(iter) has returned NULL in order to signal termination,
// this function does the right cleanup and returns 0 on success. If it
// detects an error that occurred in the iterator, it returns -1.
static CYTHON_INLINE int __Pyx_IterFinish(void) {
#if CYTHON_FAST_THREAD_STATE
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* exc_type = tstate->curexc_type;
if (unlikely(exc_type)) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
PyObject *exc_value, *exc_tb;
exc_value = tstate->curexc_value;
exc_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
Py_DECREF(exc_type);
Py_XDECREF(exc_value);
Py_XDECREF(exc_tb);
return 0;
} else {
return -1;
}
}
return 0;
#else
if (unlikely(PyErr_Occurred())) {
if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
PyErr_Clear();
return 0;
} else {
return -1;
}
}
return 0;
#endif
}
/////////////// ObjectGetItem.proto ///////////////
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);/*proto*/
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/////////////// ObjectGetItem ///////////////
// //@requires: GetItemInt - added in IndexNode as it uses templating.
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
// Error handling code -- only manage OverflowError differently.
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/////////////// DictGetItem.proto ///////////////
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);/*proto*/
#define __Pyx_PyObject_Dict_GetItem(obj, name) \
(likely(PyDict_CheckExact(obj)) ? \
__Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
#else
#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
#endif
/////////////// DictGetItem ///////////////
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
PyObject *value;
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (!PyErr_Occurred()) {
if (unlikely(PyTuple_Check(key))) {
// CPython interprets tuples as separate arguments => must wrap them in another tuple.
PyObject* args = PyTuple_Pack(1, key);
if (likely(args)) {
PyErr_SetObject(PyExc_KeyError, args);
Py_DECREF(args);
}
} else {
// Avoid tuple packing if possible.
PyErr_SetObject(PyExc_KeyError, key);
}
}
return NULL;
}
Py_INCREF(value);
return value;
}
#endif
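// Editor's note on the tuple wrapping above, not original text: without it,
// PyErr_SetObject(PyExc_KeyError, key) on a tuple key would spread the tuple
// elements into separate exception arguments, so exc.args would no longer hold
// the missing key itself as args[0], unlike "raise KeyError(key)" in Python.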
/////////////// GetItemInt.proto ///////////////
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) : \
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) : \
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
{{for type in ['List', 'Tuple']}}
#define __Pyx_GetItemInt_{{type}}(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_{{type}}_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "{{ type.lower() }} index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_{{type}}_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
{{endfor}}
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/////////////// GetItemInt ///////////////
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
{{for type in ['List', 'Tuple']}}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_{{type}}_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += Py{{type}}_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, Py{{type}}_GET_SIZE(o)))) {
PyObject *r = Py{{type}}_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
{{endfor}}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
// inlined PySequence_GetItem() + special cased length overflow
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
// if length > max(Py_ssize_t), maybe the object can wrap around itself?
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
/////////////// SetItemInt.proto ///////////////
#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) : \
(is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) : \
__Pyx_SetItemInt_Generic(o, to_py_func(i), v)))
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v);
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v,
int is_list, int wraparound, int boundscheck);
/////////////// SetItemInt ///////////////
static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) {
int r;
if (!j) return -1;
r = PyObject_SetItem(o, j, v);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list,
CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ? i : i + PyList_GET_SIZE(o));
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) {
PyObject* old = PyList_GET_ITEM(o, n);
Py_INCREF(v);
PyList_SET_ITEM(o, n, v);
Py_DECREF(old);
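            // Any non-negative return value means success; callers of
            // __Pyx_SetItemInt() only test for "< 0".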
return 1;
}
} else {
// inlined PySequence_SetItem() + special cased length overflow
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_ass_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
// if length > max(Py_ssize_t), maybe the object can wrap around itself?
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return -1;
PyErr_Clear();
}
}
return m->sq_ass_item(o, i, v);
}
}
#else
#if CYTHON_COMPILING_IN_PYPY
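    // PyPy's PySequence_Check() has historically also matched dicts, which
    // must not be assigned to by integer index here.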
if (is_list || (PySequence_Check(o) && !PyDict_Check(o)))
#else
if (is_list || PySequence_Check(o))
#endif
{
return PySequence_SetItem(o, i, v);
}
#endif
return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v);
}
/////////////// DelItemInt.proto ///////////////
#define __Pyx_DelItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_DelItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound) : \
(is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) : \
__Pyx_DelItem_Generic(o, to_py_func(i))))
static int __Pyx_DelItem_Generic(PyObject *o, PyObject *j);
static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound);
/////////////// DelItemInt ///////////////
static int __Pyx_DelItem_Generic(PyObject *o, PyObject *j) {
int r;
if (!j) return -1;
r = PyObject_DelItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE int __Pyx_DelItemInt_Fast(PyObject *o, Py_ssize_t i,
CYTHON_UNUSED int is_list, CYTHON_NCP_UNUSED int wraparound) {
#if !CYTHON_USE_TYPE_SLOTS
if (is_list || PySequence_Check(o)) {
return PySequence_DelItem(o, i);
}
#else
// inlined PySequence_DelItem() + special cased length overflow
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_ass_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
// if length > max(Py_ssize_t), maybe the object can wrap around itself?
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return -1;
PyErr_Clear();
}
}
return m->sq_ass_item(o, i, (PyObject *)NULL);
}
#endif
return __Pyx_DelItem_Generic(o, PyInt_FromSsize_t(i));
}
/////////////// SliceObject.proto ///////////////
// we pass pointer addresses to show the C compiler what is NULL and what isn't
{{if access == 'Get'}}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
int has_cstart, int has_cstop, int wraparound);
{{else}}
#define __Pyx_PyObject_DelSlice(obj, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound) \
__Pyx_PyObject_SetSlice(obj, (PyObject*)NULL, cstart, cstop, py_start, py_stop, py_slice, has_cstart, has_cstop, wraparound)
// we pass pointer addresses to show the C compiler what is NULL and what isn't
static CYTHON_INLINE int __Pyx_PyObject_SetSlice(
PyObject* obj, PyObject* value, Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
int has_cstart, int has_cstop, int wraparound);
{{endif}}
/////////////// SliceObject ///////////////
{{if access == 'Get'}}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
{{else}}
static CYTHON_INLINE int __Pyx_PyObject_SetSlice(PyObject* obj, PyObject* value,
{{endif}}
Py_ssize_t cstart, Py_ssize_t cstop,
PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
#if CYTHON_USE_TYPE_SLOTS
PyMappingMethods* mp;
#if PY_MAJOR_VERSION < 3
PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
if (likely(ms && ms->sq_{{if access == 'Set'}}ass_{{endif}}slice)) {
if (!has_cstart) {
if (_py_start && (*_py_start != Py_None)) {
cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
} else
cstart = 0;
}
if (!has_cstop) {
if (_py_stop && (*_py_stop != Py_None)) {
cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
} else
cstop = PY_SSIZE_T_MAX;
}
if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
Py_ssize_t l = ms->sq_length(obj);
if (likely(l >= 0)) {
if (cstop < 0) {
cstop += l;
if (cstop < 0) cstop = 0;
}
if (cstart < 0) {
cstart += l;
if (cstart < 0) cstart = 0;
}
} else {
// if length > max(Py_ssize_t), maybe the object can wrap around itself?
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
goto bad;
PyErr_Clear();
}
}
{{if access == 'Get'}}
return ms->sq_slice(obj, cstart, cstop);
{{else}}
return ms->sq_ass_slice(obj, cstart, cstop, value);
{{endif}}
}
#endif
mp = Py_TYPE(obj)->tp_as_mapping;
{{if access == 'Get'}}
if (likely(mp && mp->mp_subscript))
{{else}}
if (likely(mp && mp->mp_ass_subscript))
{{endif}}
#endif
{
{{if access == 'Get'}}PyObject*{{else}}int{{endif}} result;
PyObject *py_slice, *py_start, *py_stop;
if (_py_slice) {
py_slice = *_py_slice;
} else {
PyObject* owned_start = NULL;
PyObject* owned_stop = NULL;
if (_py_start) {
py_start = *_py_start;
} else {
if (has_cstart) {
owned_start = py_start = PyInt_FromSsize_t(cstart);
if (unlikely(!py_start)) goto bad;
} else
py_start = Py_None;
}
if (_py_stop) {
py_stop = *_py_stop;
} else {
if (has_cstop) {
owned_stop = py_stop = PyInt_FromSsize_t(cstop);
if (unlikely(!py_stop)) {
Py_XDECREF(owned_start);
goto bad;
}
} else
py_stop = Py_None;
}
py_slice = PySlice_New(py_start, py_stop, Py_None);
Py_XDECREF(owned_start);
Py_XDECREF(owned_stop);
if (unlikely(!py_slice)) goto bad;
}
#if CYTHON_USE_TYPE_SLOTS
{{if access == 'Get'}}
result = mp->mp_subscript(obj, py_slice);
#else
result = PyObject_GetItem(obj, py_slice);
{{else}}
result = mp->mp_ass_subscript(obj, py_slice, value);
#else
result = value ? PyObject_SetItem(obj, py_slice, value) : PyObject_DelItem(obj, py_slice);
{{endif}}
#endif
if (!_py_slice) {
Py_DECREF(py_slice);
}
return result;
}
PyErr_Format(PyExc_TypeError,
{{if access == 'Get'}}
"'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
{{else}}
"'%.200s' object does not support slice %.10s",
Py_TYPE(obj)->tp_name, value ? "assignment" : "deletion");
{{endif}}
bad:
return {{if access == 'Get'}}NULL{{else}}-1{{endif}};
}
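// Illustrative usage (hypothetical names): a slice read such as "obj[1:n]"
// compiles to roughly
//     result = __Pyx_PyObject_GetSlice(obj, 1, n, NULL, NULL, NULL, 1, 1, 1);
// where the NULL pointer arguments tell the C compiler which of the Python
// start/stop/slice objects exist, as noted in the proto section above.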
/////////////// SliceTupleAndList.proto ///////////////
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyList_GetSlice(PyObject* src, Py_ssize_t start, Py_ssize_t stop);
static CYTHON_INLINE PyObject* __Pyx_PyTuple_GetSlice(PyObject* src, Py_ssize_t start, Py_ssize_t stop);
#else
#define __Pyx_PyList_GetSlice(seq, start, stop) PySequence_GetSlice(seq, start, stop)
#define __Pyx_PyTuple_GetSlice(seq, start, stop) PySequence_GetSlice(seq, start, stop)
#endif
/////////////// SliceTupleAndList ///////////////
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE void __Pyx_crop_slice(Py_ssize_t* _start, Py_ssize_t* _stop, Py_ssize_t* _length) {
Py_ssize_t start = *_start, stop = *_stop, length = *_length;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
else if (stop > length)
stop = length;
*_length = stop - start;
*_start = start;
*_stop = stop;
}
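// Worked example: start = -2, stop = 100, length = 5 is cropped to
// start = 3, stop = 5, length = 2, i.e. the elements that "seq[-2:100]"
// selects from a 5-element sequence.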
static CYTHON_INLINE void __Pyx_copy_object_array(PyObject** CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) {
PyObject *v;
Py_ssize_t i;
for (i = 0; i < length; i++) {
v = dest[i] = src[i];
Py_INCREF(v);
}
}
{{for type in ['List', 'Tuple']}}
static CYTHON_INLINE PyObject* __Pyx_Py{{type}}_GetSlice(
PyObject* src, Py_ssize_t start, Py_ssize_t stop) {
PyObject* dest;
Py_ssize_t length = Py{{type}}_GET_SIZE(src);
__Pyx_crop_slice(&start, &stop, &length);
if (unlikely(length <= 0))
return Py{{type}}_New(0);
dest = Py{{type}}_New(length);
if (unlikely(!dest))
return NULL;
__Pyx_copy_object_array(
((Py{{type}}Object*)src)->ob_item + start,
((Py{{type}}Object*)dest)->ob_item,
length);
return dest;
}
{{endfor}}
#endif
/////////////// CalculateMetaclass.proto ///////////////
static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases);
/////////////// CalculateMetaclass ///////////////
static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) {
Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases);
for (i=0; i < nbases; i++) {
PyTypeObject *tmptype;
PyObject *tmp = PyTuple_GET_ITEM(bases, i);
tmptype = Py_TYPE(tmp);
#if PY_MAJOR_VERSION < 3
if (tmptype == &PyClass_Type)
continue;
#endif
if (!metaclass) {
metaclass = tmptype;
continue;
}
if (PyType_IsSubtype(metaclass, tmptype))
continue;
if (PyType_IsSubtype(tmptype, metaclass)) {
metaclass = tmptype;
continue;
}
// else:
PyErr_SetString(PyExc_TypeError,
"metaclass conflict: "
"the metaclass of a derived class "
"must be a (non-strict) subclass "
"of the metaclasses of all its bases");
return NULL;
}
if (!metaclass) {
#if PY_MAJOR_VERSION < 3
metaclass = &PyClass_Type;
#else
metaclass = &PyType_Type;
#endif
}
// make owned reference
Py_INCREF((PyObject*) metaclass);
return (PyObject*) metaclass;
}
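// Example: for "class C(A, B)", the metaclasses type(A) and type(B) are
// compared and the most derived one is returned; two unrelated metaclasses
// trigger the TypeError above, matching CPython's metaclass calculation.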
/////////////// FindInheritedMetaclass.proto ///////////////
static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases); /*proto*/
/////////////// FindInheritedMetaclass ///////////////
//@requires: PyObjectGetAttrStr
//@requires: CalculateMetaclass
static PyObject *__Pyx_FindInheritedMetaclass(PyObject *bases) {
PyObject *metaclass;
if (PyTuple_Check(bases) && PyTuple_GET_SIZE(bases) > 0) {
PyTypeObject *metatype;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
PyObject *base = PyTuple_GET_ITEM(bases, 0);
#else
PyObject *base = PySequence_ITEM(bases, 0);
#endif
#if PY_MAJOR_VERSION < 3
PyObject* basetype = __Pyx_PyObject_GetAttrStr(base, PYIDENT("__class__"));
if (basetype) {
metatype = (PyType_Check(basetype)) ? ((PyTypeObject*) basetype) : NULL;
} else {
PyErr_Clear();
metatype = Py_TYPE(base);
basetype = (PyObject*) metatype;
Py_INCREF(basetype);
}
#else
metatype = Py_TYPE(base);
#endif
metaclass = __Pyx_CalculateMetaclass(metatype, bases);
#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS)
Py_DECREF(base);
#endif
#if PY_MAJOR_VERSION < 3
Py_DECREF(basetype);
#endif
} else {
// no bases => use default metaclass
#if PY_MAJOR_VERSION < 3
metaclass = (PyObject *) &PyClass_Type;
#else
metaclass = (PyObject *) &PyType_Type;
#endif
Py_INCREF(metaclass);
}
return metaclass;
}
/////////////// Py3MetaclassGet.proto ///////////////
static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw); /*proto*/
/////////////// Py3MetaclassGet ///////////////
//@requires: FindInheritedMetaclass
//@requires: CalculateMetaclass
static PyObject *__Pyx_Py3MetaclassGet(PyObject *bases, PyObject *mkw) {
PyObject *metaclass = mkw ? __Pyx_PyDict_GetItemStr(mkw, PYIDENT("metaclass")) : NULL;
if (metaclass) {
Py_INCREF(metaclass);
if (PyDict_DelItem(mkw, PYIDENT("metaclass")) < 0) {
Py_DECREF(metaclass);
return NULL;
}
if (PyType_Check(metaclass)) {
PyObject* orig = metaclass;
metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases);
Py_DECREF(orig);
}
return metaclass;
}
return __Pyx_FindInheritedMetaclass(bases);
}
/////////////// CreateClass.proto ///////////////
static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name,
PyObject *qualname, PyObject *modname); /*proto*/
/////////////// CreateClass ///////////////
//@requires: FindInheritedMetaclass
//@requires: CalculateMetaclass
static PyObject *__Pyx_CreateClass(PyObject *bases, PyObject *dict, PyObject *name,
PyObject *qualname, PyObject *modname) {
PyObject *result;
PyObject *metaclass;
if (PyDict_SetItem(dict, PYIDENT("__module__"), modname) < 0)
return NULL;
if (PyDict_SetItem(dict, PYIDENT("__qualname__"), qualname) < 0)
return NULL;
/* Python2 __metaclass__ */
metaclass = __Pyx_PyDict_GetItemStr(dict, PYIDENT("__metaclass__"));
if (metaclass) {
Py_INCREF(metaclass);
if (PyType_Check(metaclass)) {
PyObject* orig = metaclass;
metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases);
Py_DECREF(orig);
}
} else {
metaclass = __Pyx_FindInheritedMetaclass(bases);
}
if (unlikely(!metaclass))
return NULL;
result = PyObject_CallFunctionObjArgs(metaclass, name, bases, dict, NULL);
Py_DECREF(metaclass);
return result;
}
/////////////// Py3ClassCreate.proto ///////////////
static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname,
PyObject *mkw, PyObject *modname, PyObject *doc); /*proto*/
static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict,
PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); /*proto*/
/////////////// Py3ClassCreate ///////////////
//@requires: PyObjectGetAttrStr
//@requires: CalculateMetaclass
static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name,
PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) {
PyObject *ns;
if (metaclass) {
PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, PYIDENT("__prepare__"));
if (prep) {
PyObject *pargs = PyTuple_Pack(2, name, bases);
if (unlikely(!pargs)) {
Py_DECREF(prep);
return NULL;
}
ns = PyObject_Call(prep, pargs, mkw);
Py_DECREF(prep);
Py_DECREF(pargs);
} else {
if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
PyErr_Clear();
ns = PyDict_New();
}
} else {
ns = PyDict_New();
}
if (unlikely(!ns))
return NULL;
/* Required here to emulate assignment order */
if (unlikely(PyObject_SetItem(ns, PYIDENT("__module__"), modname) < 0)) goto bad;
if (unlikely(PyObject_SetItem(ns, PYIDENT("__qualname__"), qualname) < 0)) goto bad;
if (unlikely(doc && PyObject_SetItem(ns, PYIDENT("__doc__"), doc) < 0)) goto bad;
return ns;
bad:
Py_DECREF(ns);
return NULL;
}
static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases,
PyObject *dict, PyObject *mkw,
int calculate_metaclass, int allow_py2_metaclass) {
PyObject *result, *margs;
PyObject *owned_metaclass = NULL;
if (allow_py2_metaclass) {
/* honour Python2 __metaclass__ for backward compatibility */
owned_metaclass = PyObject_GetItem(dict, PYIDENT("__metaclass__"));
if (owned_metaclass) {
metaclass = owned_metaclass;
} else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) {
PyErr_Clear();
} else {
return NULL;
}
}
if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) {
metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases);
Py_XDECREF(owned_metaclass);
if (unlikely(!metaclass))
return NULL;
owned_metaclass = metaclass;
}
margs = PyTuple_Pack(3, name, bases, dict);
if (unlikely(!margs)) {
result = NULL;
} else {
result = PyObject_Call(metaclass, margs, mkw);
Py_DECREF(margs);
}
Py_XDECREF(owned_metaclass);
return result;
}
/////////////// ExtTypeTest.proto ///////////////
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /*proto*/
/////////////// ExtTypeTest ///////////////
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/////////////// CallableCheck.proto ///////////////
#if CYTHON_USE_TYPE_SLOTS && PY_MAJOR_VERSION >= 3
#define __Pyx_PyCallable_Check(obj) (Py_TYPE(obj)->tp_call != NULL)
#else
#define __Pyx_PyCallable_Check(obj) PyCallable_Check(obj)
#endif
/////////////// PyDictContains.proto ///////////////
static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) {
int result = PyDict_Contains(dict, item);
return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
}
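// "x in d" is generated with eq=Py_EQ and "x not in d" with eq=Py_NE, so the
// expression above negates the membership result for "not in" while passing
// errors (result < 0) through unchanged.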
/////////////// PySetContains.proto ///////////////
static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq); /* proto */
/////////////// PySetContains ///////////////
//@requires: Builtins.c::pyfrozenset_new
static int __Pyx_PySet_ContainsUnhashable(PyObject *set, PyObject *key) {
int result = -1;
if (PySet_Check(key) && PyErr_ExceptionMatches(PyExc_TypeError)) {
/* Convert key to frozenset */
PyObject *tmpkey;
PyErr_Clear();
tmpkey = __Pyx_PyFrozenSet_New(key);
if (tmpkey != NULL) {
result = PySet_Contains(set, tmpkey);
Py_DECREF(tmpkey);
}
}
return result;
}
static CYTHON_INLINE int __Pyx_PySet_ContainsTF(PyObject* key, PyObject* set, int eq) {
int result = PySet_Contains(set, key);
if (unlikely(result < 0)) {
result = __Pyx_PySet_ContainsUnhashable(set, key);
}
return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
}
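// Like CPython's set lookup, an unhashable set key is retried as a frozenset,
// so e.g. "{1} in {frozenset({1})}" evaluates to True instead of raising.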
/////////////// PySequenceContains.proto ///////////////
static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) {
int result = PySequence_Contains(seq, item);
return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
}
/////////////// PyBoolOrNullFromLong.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_PyBoolOrNull_FromLong(long b) {
return unlikely(b < 0) ? NULL : __Pyx_PyBool_FromLong(b);
}
/////////////// GetBuiltinName.proto ///////////////
static PyObject *__Pyx_GetBuiltinName(PyObject *name); /*proto*/
/////////////// GetBuiltinName ///////////////
//@requires: PyObjectGetAttrStr
//@substitute: naming
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr($builtins_cname, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/////////////// GetNameInClass.proto ///////////////
#define __Pyx_GetNameInClass(var, nmspace, name) (var) = __Pyx__GetNameInClass(nmspace, name)
static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name); /*proto*/
/////////////// GetNameInClass ///////////////
//@requires: PyObjectGetAttrStr
//@requires: GetModuleGlobalName
//@requires: Exceptions.c::PyThreadStateGet
//@requires: Exceptions.c::PyErrFetchRestore
//@requires: Exceptions.c::PyErrExceptionMatches
static PyObject *__Pyx_GetGlobalNameAfterAttributeLookup(PyObject *name) {
PyObject *result;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
__Pyx_GetModuleGlobalNameUncached(result, name);
return result;
}
static PyObject *__Pyx__GetNameInClass(PyObject *nmspace, PyObject *name) {
PyObject *result;
result = __Pyx_PyObject_GetAttrStr(nmspace, name);
if (!result) {
result = __Pyx_GetGlobalNameAfterAttributeLookup(name);
}
return result;
}
/////////////// SetNameInClass.proto ///////////////
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
// Identifier names are always interned and have a pre-calculated hash value.
#define __Pyx_SetNameInClass(ns, name, value) \
(likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value))
#elif CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_SetNameInClass(ns, name, value) \
(likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value))
#else
#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value)
#endif
/////////////// GetModuleGlobalName.proto ///////////////
//@requires: PyDictVersioning
//@substitute: naming
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) { \
static PY_UINT64_T __pyx_dict_version = 0; \
static PyObject *__pyx_dict_cached_value = NULL; \
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION($moddict_cname))) ? \
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) : \
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) { \
PY_UINT64_T __pyx_dict_version; \
PyObject *__pyx_dict_cached_value; \
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value); \
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); /*proto*/
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); /*proto*/
#endif
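// Illustrative usage: reading a module-level name "x" expands to roughly
//     __Pyx_GetModuleGlobalName(value, PYIDENT("x"));
// with CYTHON_USE_DICT_VERSIONS, each call site revalidates its static cache
// only when the module dict's version tag (ma_version_tag) has changed.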
/////////////// GetModuleGlobalName ///////////////
//@requires: GetBuiltinName
//@substitute: naming
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
// Identifier names are always interned and have a pre-calculated hash value.
result = _PyDict_GetItem_KnownHash($moddict_cname, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem($moddict_cname, name);
__PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem($moddict_cname, name);
__PYX_UPDATE_DICT_CACHE($moddict_cname, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
//////////////////// GetAttr.proto ////////////////////
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /*proto*/
//////////////////// GetAttr ////////////////////
//@requires: PyObjectGetAttrStr
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/////////////// PyObjectLookupSpecial.proto ///////////////
//@requires: PyObjectGetAttrStr
#if CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_LookupSpecial(PyObject* obj, PyObject* attr_name) {
PyObject *res;
PyTypeObject *tp = Py_TYPE(obj);
#if PY_MAJOR_VERSION < 3
if (unlikely(PyInstance_Check(obj)))
return __Pyx_PyObject_GetAttrStr(obj, attr_name);
#endif
// adapted from CPython's special_lookup() in ceval.c
res = _PyType_Lookup(tp, attr_name);
if (likely(res)) {
descrgetfunc f = Py_TYPE(res)->tp_descr_get;
if (!f) {
Py_INCREF(res);
} else {
res = f(res, obj, (PyObject *)tp);
}
} else {
PyErr_SetObject(PyExc_AttributeError, attr_name);
}
return res;
}
#else
#define __Pyx_PyObject_LookupSpecial(o,n) __Pyx_PyObject_GetAttrStr(o,n)
#endif
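// Special methods are looked up on the type, not the instance, e.g. for the
// "__enter__"/"__exit__" calls of a "with" block; see the special_lookup()
// reference above.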
/////////////// PyObject_GenericGetAttrNoDict.proto ///////////////
// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7.
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
// No-args macro to allow function pointer assignment.
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/////////////// PyObject_GenericGetAttrNoDict ///////////////
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
// Copied and adapted from _PyObject_GenericGetAttrWithDict() in CPython 2.6/3.7.
// To be used in the "tp_getattro" slot of extension types that have no instance dict and cannot be subclassed.
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
// Optimise for the non-descriptor case because it is faster.
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/////////////// PyObject_GenericGetAttr.proto ///////////////
// Setting "tp_getattro" to anything but "PyObject_GenericGetAttr" disables fast method calls in Py3.7.
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
// No-args macro to allow function pointer assignment.
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/////////////// PyObject_GenericGetAttr ///////////////
//@requires: PyObject_GenericGetAttrNoDict
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/////////////// PyObjectGetAttrStrNoError.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);/*proto*/
/////////////// PyObjectGetAttrStrNoError ///////////////
//@requires: PyObjectGetAttrStr
//@requires: Exceptions.c::PyThreadStateGet
//@requires: Exceptions.c::PyErrFetchRestore
//@requires: Exceptions.c::PyErrExceptionMatches
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
// _PyObject_GenericGetAttrWithDict() in CPython 3.7+ can avoid raising the AttributeError.
// See https://bugs.python.org/issue32544
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/////////////// PyObjectGetAttrStr.proto ///////////////
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);/*proto*/
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/////////////// PyObjectGetAttrStr ///////////////
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/////////////// PyObjectSetAttrStr.proto ///////////////
#if CYTHON_USE_TYPE_SLOTS
#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL)
static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value);/*proto*/
#else
#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n)
#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v)
#endif
/////////////// PyObjectSetAttrStr ///////////////
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_setattro))
return tp->tp_setattro(obj, attr_name, value);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_setattr))
return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value);
#endif
return PyObject_SetAttr(obj, attr_name, value);
}
#endif
/////////////// PyObjectGetMethod.proto ///////////////
static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);/*proto*/
/////////////// PyObjectGetMethod ///////////////
//@requires: PyObjectGetAttrStr
static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) {
PyObject *attr;
#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP
// Copied from _PyObject_GetMethod() in CPython 3.7
PyTypeObject *tp = Py_TYPE(obj);
PyObject *descr;
descrgetfunc f = NULL;
PyObject **dictptr, *dict;
int meth_found = 0;
assert (*method == NULL);
if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) {
attr = __Pyx_PyObject_GetAttrStr(obj, name);
goto try_unpack;
}
if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) {
return 0;
}
descr = _PyType_Lookup(tp, name);
if (likely(descr != NULL)) {
Py_INCREF(descr);
        // Repeating the condition below compensates for MSVC's inability to test macros inside of macro expansions.
#if PY_MAJOR_VERSION >= 3
#ifdef __Pyx_CyFunction_USED
if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr)))
#else
if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type)))
#endif
#else
// "PyMethodDescr_Type" is not part of the C-API in Py2.
#ifdef __Pyx_CyFunction_USED
if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr)))
#else
if (likely(PyFunction_Check(descr)))
#endif
#endif
{
meth_found = 1;
} else {
f = Py_TYPE(descr)->tp_descr_get;
if (f != NULL && PyDescr_IsData(descr)) {
attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
Py_DECREF(descr);
goto try_unpack;
}
}
}
dictptr = _PyObject_GetDictPtr(obj);
if (dictptr != NULL && (dict = *dictptr) != NULL) {
Py_INCREF(dict);
attr = __Pyx_PyDict_GetItemStr(dict, name);
if (attr != NULL) {
Py_INCREF(attr);
Py_DECREF(dict);
Py_XDECREF(descr);
goto try_unpack;
}
Py_DECREF(dict);
}
if (meth_found) {
*method = descr;
return 1;
}
if (f != NULL) {
attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
Py_DECREF(descr);
goto try_unpack;
}
if (descr != NULL) {
*method = descr;
return 0;
}
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(name));
#endif
return 0;
// Generic fallback implementation using normal attribute lookup.
#else
attr = __Pyx_PyObject_GetAttrStr(obj, name);
goto try_unpack;
#endif
try_unpack:
#if CYTHON_UNPACK_METHODS
// Even if we failed to avoid creating a bound method object, it's still worth unpacking it now, if possible.
if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) {
PyObject *function = PyMethod_GET_FUNCTION(attr);
Py_INCREF(function);
Py_DECREF(attr);
*method = function;
return 1;
}
#endif
*method = attr;
return 0;
}
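// Illustrative usage (see PyObjectCallMethod1 below): a call "obj.f(arg)" becomes
//     is_method = __Pyx_PyObject_GetMethod(obj, name, &method);
//     result = is_method ? __Pyx_PyObject_Call2Args(method, obj, arg)
//                        : __Pyx_PyObject_CallOneArg(method, arg);
// so no bound-method object is materialised on the fast path.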
/////////////// UnpackUnboundCMethod.proto ///////////////
typedef struct {
PyObject *type;
PyObject **method_name;
// "func" is set on first access (direct C function pointer)
PyCFunction func;
// "method" is set on first access (fallback)
PyObject *method;
int flag;
} __Pyx_CachedCFunction;
/////////////// UnpackUnboundCMethod ///////////////
//@requires: PyObjectGetAttrStr
static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) {
PyObject *method;
method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name);
if (unlikely(!method))
return -1;
target->method = method;
#if CYTHON_COMPILING_IN_CPYTHON
#if PY_MAJOR_VERSION >= 3
    // the method descriptor type isn't exported in Py2.x, so we cannot easily check the type there
if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type)))
#endif
{
PyMethodDescrObject *descr = (PyMethodDescrObject*) method;
target->func = descr->d_method->ml_meth;
target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS);
}
#endif
return 0;
}
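// Illustrative: a call like "some_list.append(x)" can be routed through a
// static __Pyx_CachedCFunction for "list.append" and dispatched via
// __Pyx_CallUnboundCMethod1() below, skipping both the attribute lookup and
// the bound-method allocation after the first call.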
/////////////// CallUnboundCMethod0.proto ///////////////
//@substitute: naming
static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); /*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
// FASTCALL methods receive "&empty_tuple" as a plain "PyObject**" argument array with zero entries (nargs == 0)
#define __Pyx_CallUnboundCMethod0(cfunc, self) \
(likely((cfunc)->func) ? \
(likely((cfunc)->flag == METH_NOARGS) ? (*((cfunc)->func))(self, NULL) : \
(PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ? \
(PY_VERSION_HEX >= 0x030700A0 ? \
(*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0) : \
(*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL)) : \
(PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ? \
(*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &$empty_tuple, 0, NULL) : \
(likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, $empty_tuple, NULL)) : \
((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, $empty_tuple) : \
__Pyx__CallUnboundCMethod0(cfunc, self)))))) : \
__Pyx__CallUnboundCMethod0(cfunc, self))
#else
#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self)
#endif
/////////////// CallUnboundCMethod0 ///////////////
//@requires: UnpackUnboundCMethod
//@requires: PyObjectCall
static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) {
PyObject *args, *result = NULL;
if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
#if CYTHON_ASSUME_SAFE_MACROS
args = PyTuple_New(1);
if (unlikely(!args)) goto bad;
Py_INCREF(self);
PyTuple_SET_ITEM(args, 0, self);
#else
args = PyTuple_Pack(1, self);
if (unlikely(!args)) goto bad;
#endif
result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
Py_DECREF(args);
bad:
return result;
}
/////////////// CallUnboundCMethod1.proto ///////////////
static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);/*proto*/
#else
#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) __Pyx__CallUnboundCMethod1(cfunc, self, arg)
#endif
/////////////// CallUnboundCMethod1 ///////////////
//@requires: UnpackUnboundCMethod
//@requires: PyObjectCall
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg) {
if (likely(cfunc->func)) {
int flag = cfunc->flag;
// Not using #ifdefs for PY_VERSION_HEX to avoid C compiler warnings about unused functions.
if (flag == METH_O) {
return (*(cfunc->func))(self, arg);
} else if (PY_VERSION_HEX >= 0x030600B1 && flag == METH_FASTCALL) {
if (PY_VERSION_HEX >= 0x030700A0) {
return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, &arg, 1);
} else {
return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL);
}
} else if (PY_VERSION_HEX >= 0x030700A0 && flag == (METH_FASTCALL | METH_KEYWORDS)) {
return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL);
}
}
return __Pyx__CallUnboundCMethod1(cfunc, self, arg);
}
#endif
static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg){
PyObject *args, *result = NULL;
if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
#if CYTHON_COMPILING_IN_CPYTHON
if (cfunc->func && (cfunc->flag & METH_VARARGS)) {
args = PyTuple_New(1);
if (unlikely(!args)) goto bad;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
if (cfunc->flag & METH_KEYWORDS)
result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL);
else
result = (*cfunc->func)(self, args);
} else {
args = PyTuple_New(2);
if (unlikely(!args)) goto bad;
Py_INCREF(self);
PyTuple_SET_ITEM(args, 0, self);
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 1, arg);
result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
}
#else
args = PyTuple_Pack(2, self, arg);
if (unlikely(!args)) goto bad;
result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
#endif
bad:
Py_XDECREF(args);
return result;
}
/////////////// CallUnboundCMethod2.proto ///////////////
static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2); /*proto*/
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1
static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2); /*proto*/
#else
#define __Pyx_CallUnboundCMethod2(cfunc, self, arg1, arg2) __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2)
#endif
/////////////// CallUnboundCMethod2 ///////////////
//@requires: UnpackUnboundCMethod
//@requires: PyObjectCall
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030600B1
static CYTHON_INLINE PyObject *__Pyx_CallUnboundCMethod2(__Pyx_CachedCFunction *cfunc, PyObject *self, PyObject *arg1, PyObject *arg2) {
if (likely(cfunc->func)) {
PyObject *args[2] = {arg1, arg2};
if (cfunc->flag == METH_FASTCALL) {
#if PY_VERSION_HEX >= 0x030700A0
return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, args, 2);
#else
return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL);
#endif
}
#if PY_VERSION_HEX >= 0x030700A0
if (cfunc->flag == (METH_FASTCALL | METH_KEYWORDS))
return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, 2, NULL);
#endif
}
return __Pyx__CallUnboundCMethod2(cfunc, self, arg1, arg2);
}
#endif
static PyObject* __Pyx__CallUnboundCMethod2(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg1, PyObject* arg2){
PyObject *args, *result = NULL;
if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
#if CYTHON_COMPILING_IN_CPYTHON
if (cfunc->func && (cfunc->flag & METH_VARARGS)) {
args = PyTuple_New(2);
if (unlikely(!args)) goto bad;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
if (cfunc->flag & METH_KEYWORDS)
result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL);
else
result = (*cfunc->func)(self, args);
} else {
args = PyTuple_New(3);
if (unlikely(!args)) goto bad;
Py_INCREF(self);
PyTuple_SET_ITEM(args, 0, self);
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 1, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 2, arg2);
result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
}
#else
args = PyTuple_Pack(3, self, arg1, arg2);
if (unlikely(!args)) goto bad;
result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
#endif
bad:
Py_XDECREF(args);
return result;
}
/////////////// PyObjectCallMethod0.proto ///////////////
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); /*proto*/
/////////////// PyObjectCallMethod0 ///////////////
//@requires: PyObjectGetMethod
//@requires: PyObjectCallOneArg
//@requires: PyObjectCallNoArg
static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) {
PyObject *method = NULL, *result = NULL;
int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
if (likely(is_method)) {
result = __Pyx_PyObject_CallOneArg(method, obj);
Py_DECREF(method);
return result;
}
if (unlikely(!method)) goto bad;
result = __Pyx_PyObject_CallNoArg(method);
Py_DECREF(method);
bad:
return result;
}
/////////////// PyObjectCallMethod1.proto ///////////////
static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg); /*proto*/
/////////////// PyObjectCallMethod1 ///////////////
//@requires: PyObjectGetMethod
//@requires: PyObjectCallOneArg
//@requires: PyObjectCall2Args
static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) {
// Separate function to avoid excessive inlining.
PyObject *result = __Pyx_PyObject_CallOneArg(method, arg);
Py_DECREF(method);
return result;
}
static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) {
PyObject *method = NULL, *result;
int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
if (likely(is_method)) {
result = __Pyx_PyObject_Call2Args(method, obj, arg);
Py_DECREF(method);
return result;
}
if (unlikely(!method)) return NULL;
return __Pyx__PyObject_CallMethod1(method, arg);
}
/////////////// PyObjectCallMethod2.proto ///////////////
static PyObject* __Pyx_PyObject_CallMethod2(PyObject* obj, PyObject* method_name, PyObject* arg1, PyObject* arg2); /*proto*/
/////////////// PyObjectCallMethod2 ///////////////
//@requires: PyObjectCall
//@requires: PyFunctionFastCall
//@requires: PyCFunctionFastCall
//@requires: PyObjectCall2Args
static PyObject* __Pyx_PyObject_Call3Args(PyObject* function, PyObject* arg1, PyObject* arg2, PyObject* arg3) {
    PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[3] = {arg1, arg2, arg3};
return __Pyx_PyFunction_FastCall(function, args, 3);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[3] = {arg1, arg2, arg3};
        return __Pyx_PyCFunction_FastCall(function, args, 3);
}
#endif
args = PyTuple_New(3);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(arg3);
PyTuple_SET_ITEM(args, 2, arg3);
    result = __Pyx_PyObject_Call(function, args, NULL);
    Py_DECREF(args);
done:
    return result;
}
static PyObject* __Pyx_PyObject_CallMethod2(PyObject* obj, PyObject* method_name, PyObject* arg1, PyObject* arg2) {
    PyObject *method = NULL, *result;
int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
if (likely(is_method)) {
result = __Pyx_PyObject_Call3Args(method, obj, arg1, arg2);
Py_DECREF(method);
return result;
}
if (unlikely(!method)) return NULL;
result = __Pyx_PyObject_Call2Args(method, arg1, arg2);
Py_DECREF(method);
return result;
}
/////////////// tp_new.proto ///////////////
#define __Pyx_tp_new(type_obj, args) __Pyx_tp_new_kwargs(type_obj, args, NULL)
static CYTHON_INLINE PyObject* __Pyx_tp_new_kwargs(PyObject* type_obj, PyObject* args, PyObject* kwargs) {
return (PyObject*) (((PyTypeObject*)type_obj)->tp_new((PyTypeObject*)type_obj, args, kwargs));
}
/////////////// PyObjectCall.proto ///////////////
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); /*proto*/
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/////////////// PyObjectCall ///////////////
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = Py_TYPE(func)->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/////////////// PyObjectCallMethO.proto ///////////////
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); /*proto*/
#endif
/////////////// PyObjectCallMethO ///////////////
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/////////////// PyFunctionFastCall.proto ///////////////
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs) \
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
// let's assume that the non-public C-API function might still change during the 3.6 beta phase
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
// Backport from Python 3
// Assert a build-time dependency, as an expression.
// Your compile will fail if the condition isn't true, or can't be evaluated
// by the compiler. This can be used in an expression: its value is 0.
// Example:
// #define foo_to_char(foo) \
// ((char *)(foo) \
// + Py_BUILD_ASSERT_EXPR(offsetof(struct foo, string) == 0))
//
// Written by Rusty Russell, public domain, http://ccodearchive.net/
#define __Pyx_BUILD_ASSERT_EXPR(cond) \
(sizeof(char [1 - 2*!(cond)]) - 1)
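// e.g. __Pyx_BUILD_ASSERT_EXPR(sizeof(int) >= 2) evaluates to
// (sizeof(char[1]) - 1), i.e. 0, while a false condition produces the invalid
// array type "char[-1]" and aborts the compilation.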
#ifndef Py_MEMBER_SIZE
// Get the size of a structure member in bytes
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
#if CYTHON_FAST_PYCALL
// Initialised by module init code.
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
// This is the long runtime version of
// #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus)
// offsetof(PyFrameObject, f_localsplus) differs between regular C-Python and Stackless Python.
// Therefore the offset is computed at run time from PyFrame_type.tp_basicsize. That is feasible,
// because f_localsplus is the last field of PyFrameObject (checked by __Pyx_BUILD_ASSERT_EXPR below).
#define __Pxy_PyFrame_Initialize_Offsets() \
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)), \
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame) \
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif // CYTHON_FAST_PYCALL
#endif
/////////////// PyFunctionFastCall ///////////////
// copied from CPython 3.6 ceval.c
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
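    // decref'ing the frame can invoke arbitrary __del__ methods while the C
    // stack of this call is still in use, so the recursion depth is bumped
    // for the duration of the frame's destruction (mirroring CPython).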
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
//#if PY_VERSION_HEX >= 0x03050000
//PyObject *name, *qualname;
//#endif
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
/* Fast paths */
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
            /* function called with no arguments, but all parameters have
               a default value: use the default values as arguments. */
            args = &PyTuple_GET_ITEM(argdefs, 0);
            result = __Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
//#if PY_VERSION_HEX >= 0x03050000
//name = ((PyFunctionObject *)func) -> func_name;
//qualname = ((PyFunctionObject *)func) -> func_qualname;
//#endif
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
//#if PY_VERSION_HEX >= 0x03050000
//return _PyEval_EvalCodeWithName((PyObject*)co, globals, (PyObject *)NULL,
// args, nargs,
// NULL, 0,
// d, nd, kwdefs,
// closure, name, qualname);
//#elif PY_MAJOR_VERSION >= 3
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif /* CPython < 3.6 */
#endif /* CYTHON_FAST_PYCALL */
/////////////// PyCFunctionFastCall.proto ///////////////
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/////////////// PyCFunctionFastCall ///////////////
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif /* CYTHON_FAST_PYCCALL */
/////////////// PyObjectCall2Args.proto ///////////////
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); /*proto*/
/////////////// PyObjectCall2Args ///////////////
//@requires: PyObjectCall
//@requires: PyFunctionFastCall
//@requires: PyCFunctionFastCall
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/////////////// PyObjectCallOneArg.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /*proto*/
/////////////// PyObjectCallOneArg ///////////////
//@requires: PyObjectCallMethO
//@requires: PyObjectCall
//@requires: PyFunctionFastCall
//@requires: PyCFunctionFastCall
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
// fast and simple case that we are optimising for
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (__Pyx_PyFastCFunction_Check(func)) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/////////////// PyObjectCallNoArg.proto ///////////////
//@requires: PyObjectCall
//@substitute: naming
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); /*proto*/
#else
#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, $empty_tuple, NULL)
#endif
/////////////// PyObjectCallNoArg ///////////////
//@requires: PyObjectCallMethO
//@requires: PyObjectCall
//@requires: PyFunctionFastCall
//@substitute: naming
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, NULL, 0);
}
#endif
#ifdef __Pyx_CyFunction_USED
if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
#else
if (likely(PyCFunction_Check(func)))
#endif
{
if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
// fast and simple case that we are optimising for
return __Pyx_PyObject_CallMethO(func, NULL);
}
}
return __Pyx_PyObject_Call(func, $empty_tuple, NULL);
}
#endif
/////////////// MatrixMultiply.proto ///////////////
#if PY_VERSION_HEX >= 0x03050000
#define __Pyx_PyNumber_MatrixMultiply(x,y) PyNumber_MatrixMultiply(x,y)
#define __Pyx_PyNumber_InPlaceMatrixMultiply(x,y) PyNumber_InPlaceMatrixMultiply(x,y)
#else
#define __Pyx_PyNumber_MatrixMultiply(x,y) __Pyx__PyNumber_MatrixMultiply(x, y, "@")
static PyObject* __Pyx__PyNumber_MatrixMultiply(PyObject* x, PyObject* y, const char* op_name);
static PyObject* __Pyx_PyNumber_InPlaceMatrixMultiply(PyObject* x, PyObject* y);
#endif
/////////////// MatrixMultiply ///////////////
//@requires: PyObjectGetAttrStr
//@requires: PyObjectCallOneArg
//@requires: PyFunctionFastCall
//@requires: PyCFunctionFastCall
#if PY_VERSION_HEX < 0x03050000
static PyObject* __Pyx_PyObject_CallMatrixMethod(PyObject* method, PyObject* arg) {
// NOTE: eats the method reference
PyObject *result = NULL;
#if CYTHON_UNPACK_METHODS
if (likely(PyMethod_Check(method))) {
PyObject *self = PyMethod_GET_SELF(method);
if (likely(self)) {
PyObject *args;
PyObject *function = PyMethod_GET_FUNCTION(method);
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {self, arg};
result = __Pyx_PyFunction_FastCall(function, args, 2);
goto done;
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {self, arg};
result = __Pyx_PyCFunction_FastCall(function, args, 2);
goto done;
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(self);
PyTuple_SET_ITEM(args, 0, self);
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 1, arg);
Py_INCREF(function);
Py_DECREF(method); method = NULL;
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
return result;
}
}
#endif
result = __Pyx_PyObject_CallOneArg(method, arg);
done:
Py_DECREF(method);
return result;
}
#define __Pyx_TryMatrixMethod(x, y, py_method_name) { \
PyObject *func = __Pyx_PyObject_GetAttrStr(x, py_method_name); \
if (func) { \
PyObject *result = __Pyx_PyObject_CallMatrixMethod(func, y); \
if (result != Py_NotImplemented) \
return result; \
Py_DECREF(result); \
} else { \
if (!PyErr_ExceptionMatches(PyExc_AttributeError)) \
return NULL; \
PyErr_Clear(); \
} \
}
static PyObject* __Pyx__PyNumber_MatrixMultiply(PyObject* x, PyObject* y, const char* op_name) {
int right_is_subtype = PyObject_IsSubclass((PyObject*)Py_TYPE(y), (PyObject*)Py_TYPE(x));
if (unlikely(right_is_subtype == -1))
return NULL;
if (right_is_subtype) {
// to allow subtypes to override parent behaviour, try reversed operation first
// see note at https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types
__Pyx_TryMatrixMethod(y, x, PYIDENT("__rmatmul__"))
}
__Pyx_TryMatrixMethod(x, y, PYIDENT("__matmul__"))
if (!right_is_subtype) {
__Pyx_TryMatrixMethod(y, x, PYIDENT("__rmatmul__"))
}
PyErr_Format(PyExc_TypeError,
"unsupported operand type(s) for %.2s: '%.100s' and '%.100s'",
op_name,
Py_TYPE(x)->tp_name,
Py_TYPE(y)->tp_name);
return NULL;
}
static PyObject* __Pyx_PyNumber_InPlaceMatrixMultiply(PyObject* x, PyObject* y) {
__Pyx_TryMatrixMethod(x, y, PYIDENT("__imatmul__"))
return __Pyx__PyNumber_MatrixMultiply(x, y, "@=");
}
#undef __Pyx_TryMatrixMethod
#endif
/////////////// PyDictVersioning.proto ///////////////
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) \
(version_var) = __PYX_GET_DICT_VERSION(dict); \
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) { \
static PY_UINT64_T __pyx_dict_version = 0; \
static PyObject *__pyx_dict_cached_value = NULL; \
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) { \
(VAR) = __pyx_dict_cached_value; \
} else { \
(VAR) = __pyx_dict_cached_value = (LOOKUP); \
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT); \
} \
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); /*proto*/
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); /*proto*/
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); /*proto*/
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
/////////////// PyDictVersioning ///////////////
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
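// Illustrative sketch (not part of the original file): how generated code
// typically uses the macros above to cache a dict lookup. The function name
// and the PYIDENT key are hypothetical. The cached pointer is refreshed only
// when the dict's ma_version_tag changes, so repeated lookups of a stable name
// collapse into a single integer comparison; the cache stores a borrowed
// pointer, so the pattern suits long-lived dicts such as module namespaces.
static CYTHON_UNUSED PyObject* __pyx_example_cached_dict_lookup(PyObject* d) {
    PyObject* value;
    __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
        value, d, PyDict_GetItemWithError(d, PYIDENT("target")))
    // like PyDict_GetItemWithError(), this yields a borrowed reference or NULL
    return value;
}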
// Cython-0.29.28/Cython/Utility/Optimize.c
/*
* Optional optimisations of built-in functions and methods.
*
* Required replacements of builtins are in Builtins.c.
*
* General object operations and protocols are in ObjectHandling.c.
*/
/////////////// append.proto ///////////////
static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x); /*proto*/
/////////////// append ///////////////
//@requires: ListAppend
//@requires: ObjectHandling.c::PyObjectCallMethod1
static CYTHON_INLINE int __Pyx_PyObject_Append(PyObject* L, PyObject* x) {
if (likely(PyList_CheckExact(L))) {
if (unlikely(__Pyx_PyList_Append(L, x) < 0)) return -1;
} else {
PyObject* retval = __Pyx_PyObject_CallMethod1(L, PYIDENT("append"), x);
if (unlikely(!retval))
return -1;
Py_DECREF(retval);
}
return 0;
}
/////////////// ListAppend.proto ///////////////
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
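// Fast path: grow in place exactly when CPython's list_resize() would not
// reallocate (spare capacity available and the list more than half full);
// everything else falls back to PyList_Append().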
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/////////////// ListCompAppend.proto ///////////////
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
//////////////////// ListExtend.proto ////////////////////
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/////////////// pop.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L); /*proto*/
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L); /*proto*/
#define __Pyx_PyObject_Pop(L) (likely(PyList_CheckExact(L)) ? \
__Pyx_PyList_Pop(L) : __Pyx__PyObject_Pop(L))
#else
#define __Pyx_PyList_Pop(L) __Pyx__PyObject_Pop(L)
#define __Pyx_PyObject_Pop(L) __Pyx__PyObject_Pop(L)
#endif
/////////////// pop ///////////////
//@requires: ObjectHandling.c::PyObjectCallMethod0
static CYTHON_INLINE PyObject* __Pyx__PyObject_Pop(PyObject* L) {
if (Py_TYPE(L) == &PySet_Type) {
return PySet_Pop(L);
}
return __Pyx_PyObject_CallMethod0(L, PYIDENT("pop"));
}
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE PyObject* __Pyx_PyList_Pop(PyObject* L) {
/* Check both that the size is positive and that no shrinking reallocation needs to be done. */
if (likely(PyList_GET_SIZE(L) > (((PyListObject*)L)->allocated >> 1))) {
__Pyx_SET_SIZE(L, Py_SIZE(L) - 1);
return PyList_GET_ITEM(L, PyList_GET_SIZE(L));
}
return CALL_UNBOUND_METHOD(PyList_Type, "pop", L);
}
#endif
/////////////// pop_index.proto ///////////////
static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix); /*proto*/
static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix); /*proto*/
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix); /*proto*/
#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \
(likely(PyList_CheckExact(L) && __Pyx_fits_Py_ssize_t(ix, type, is_signed))) ? \
__Pyx__PyList_PopIndex(L, py_ix, ix) : ( \
(unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \
__Pyx__PyObject_PopIndex(L, py_ix)))
#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \
__Pyx_fits_Py_ssize_t(ix, type, is_signed) ? \
__Pyx__PyList_PopIndex(L, py_ix, ix) : ( \
(unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \
__Pyx__PyObject_PopIndex(L, py_ix)))
#else
#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) \
__Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func)
#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) ( \
(unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) : \
__Pyx__PyObject_PopIndex(L, py_ix))
#endif
/////////////// pop_index ///////////////
//@requires: ObjectHandling.c::PyObjectCallMethod1
static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix) {
PyObject *r;
if (unlikely(!py_ix)) return NULL;
r = __Pyx__PyObject_PopIndex(L, py_ix);
Py_DECREF(py_ix);
return r;
}
static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix) {
return __Pyx_PyObject_CallMethod1(L, PYIDENT("pop"), py_ix);
}
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix) {
Py_ssize_t size = PyList_GET_SIZE(L);
if (likely(size > (((PyListObject*)L)->allocated >> 1))) {
Py_ssize_t cix = ix;
if (cix < 0) {
cix += size;
}
if (likely(__Pyx_is_valid_index(cix, size))) {
PyObject* v = PyList_GET_ITEM(L, cix);
__Pyx_SET_SIZE(L, Py_SIZE(L) - 1);
size -= 1;
memmove(&PyList_GET_ITEM(L, cix), &PyList_GET_ITEM(L, cix+1), (size_t)(size-cix)*sizeof(PyObject*));
return v;
}
}
if (py_ix == Py_None) {
return __Pyx__PyObject_PopNewIndex(L, PyInt_FromSsize_t(ix));
} else {
return __Pyx__PyObject_PopIndex(L, py_ix);
}
}
#endif
/////////////// dict_getitem_default.proto ///////////////
static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value); /*proto*/
/////////////// dict_getitem_default ///////////////
static PyObject* __Pyx_PyDict_GetItemDefault(PyObject* d, PyObject* key, PyObject* default_value) {
PyObject* value;
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (unlikely(PyErr_Occurred()))
return NULL;
value = default_value;
}
Py_INCREF(value);
// avoid C compiler warning about unused utility functions
if ((1));
#else
if (PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key)) {
/* these presumably have safe hash functions */
value = PyDict_GetItem(d, key);
if (unlikely(!value)) {
value = default_value;
}
Py_INCREF(value);
}
#endif
else {
if (default_value == Py_None)
value = CALL_UNBOUND_METHOD(PyDict_Type, "get", d, key);
else
value = CALL_UNBOUND_METHOD(PyDict_Type, "get", d, key, default_value);
}
return value;
}
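// Illustrative sketch (not part of the original file): the helper above is
// what Cython emits for Python-level d.get(key, default). The wrapper name is
// hypothetical; the helper always returns a new reference, or NULL on error.
static CYTHON_UNUSED PyObject* __pyx_example_dict_get(PyObject* d, PyObject* key) {
    // equivalent of d.get(key), i.e. d.get(key, None)
    return __Pyx_PyDict_GetItemDefault(d, key, Py_None);
}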
/////////////// dict_setdefault.proto ///////////////
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value, int is_safe_type); /*proto*/
/////////////// dict_setdefault ///////////////
static CYTHON_INLINE PyObject *__Pyx_PyDict_SetDefault(PyObject *d, PyObject *key, PyObject *default_value,
CYTHON_UNUSED int is_safe_type) {
PyObject* value;
#if PY_VERSION_HEX >= 0x030400A0
// we keep the method call at the end to avoid "unused" C compiler warnings
if ((1)) {
value = PyDict_SetDefault(d, key, default_value);
if (unlikely(!value)) return NULL;
Py_INCREF(value);
#else
if (is_safe_type == 1 || (is_safe_type == -1 &&
/* the following builtins presumably have repeatably safe and fast hash functions */
#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
(PyUnicode_CheckExact(key) || PyString_CheckExact(key) || PyLong_CheckExact(key)))) {
value = PyDict_GetItemWithError(d, key);
if (unlikely(!value)) {
if (unlikely(PyErr_Occurred()))
return NULL;
if (unlikely(PyDict_SetItem(d, key, default_value) == -1))
return NULL;
value = default_value;
}
Py_INCREF(value);
#else
(PyString_CheckExact(key) || PyUnicode_CheckExact(key) || PyInt_CheckExact(key) || PyLong_CheckExact(key)))) {
value = PyDict_GetItem(d, key);
if (unlikely(!value)) {
if (unlikely(PyDict_SetItem(d, key, default_value) == -1))
return NULL;
value = default_value;
}
Py_INCREF(value);
#endif
#endif
} else {
value = CALL_UNBOUND_METHOD(PyDict_Type, "setdefault", d, key, default_value);
}
return value;
}
/////////////// py_dict_clear.proto ///////////////
#define __Pyx_PyDict_Clear(d) (PyDict_Clear(d), 0)
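// The comma expression gives the macro an int value (always 0, since
// PyDict_Clear() returns void and cannot fail), so callers can use it in
// error-checked statement positions like any other helper.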
/////////////// py_dict_pop.proto ///////////////
static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyObject *default_value); /*proto*/
/////////////// py_dict_pop ///////////////
static CYTHON_INLINE PyObject *__Pyx_PyDict_Pop(PyObject *d, PyObject *key, PyObject *default_value) {
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B3
if ((1)) {
return _PyDict_Pop(d, key, default_value);
} else
// avoid "function unused" warnings
#endif
if (default_value) {
return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key, default_value);
} else {
return CALL_UNBOUND_METHOD(PyDict_Type, "pop", d, key);
}
}
/////////////// dict_iter.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name,
Py_ssize_t* p_orig_length, int* p_is_dict);
static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos,
PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict);
/////////////// dict_iter ///////////////
//@requires: ObjectHandling.c::UnpackTuple2
//@requires: ObjectHandling.c::IterFinish
//@requires: ObjectHandling.c::PyObjectCallMethod0
static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name,
Py_ssize_t* p_orig_length, int* p_source_is_dict) {
is_dict = is_dict || likely(PyDict_CheckExact(iterable));
*p_source_is_dict = is_dict;
if (is_dict) {
#if !CYTHON_COMPILING_IN_PYPY
*p_orig_length = PyDict_Size(iterable);
Py_INCREF(iterable);
return iterable;
#elif PY_MAJOR_VERSION >= 3
// On PyPy3, we need to translate manually a few method names.
// This logic is not needed on CPython thanks to the fast case above.
static PyObject *py_items = NULL, *py_keys = NULL, *py_values = NULL;
PyObject **pp = NULL;
if (method_name) {
const char *name = PyUnicode_AsUTF8(method_name);
if (strcmp(name, "iteritems") == 0) pp = &py_items;
else if (strcmp(name, "iterkeys") == 0) pp = &py_keys;
else if (strcmp(name, "itervalues") == 0) pp = &py_values;
if (pp) {
if (!*pp) {
*pp = PyUnicode_FromString(name + 4);
if (!*pp)
return NULL;
}
method_name = *pp;
}
}
#endif
}
*p_orig_length = 0;
if (method_name) {
PyObject* iter;
iterable = __Pyx_PyObject_CallMethod0(iterable, method_name);
if (!iterable)
return NULL;
#if !CYTHON_COMPILING_IN_PYPY
if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable))
return iterable;
#endif
iter = PyObject_GetIter(iterable);
Py_DECREF(iterable);
return iter;
}
return PyObject_GetIter(iterable);
}
static CYTHON_INLINE int __Pyx_dict_iter_next(
PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos,
PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) {
PyObject* next_item;
#if !CYTHON_COMPILING_IN_PYPY
if (source_is_dict) {
PyObject *key, *value;
if (unlikely(orig_length != PyDict_Size(iter_obj))) {
PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration");
return -1;
}
if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) {
return 0;
}
if (pitem) {
PyObject* tuple = PyTuple_New(2);
if (unlikely(!tuple)) {
return -1;
}
Py_INCREF(key);
Py_INCREF(value);
PyTuple_SET_ITEM(tuple, 0, key);
PyTuple_SET_ITEM(tuple, 1, value);
*pitem = tuple;
} else {
if (pkey) {
Py_INCREF(key);
*pkey = key;
}
if (pvalue) {
Py_INCREF(value);
*pvalue = value;
}
}
return 1;
} else if (PyTuple_CheckExact(iter_obj)) {
Py_ssize_t pos = *ppos;
if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0;
*ppos = pos + 1;
next_item = PyTuple_GET_ITEM(iter_obj, pos);
Py_INCREF(next_item);
} else if (PyList_CheckExact(iter_obj)) {
Py_ssize_t pos = *ppos;
if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0;
*ppos = pos + 1;
next_item = PyList_GET_ITEM(iter_obj, pos);
Py_INCREF(next_item);
} else
#endif
{
next_item = PyIter_Next(iter_obj);
if (unlikely(!next_item)) {
return __Pyx_IterFinish();
}
}
if (pitem) {
*pitem = next_item;
} else if (pkey && pvalue) {
if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1))
return -1;
} else if (pkey) {
*pkey = next_item;
} else {
*pvalue = next_item;
}
return 1;
}
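// Illustrative sketch (not part of the original file): the driving loop that
// generated code wraps around the two helpers above, here for something like
// "for k, v in d.items(): ...". The function name is hypothetical. On CPython,
// __Pyx_dict_iterator() returns the dict itself and PyDict_Next() walks it;
// elsewhere a real iterator is created and each item is unpacked per entry.
static CYTHON_UNUSED Py_ssize_t __pyx_example_count_dict_items(PyObject* d) {
    PyObject *iter_obj, *key, *value;
    Py_ssize_t orig_length, pos = 0, count = 0;
    int source_is_dict, status;
    iter_obj = __Pyx_dict_iterator(d, 1, PYIDENT("items"), &orig_length, &source_is_dict);
    if (unlikely(!iter_obj)) return -1;
    while ((status = __Pyx_dict_iter_next(iter_obj, orig_length, &pos,
                                          &key, &value, NULL, source_is_dict)) > 0) {
        // key and value are returned as new references
        Py_DECREF(key);
        Py_DECREF(value);
        count++;
    }
    Py_DECREF(iter_obj);
    return (status < 0) ? -1 : count;  // -1 on error, else the item count
}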
/////////////// set_iter.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_set_iterator(PyObject* iterable, int is_set,
Py_ssize_t* p_orig_length, int* p_source_is_set); /*proto*/
static CYTHON_INLINE int __Pyx_set_iter_next(
PyObject* iter_obj, Py_ssize_t orig_length,
Py_ssize_t* ppos, PyObject **value,
int source_is_set); /*proto*/
/////////////// set_iter ///////////////
//@requires: ObjectHandling.c::IterFinish
static CYTHON_INLINE PyObject* __Pyx_set_iterator(PyObject* iterable, int is_set,
Py_ssize_t* p_orig_length, int* p_source_is_set) {
#if CYTHON_COMPILING_IN_CPYTHON
is_set = is_set || likely(PySet_CheckExact(iterable) || PyFrozenSet_CheckExact(iterable));
*p_source_is_set = is_set;
if (likely(is_set)) {
*p_orig_length = PySet_Size(iterable);
Py_INCREF(iterable);
return iterable;
}
#else
(void)is_set;
*p_source_is_set = 0;
#endif
*p_orig_length = 0;
return PyObject_GetIter(iterable);
}
static CYTHON_INLINE int __Pyx_set_iter_next(
PyObject* iter_obj, Py_ssize_t orig_length,
Py_ssize_t* ppos, PyObject **value,
int source_is_set) {
if (!CYTHON_COMPILING_IN_CPYTHON || unlikely(!source_is_set)) {
*value = PyIter_Next(iter_obj);
if (unlikely(!*value)) {
return __Pyx_IterFinish();
}
(void)orig_length;
(void)ppos;
return 1;
}
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(PySet_GET_SIZE(iter_obj) != orig_length)) {
PyErr_SetString(
PyExc_RuntimeError,
"set changed size during iteration");
return -1;
}
{
Py_hash_t hash;
int ret = _PySet_NextEntry(iter_obj, ppos, value, &hash);
// CPython does not raise errors here, only if !isinstance(iter_obj, set/frozenset)
assert (ret != -1);
if (likely(ret)) {
Py_INCREF(*value);
return 1;
}
}
#endif
return 0;
}
/////////////// py_set_discard_unhashable ///////////////
//@requires: Builtins.c::pyfrozenset_new
static int __Pyx_PySet_DiscardUnhashable(PyObject *set, PyObject *key) {
PyObject *tmpkey;
int rv;
if (likely(!PySet_Check(key) || !PyErr_ExceptionMatches(PyExc_TypeError)))
return -1;
PyErr_Clear();
tmpkey = __Pyx_PyFrozenSet_New(key);
if (tmpkey == NULL)
return -1;
rv = PySet_Discard(set, tmpkey);
Py_DECREF(tmpkey);
return rv;
}
/////////////// py_set_discard.proto ///////////////
static CYTHON_INLINE int __Pyx_PySet_Discard(PyObject *set, PyObject *key); /*proto*/
/////////////// py_set_discard ///////////////
//@requires: py_set_discard_unhashable
static CYTHON_INLINE int __Pyx_PySet_Discard(PyObject *set, PyObject *key) {
int found = PySet_Discard(set, key);
// Convert *key* to frozenset if necessary
if (unlikely(found < 0)) {
found = __Pyx_PySet_DiscardUnhashable(set, key);
}
// note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works
return found;
}
/////////////// py_set_remove.proto ///////////////
static CYTHON_INLINE int __Pyx_PySet_Remove(PyObject *set, PyObject *key); /*proto*/
/////////////// py_set_remove ///////////////
//@requires: py_set_discard_unhashable
static int __Pyx_PySet_RemoveNotFound(PyObject *set, PyObject *key, int found) {
// Convert *key* to frozenset if necessary
if (unlikely(found < 0)) {
found = __Pyx_PySet_DiscardUnhashable(set, key);
}
if (likely(found == 0)) {
// Not found
PyObject *tup;
tup = PyTuple_Pack(1, key);
if (!tup)
return -1;
PyErr_SetObject(PyExc_KeyError, tup);
Py_DECREF(tup);
return -1;
}
// note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works
return found;
}
static CYTHON_INLINE int __Pyx_PySet_Remove(PyObject *set, PyObject *key) {
int found = PySet_Discard(set, key);
if (unlikely(found != 1)) {
// note: returns -1 on error, 0 (not found) or 1 (found) otherwise => error check for -1 or < 0 works
return __Pyx_PySet_RemoveNotFound(set, key, found);
}
return 0;
}
/////////////// unicode_iter.proto ///////////////
static CYTHON_INLINE int __Pyx_init_unicode_iteration(
PyObject* ustring, Py_ssize_t *length, void** data, int *kind); /* proto */
/////////////// unicode_iter ///////////////
static CYTHON_INLINE int __Pyx_init_unicode_iteration(
PyObject* ustring, Py_ssize_t *length, void** data, int *kind) {
#if CYTHON_PEP393_ENABLED
if (unlikely(__Pyx_PyUnicode_READY(ustring) < 0)) return -1;
*kind = PyUnicode_KIND(ustring);
*length = PyUnicode_GET_LENGTH(ustring);
*data = PyUnicode_DATA(ustring);
#else
*kind = 0;
*length = PyUnicode_GET_SIZE(ustring);
*data = (void*)PyUnicode_AS_UNICODE(ustring);
#endif
return 0;
}
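// Illustrative sketch (not part of the original file): a typical caller of
// __Pyx_init_unicode_iteration(), reading code points via __Pyx_PyUnicode_READ()
// so that both the PEP 393 and the legacy branch above are covered. The
// function name is hypothetical.
static CYTHON_UNUSED Py_ssize_t __pyx_example_count_spaces(PyObject* ustring) {
    Py_ssize_t length, i, count = 0;
    void* data;
    int kind;
    if (unlikely(__Pyx_init_unicode_iteration(ustring, &length, &data, &kind) < 0))
        return -1;
    for (i = 0; i < length; i++) {
        if (__Pyx_PyUnicode_READ(kind, data, i) == (Py_UCS4)' ')
            count++;
    }
    return count;
}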
/////////////// pyobject_as_double.proto ///////////////
static double __Pyx__PyObject_AsDouble(PyObject* obj); /* proto */
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyObject_AsDouble(obj) \
(likely(PyFloat_CheckExact(obj)) ? PyFloat_AS_DOUBLE(obj) : \
likely(PyInt_CheckExact(obj)) ? \
PyFloat_AsDouble(obj) : __Pyx__PyObject_AsDouble(obj))
#else
#define __Pyx_PyObject_AsDouble(obj) \
((likely(PyFloat_CheckExact(obj))) ? \
PyFloat_AS_DOUBLE(obj) : __Pyx__PyObject_AsDouble(obj))
#endif
/////////////// pyobject_as_double ///////////////
static double __Pyx__PyObject_AsDouble(PyObject* obj) {
PyObject* float_value;
#if !CYTHON_USE_TYPE_SLOTS
float_value = PyNumber_Float(obj); if ((0)) goto bad;
#else
PyNumberMethods *nb = Py_TYPE(obj)->tp_as_number;
if (likely(nb) && likely(nb->nb_float)) {
float_value = nb->nb_float(obj);
if (likely(float_value) && unlikely(!PyFloat_Check(float_value))) {
PyErr_Format(PyExc_TypeError,
"__float__ returned non-float (type %.200s)",
Py_TYPE(float_value)->tp_name);
Py_DECREF(float_value);
goto bad;
}
} else if (PyUnicode_CheckExact(obj) || PyBytes_CheckExact(obj)) {
#if PY_MAJOR_VERSION >= 3
float_value = PyFloat_FromString(obj);
#else
float_value = PyFloat_FromString(obj, 0);
#endif
} else {
PyObject* args = PyTuple_New(1);
if (unlikely(!args)) goto bad;
PyTuple_SET_ITEM(args, 0, obj);
float_value = PyObject_Call((PyObject*)&PyFloat_Type, args, 0);
PyTuple_SET_ITEM(args, 0, 0);
Py_DECREF(args);
}
#endif
if (likely(float_value)) {
double value = PyFloat_AS_DOUBLE(float_value);
Py_DECREF(float_value);
return value;
}
bad:
return (double)-1;
}
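// Illustrative sketch (not part of the original file): like PyFloat_AsDouble(),
// the helpers above signal failure through the ambiguous return value -1.0,
// so callers must disambiguate with PyErr_Occurred(). The wrapper name is
// hypothetical.
static CYTHON_UNUSED double __pyx_example_as_double(PyObject* obj, int* error) {
    double value = __Pyx_PyObject_AsDouble(obj);
    *error = (value == (double)-1 && PyErr_Occurred()) ? 1 : 0;
    return value;
}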
/////////////// PyNumberPow2.proto ///////////////
#define __Pyx_PyNumber_InPlacePowerOf2(a, b, c) __Pyx__PyNumber_PowerOf2(a, b, c, 1)
#define __Pyx_PyNumber_PowerOf2(a, b, c) __Pyx__PyNumber_PowerOf2(a, b, c, 0)
static PyObject* __Pyx__PyNumber_PowerOf2(PyObject *two, PyObject *exp, PyObject *none, int inplace); /*proto*/
/////////////// PyNumberPow2 ///////////////
static PyObject* __Pyx__PyNumber_PowerOf2(PyObject *two, PyObject *exp, PyObject *none, int inplace) {
// in CPython, 1<<N is substantially faster than 2**N
// see http://bugs.python.org/issue21420
#if !CYTHON_COMPILING_IN_PYPY
Py_ssize_t shiftby;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(exp))) {
shiftby = PyInt_AS_LONG(exp);
} else
#endif
if (likely(PyLong_CheckExact(exp))) {
#if CYTHON_USE_PYLONG_INTERNALS
const Py_ssize_t size = Py_SIZE(exp);
// tuned to optimise branch prediction
if (likely(size == 1)) {
shiftby = ((PyLongObject*)exp)->ob_digit[0];
} else if (size == 0) {
return PyInt_FromLong(1L);
} else if (unlikely(size < 0)) {
goto fallback;
} else {
shiftby = PyLong_AsSsize_t(exp);
}
#else
shiftby = PyLong_AsSsize_t(exp);
#endif
} else {
goto fallback;
}
if (likely(shiftby >= 0)) {
if ((size_t)shiftby <= sizeof(long) * 8 - 2) {
long value = 1L << shiftby;
return PyInt_FromLong(value);
#ifdef HAVE_LONG_LONG
} else if ((size_t)shiftby <= sizeof(unsigned PY_LONG_LONG) * 8 - 1) {
unsigned PY_LONG_LONG value = ((unsigned PY_LONG_LONG)1) << shiftby;
return PyLong_FromUnsignedLongLong(value);
#endif
} else {
PyObject *result, *one = PyInt_FromLong(1L);
if (unlikely(!one)) return NULL;
result = PyNumber_Lshift(one, exp);
Py_DECREF(one);
return result;
}
} else if (shiftby == -1 && PyErr_Occurred()) {
PyErr_Clear();
}
fallback:
#endif
return (inplace ? PyNumber_InPlacePower : PyNumber_Power)(two, exp, none);
}
/////////////// PyIntCompare.proto ///////////////
{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}}
static CYTHON_INLINE {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, long intval, long inplace); /*proto*/
/////////////// PyIntCompare ///////////////
{{py: pyval, ival = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }}
{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}}
{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}}
{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}}
{{py: slot_name = op.lower() }}
{{py: c_op = {'Eq': '==', 'Ne': '!='}[op] }}
{{py:
return_compare = (
(lambda a,b,c_op, return_true=return_true, return_false=return_false: "if ({a} {c_op} {b}) {return_true}; else {return_false};".format(
a=a, b=b, c_op=c_op, return_true=return_true, return_false=return_false))
if ret_type.is_pyobject else
(lambda a,b,c_op: "return ({a} {c_op} {b});".format(a=a, b=b, c_op=c_op))
)
}}
static CYTHON_INLINE {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) {
if (op1 == op2) {
{{return_true if op == 'Eq' else return_false}};
}
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact({{pyval}}))) {
const long {{'a' if order == 'CObj' else 'b'}} = intval;
long {{ival}} = PyInt_AS_LONG({{pyval}});
{{return_compare('a', 'b', c_op)}}
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact({{pyval}}))) {
int unequal;
unsigned long uintval;
Py_ssize_t size = Py_SIZE({{pyval}});
const digit* digits = ((PyLongObject*){{pyval}})->ob_digit;
if (intval == 0) {
// == 0 => Py_SIZE(pyval) == 0
{{return_compare('size', '0', c_op)}}
} else if (intval < 0) {
// < 0 => Py_SIZE(pyval) < 0
if (size >= 0)
{{return_false if op == 'Eq' else return_true}};
// both are negative => can use absolute values now.
intval = -intval;
size = -size;
} else {
// > 0 => Py_SIZE(pyval) > 0
if (size <= 0)
{{return_false if op == 'Eq' else return_true}};
}
// After checking that the sign is the same (and excluding 0), now compare the absolute values.
// When inlining, the C compiler should select exactly one line from this unrolled loop.
uintval = (unsigned long) intval;
{{for _size in range(4, 0, -1)}}
#if PyLong_SHIFT * {{_size}} < SIZEOF_LONG*8
if (uintval >> (PyLong_SHIFT * {{_size}})) {
// The C integer value is between (PyLong_BASE ** _size) and MIN(PyLong_BASE ** _size, LONG_MAX).
unequal = (size != {{_size+1}}) || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
{{for _i in range(1, _size+1)}} | (digits[{{_i}}] != ((uintval >> ({{_i}} * PyLong_SHIFT)) & (unsigned long) PyLong_MASK)){{endfor}};
} else
#endif
{{endfor}}
unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK));
{{return_compare('unequal', '0', c_op)}}
}
#endif
if (PyFloat_CheckExact({{pyval}})) {
const long {{'a' if order == 'CObj' else 'b'}} = intval;
double {{ival}} = PyFloat_AS_DOUBLE({{pyval}});
{{return_compare('(double)a', '(double)b', c_op)}}
}
return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(
PyObject_RichCompare(op1, op2, Py_{{op.upper()}}));
}
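// Illustrative note (not part of the original file): this utility is a Tempita
// template. For example, with op='Eq', order='ObjC' and a PyObject* return type
// it instantiates to __Pyx_PyInt_EqObjC(op1, op2, intval, inplace), which
// generated code calls for comparisons such as "x == 5", with the Python
// object on the left and the known C constant on the right.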
/////////////// PyIntBinop.proto ///////////////
{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}}
#if !CYTHON_COMPILING_IN_PYPY
static {{c_ret_type}} __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check); /*proto*/
#else
#define __Pyx_PyInt_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(op1, op2, intval, inplace, zerodivision_check) \
{{if op in ('Eq', 'Ne')}}{{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(PyObject_RichCompare(op1, op2, Py_{{op.upper()}}))
{{else}}(inplace ? PyNumber_InPlace{{op}}(op1, op2) : PyNumber_{{op}}(op1, op2))
{{endif}}
#endif
/////////////// PyIntBinop ///////////////
#if !CYTHON_COMPILING_IN_PYPY
{{py: from Cython.Utility import pylong_join }}
{{py: pyval, ival = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }}
{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}}
{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}}
{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}}
{{py: slot_name = {'TrueDivide': 'true_divide', 'FloorDivide': 'floor_divide'}.get(op, op.lower()) }}
{{py: cfunc_name = '__Pyx_PyInt_%s%s%s' % ('' if ret_type.is_pyobject else 'Bool', op, order)}}
{{py: zerodiv_check = lambda operand, _cfunc_name=cfunc_name: '%s_ZeroDivisionError(%s)' % (_cfunc_name, operand)}}
{{py:
c_op = {
'Add': '+', 'Subtract': '-', 'Remainder': '%', 'TrueDivide': '/', 'FloorDivide': '/',
'Or': '|', 'Xor': '^', 'And': '&', 'Rshift': '>>', 'Lshift': '<<',
'Eq': '==', 'Ne': '!=',
}[op]
}}
{{if op in ('TrueDivide', 'FloorDivide', 'Remainder')}}
#if PY_MAJOR_VERSION < 3 || CYTHON_USE_PYLONG_INTERNALS
#define {{zerodiv_check('operand')}} \
if (unlikely(zerodivision_check && ((operand) == 0))) { \
PyErr_SetString(PyExc_ZeroDivisionError, "integer division{{if op == 'Remainder'}} or modulo{{endif}} by zero"); \
return NULL; \
}
#endif
{{endif}}
static {{c_ret_type}} {{cfunc_name}}(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
// Prevent "unused" warnings.
(void)inplace;
(void)zerodivision_check;
{{if op in ('Eq', 'Ne')}}
if (op1 == op2) {
{{return_true if op == 'Eq' else return_false}};
}
{{endif}}
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact({{pyval}}))) {
const long {{'a' if order == 'CObj' else 'b'}} = intval;
{{if c_op in '+-%' or op == 'FloorDivide'}}
long x;
{{endif}}
long {{ival}} = PyInt_AS_LONG({{pyval}});
{{if op in ('Eq', 'Ne')}}
if (a {{c_op}} b) {
{{return_true}};
} else {
{{return_false}};
}
{{elif c_op in '+-'}}
// adapted from intobject.c in Py2.7:
// casts in the line below avoid undefined behaviour on overflow
x = (long)((unsigned long)a {{c_op}} b);
if (likely((x^a) >= 0 || (x^{{ '~' if op == 'Subtract' else '' }}b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2);
{{elif c_op == '%'}}
{{zerodiv_check('b')}}
// see ExprNodes.py :: mod_int_utility_code
x = a % b;
x += ((x != 0) & ((x ^ b) < 0)) * b;
return PyInt_FromLong(x);
{{elif op == 'TrueDivide'}}
{{zerodiv_check('b')}}
if (8 * sizeof(long) <= 53 || likely(labs({{ival}}) <= ((PY_LONG_LONG)1 << 53))) {
return PyFloat_FromDouble((double)a / (double)b);
}
// let Python do the rounding
return PyInt_Type.tp_as_number->nb_{{slot_name}}(op1, op2);
{{elif op == 'FloorDivide'}}
// INT_MIN / -1 is the only case that overflows, b == 0 is an error case
{{zerodiv_check('b')}}
if (unlikely(b == -1 && ((unsigned long)a) == 0-(unsigned long)a))
return PyInt_Type.tp_as_number->nb_{{slot_name}}(op1, op2);
else {
long q, r;
// see ExprNodes.py :: div_int_utility_code
q = a / b;
r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
x = q;
}
return PyInt_FromLong(x);
{{elif op == 'Lshift'}}
if (likely(b < (long) (sizeof(long)*8) && a == (a << b) >> b) || !a) {
return PyInt_FromLong(a {{c_op}} b);
}
{{else}}
// other operations are safe, no overflow
return PyInt_FromLong(a {{c_op}} b);
{{endif}}
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact({{pyval}}))) {
const long {{'a' if order == 'CObj' else 'b'}} = intval;
long {{ival}}{{if op not in ('Eq', 'Ne')}}, x{{endif}};
{{if op not in ('Eq', 'Ne', 'TrueDivide')}}
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG ll{{'a' if order == 'CObj' else 'b'}} = intval;
PY_LONG_LONG ll{{ival}}, llx;
#endif
{{endif}}
const digit* digits = ((PyLongObject*){{pyval}})->ob_digit;
const Py_ssize_t size = Py_SIZE({{pyval}});
// handle most common case first to avoid indirect branch and optimise branch prediction
if (likely(__Pyx_sst_abs(size) <= 1)) {
{{ival}} = likely(size) ? digits[0] : 0;
if (size == -1) {{ival}} = -{{ival}};
} else {
switch (size) {
{{for _size in range(2, 5)}}
{{for _case in (-_size, _size)}}
case {{_case}}:
if (8 * sizeof(long) - 1 > {{_size}} * PyLong_SHIFT{{if op == 'TrueDivide'}} && {{_size-1}} * PyLong_SHIFT < 53{{endif}}) {
{{ival}} = {{'-' if _case < 0 else ''}}(long) {{pylong_join(_size, 'digits')}};
break;
{{if op not in ('Eq', 'Ne', 'TrueDivide')}}
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > {{_size}} * PyLong_SHIFT) {
ll{{ival}} = {{'-' if _case < 0 else ''}}(PY_LONG_LONG) {{pylong_join(_size, 'digits', 'unsigned PY_LONG_LONG')}};
goto long_long;
#endif
{{endif}}
}
// if size doesn't fit into a long or PY_LONG_LONG anymore, fall through to default
CYTHON_FALLTHROUGH;
{{endfor}}
{{endfor}}
{{if op in ('Eq', 'Ne')}}
#if PyLong_SHIFT < 30 && PyLong_SHIFT != 15
// unusual setup - your fault
default: return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(
PyLong_Type.tp_richcompare({{'op1, op2' if order == 'ObjC' else 'op2, op1'}}, Py_{{op.upper()}}));
#else
// too large for the long values we allow => definitely not equal
default: {{return_false if op == 'Eq' else return_true}};
#endif
{{else}}
default: return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2);
{{endif}}
}
}
{{if op in ('Eq', 'Ne')}}
if (a {{c_op}} b) {
{{return_true}};
} else {
{{return_false}};
}
{{else}}
{{if c_op == '%'}}
{{zerodiv_check('b')}}
// see ExprNodes.py :: mod_int_utility_code
x = a % b;
x += ((x != 0) & ((x ^ b) < 0)) * b;
{{elif op == 'TrueDivide'}}
{{zerodiv_check('b')}}
if ((8 * sizeof(long) <= 53 || likely(labs({{ival}}) <= ((PY_LONG_LONG)1 << 53)))
|| __Pyx_sst_abs(size) <= 52 / PyLong_SHIFT) {
return PyFloat_FromDouble((double)a / (double)b);
}
return PyLong_Type.tp_as_number->nb_{{slot_name}}(op1, op2);
{{elif op == 'FloorDivide'}}
{{zerodiv_check('b')}}
{
long q, r;
// see ExprNodes.py :: div_int_utility_code
q = a / b;
r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
x = q;
}
{{else}}
x = a {{c_op}} b;
{{if op == 'Lshift'}}
#ifdef HAVE_LONG_LONG
if (unlikely(!(b < (long) (sizeof(long)*8) && a == x >> b)) && a) {
ll{{ival}} = {{ival}};
goto long_long;
}
#else
if (likely(b < (long) (sizeof(long)*8) && a == x >> b) || !a) /* execute return statement below */
#endif
{{endif}}
{{endif}}
return PyLong_FromLong(x);
{{if op != 'TrueDivide'}}
#ifdef HAVE_LONG_LONG
long_long:
{{if c_op == '%'}}
// see ExprNodes.py :: mod_int_utility_code
llx = lla % llb;
llx += ((llx != 0) & ((llx ^ llb) < 0)) * llb;
{{elif op == 'FloorDivide'}}
{
PY_LONG_LONG q, r;
// see ExprNodes.py :: div_int_utility_code
q = lla / llb;
r = lla - q*llb;
q -= ((r != 0) & ((r ^ llb) < 0));
llx = q;
}
{{else}}
llx = lla {{c_op}} llb;
{{if op == 'Lshift'}}
if (likely(lla == llx >> llb)) /* then execute 'return' below */
{{endif}}
{{endif}}
return PyLong_FromLongLong(llx);
#endif
{{endif}}{{# if op != 'TrueDivide' #}}
{{endif}}{{# if op in ('Eq', 'Ne') #}}
}
#endif
{{if c_op in '+-' or op in ('TrueDivide', 'Eq', 'Ne')}}
if (PyFloat_CheckExact({{pyval}})) {
const long {{'a' if order == 'CObj' else 'b'}} = intval;
double {{ival}} = PyFloat_AS_DOUBLE({{pyval}});
{{if op in ('Eq', 'Ne')}}
if ((double)a {{c_op}} (double)b) {
{{return_true}};
} else {
{{return_false}};
}
{{else}}
double result;
{{if op == 'TrueDivide'}}
if (unlikely(zerodivision_check && b == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "float division by zero");
return NULL;
}
{{endif}}
// copied from floatobject.c in Py3.5:
PyFPE_START_PROTECT("{{op.lower() if not op.endswith('Divide') else 'divide'}}", return NULL)
result = ((double)a) {{c_op}} (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
{{endif}}
}
{{endif}}
{{if op in ('Eq', 'Ne')}}
return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(
PyObject_RichCompare(op1, op2, Py_{{op.upper()}}));
{{else}}
return (inplace ? PyNumber_InPlace{{op}} : PyNumber_{{op}})(op1, op2);
{{endif}}
}
#endif
/////////////// PyFloatBinop.proto ///////////////
{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}}
#if !CYTHON_COMPILING_IN_PYPY
static {{c_ret_type}} __Pyx_PyFloat_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); /*proto*/
#else
#define __Pyx_PyFloat_{{'' if ret_type.is_pyobject else 'Bool'}}{{op}}{{order}}(op1, op2, floatval, inplace, zerodivision_check) \
{{if op in ('Eq', 'Ne')}}{{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(PyObject_RichCompare(op1, op2, Py_{{op.upper()}}))
{{elif op == 'Divide'}}((inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2)))
{{else}}(inplace ? PyNumber_InPlace{{op}}(op1, op2) : PyNumber_{{op}}(op1, op2))
{{endif}}
#endif
/////////////// PyFloatBinop ///////////////
#if !CYTHON_COMPILING_IN_PYPY
{{py: from Cython.Utility import pylong_join }}
{{py: c_ret_type = 'PyObject*' if ret_type.is_pyobject else 'int'}}
{{py: return_true = 'Py_RETURN_TRUE' if ret_type.is_pyobject else 'return 1'}}
{{py: return_false = 'Py_RETURN_FALSE' if ret_type.is_pyobject else 'return 0'}}
{{py: pyval, fval = ('op2', 'b') if order == 'CObj' else ('op1', 'a') }}
{{py: cfunc_name = '__Pyx_PyFloat_%s%s%s' % ('' if ret_type.is_pyobject else 'Bool', op, order) }}
{{py: zerodiv_check = lambda operand, _cfunc_name=cfunc_name: '%s_ZeroDivisionError(%s)' % (_cfunc_name, operand)}}
{{py:
c_op = {
'Add': '+', 'Subtract': '-', 'TrueDivide': '/', 'Divide': '/', 'Remainder': '%',
'Eq': '==', 'Ne': '!=',
}[op]
}}
{{if order == 'CObj' and c_op in '%/'}}
#define {{zerodiv_check('operand')}} if (unlikely(zerodivision_check && ((operand) == 0))) { \
PyErr_SetString(PyExc_ZeroDivisionError, "float division{{if op == 'Remainder'}} or modulo{{endif}} by zero"); \
return NULL; \
}
{{endif}}
static {{c_ret_type}} {{cfunc_name}}(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) {
const double {{'a' if order == 'CObj' else 'b'}} = floatval;
double {{fval}}{{if op not in ('Eq', 'Ne')}}, result{{endif}};
// Prevent "unused" warnings.
(void)inplace;
(void)zerodivision_check;
{{if op in ('Eq', 'Ne')}}
if (op1 == op2) {
{{return_true if op == 'Eq' else return_false}};
}
{{endif}}
if (likely(PyFloat_CheckExact({{pyval}}))) {
{{fval}} = PyFloat_AS_DOUBLE({{pyval}});
{{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check(fval)}}{{endif}}
} else
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact({{pyval}}))) {
{{fval}} = (double) PyInt_AS_LONG({{pyval}});
{{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check(fval)}}{{endif}}
} else
#endif
if (likely(PyLong_CheckExact({{pyval}}))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*){{pyval}})->ob_digit;
const Py_ssize_t size = Py_SIZE({{pyval}});
switch (size) {
case 0: {{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check('0')}}{{else}}{{fval}} = 0.0;{{endif}} break;
case -1: {{fval}} = -(double) digits[0]; break;
case 1: {{fval}} = (double) digits[0]; break;
{{for _size in (2, 3, 4)}}
case -{{_size}}:
case {{_size}}:
if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || ({{_size-1}} * PyLong_SHIFT < 53))) {
{{fval}} = (double) {{pylong_join(_size, 'digits')}};
// let CPython do its own float rounding from 2**53 on (max. consecutive integer in double float)
if ((8 * sizeof(unsigned long) < 53) || ({{_size}} * PyLong_SHIFT < 53) || ({{fval}} < (double) ((PY_LONG_LONG)1 << 53))) {
if (size == {{-_size}})
{{fval}} = -{{fval}};
break;
}
}
// Fall through if size doesn't fit safely into a double anymore.
// It may not be obvious that this is a safe fall-through given the "fval < 2**53"
// check above. However, the number of digits that CPython uses for a given PyLong
// value is minimal, and together with the "(size-1) * SHIFT < 53" check above,
// this should make it safe.
CYTHON_FALLTHROUGH;
{{endfor}}
default:
#else
{
#endif
{{if op in ('Eq', 'Ne')}}
return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(
PyFloat_Type.tp_richcompare({{'op1, op2' if order == 'CObj' else 'op2, op1'}}, Py_{{op.upper()}}));
{{else}}
{{fval}} = PyLong_AsDouble({{pyval}});
if (unlikely({{fval}} == -1.0 && PyErr_Occurred())) return NULL;
{{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check(fval)}}{{endif}}
{{endif}}
}
} else {
{{if op in ('Eq', 'Ne')}}
return {{'' if ret_type.is_pyobject else '__Pyx_PyObject_IsTrueAndDecref'}}(
PyObject_RichCompare(op1, op2, Py_{{op.upper()}}));
{{elif op == 'Divide'}}
return (inplace ? __Pyx_PyNumber_InPlaceDivide(op1, op2) : __Pyx_PyNumber_Divide(op1, op2));
{{else}}
return (inplace ? PyNumber_InPlace{{op}} : PyNumber_{{op}})(op1, op2);
{{endif}}
}
{{if op in ('Eq', 'Ne')}}
if (a {{c_op}} b) {
{{return_true}};
} else {
{{return_false}};
}
{{else}}
// copied from floatobject.c in Py3.5:
{{if order == 'CObj' and c_op in '%/'}}{{zerodiv_check('b')}}{{endif}}
PyFPE_START_PROTECT("{{op.lower() if not op.endswith('Divide') else 'divide'}}", return NULL)
{{if c_op == '%'}}
result = fmod(a, b);
if (result)
result += ((result < 0) ^ (b < 0)) * b;
else
result = copysign(0.0, b);
{{else}}
result = a {{c_op}} b;
{{endif}}
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
{{endif}}
}
#endif
// Cython-0.29.28/Cython/Utility/Overflow.c
/*
These functions provide integer arithmetic with overflow checking. They do not
actually raise an exception when an overflow is detected, but rather set a bit
in the overflow parameter. (This parameter may be re-used across several
arithmetic operations, so it should be or-ed rather than assigned to.)
The implementation is divided into two parts: the signed and unsigned base cases,
which is where the magic happens, and a generic template matching a specific
type to an implementation based on its (C-compile-time) size and signedness.
When possible, branching is avoided, and preference is given to speed over
accuracy (a low rate of falsely "detected" overflows is acceptable;
undetected overflows are not).
TODO: Hook up checking.
TODO: Conditionally support 128-bit with intmax_t?
*/
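// Illustrative sketch (not part of the original file): how generated code uses
// the checking helpers below. The concrete names depend on the instantiated C
// type; for a plain int, computing "a*b + c" with overflow detection looks
// roughly like this:
//
//     int overflow = 0;
//     int result = __Pyx_add_int_checking_overflow(
//         __Pyx_mul_int_checking_overflow(a, b, &overflow), c, &overflow);
//     if (unlikely(overflow)) {
//         PyErr_SetString(PyExc_OverflowError, "value too large");
//         /* ...error handling... */
//     }
//
// Note that a single flag is or-ed across the whole expression and tested once
// at the end, as the comment above requires.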
/////////////// Common.proto ///////////////
static int __Pyx_check_twos_complement(void) {
if ((-1 != ~0)) {
PyErr_SetString(PyExc_RuntimeError, "Two's complement required for overflow checks.");
return 1;
} else if ((sizeof(short) == sizeof(int))) {
PyErr_SetString(PyExc_RuntimeError, "sizeof(short) < sizeof(int) required for overflow checks.");
return 1;
} else {
return 0;
}
}
#define __PYX_IS_UNSIGNED(type) ((((type) -1) > 0))
#define __PYX_SIGN_BIT(type) ((((unsigned type) 1) << (sizeof(type) * 8 - 1)))
#define __PYX_HALF_MAX(type) ((((type) 1) << (sizeof(type) * 8 - 2)))
#define __PYX_MIN(type) ((__PYX_IS_UNSIGNED(type) ? (type) 0 : 0 - __PYX_HALF_MAX(type) - __PYX_HALF_MAX(type)))
#define __PYX_MAX(type) ((~__PYX_MIN(type)))
#define __Pyx_add_no_overflow(a, b, overflow) ((a) + (b))
#define __Pyx_add_const_no_overflow(a, b, overflow) ((a) + (b))
#define __Pyx_sub_no_overflow(a, b, overflow) ((a) - (b))
#define __Pyx_sub_const_no_overflow(a, b, overflow) ((a) - (b))
#define __Pyx_mul_no_overflow(a, b, overflow) ((a) * (b))
#define __Pyx_mul_const_no_overflow(a, b, overflow) ((a) * (b))
#define __Pyx_div_no_overflow(a, b, overflow) ((a) / (b))
#define __Pyx_div_const_no_overflow(a, b, overflow) ((a) / (b))
/////////////// Common.init ///////////////
//@substitute: naming
// FIXME: Propagate the error here instead of just printing it.
if (unlikely(__Pyx_check_twos_complement())) {
PyErr_WriteUnraisable($module_cname);
}
/////////////// BaseCaseUnsigned.proto ///////////////
static CYTHON_INLINE {{UINT}} __Pyx_add_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow);
static CYTHON_INLINE {{UINT}} __Pyx_sub_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow);
static CYTHON_INLINE {{UINT}} __Pyx_mul_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow);
static CYTHON_INLINE {{UINT}} __Pyx_div_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow);
// Use these when b is known at compile time.
#define __Pyx_add_const_{{NAME}}_checking_overflow __Pyx_add_{{NAME}}_checking_overflow
#define __Pyx_sub_const_{{NAME}}_checking_overflow __Pyx_sub_{{NAME}}_checking_overflow
static CYTHON_INLINE {{UINT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} constant, int *overflow);
#define __Pyx_div_const_{{NAME}}_checking_overflow __Pyx_div_{{NAME}}_checking_overflow
/////////////// BaseCaseUnsigned ///////////////
static CYTHON_INLINE {{UINT}} __Pyx_add_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
{{UINT}} r = a + b;
*overflow |= r < a;
return r;
}
static CYTHON_INLINE {{UINT}} __Pyx_sub_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
{{UINT}} r = a - b;
*overflow |= r > a;
return r;
}
static CYTHON_INLINE {{UINT}} __Pyx_mul_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
if ((sizeof({{UINT}}) < sizeof(unsigned long))) {
unsigned long big_r = ((unsigned long) a) * ((unsigned long) b);
{{UINT}} r = ({{UINT}}) big_r;
*overflow |= big_r != r;
return r;
#ifdef HAVE_LONG_LONG
} else if ((sizeof({{UINT}}) < sizeof(unsigned PY_LONG_LONG))) {
unsigned PY_LONG_LONG big_r = ((unsigned PY_LONG_LONG) a) * ((unsigned PY_LONG_LONG) b);
{{UINT}} r = ({{UINT}}) big_r;
*overflow |= big_r != r;
return r;
#endif
} else {
{{UINT}} prod = a * b;
double dprod = ((double) a) * ((double) b);
// Overflow results in an error of at least 2^sizeof(UINT),
// whereas rounding represents an error on the order of 2^(sizeof(UINT)-53).
*overflow |= fabs(dprod - prod) > (__PYX_MAX({{UINT}}) / 2);
return prod;
}
}
static CYTHON_INLINE {{UINT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
if (b > 1) {
*overflow |= a > __PYX_MAX({{UINT}}) / b;
}
return a * b;
}
static CYTHON_INLINE {{UINT}} __Pyx_div_{{NAME}}_checking_overflow({{UINT}} a, {{UINT}} b, int *overflow) {
if (b == 0) {
*overflow |= 1;
return 0;
}
return a / b;
}
/////////////// BaseCaseSigned.proto ///////////////
static CYTHON_INLINE {{INT}} __Pyx_add_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow);
static CYTHON_INLINE {{INT}} __Pyx_sub_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow);
static CYTHON_INLINE {{INT}} __Pyx_mul_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow);
static CYTHON_INLINE {{INT}} __Pyx_div_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow);
// Use when b is known at compile time.
static CYTHON_INLINE {{INT}} __Pyx_add_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow);
static CYTHON_INLINE {{INT}} __Pyx_sub_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow);
static CYTHON_INLINE {{INT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} constant, int *overflow);
#define __Pyx_div_const_{{NAME}}_checking_overflow __Pyx_div_{{NAME}}_checking_overflow
/////////////// BaseCaseSigned ///////////////
static CYTHON_INLINE {{INT}} __Pyx_add_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
if ((sizeof({{INT}}) < sizeof(long))) {
long big_r = ((long) a) + ((long) b);
{{INT}} r = ({{INT}}) big_r;
*overflow |= big_r != r;
return r;
#ifdef HAVE_LONG_LONG
} else if ((sizeof({{INT}}) < sizeof(PY_LONG_LONG))) {
PY_LONG_LONG big_r = ((PY_LONG_LONG) a) + ((PY_LONG_LONG) b);
{{INT}} r = ({{INT}}) big_r;
*overflow |= big_r != r;
return r;
#endif
} else {
// Signed overflow undefined, but unsigned overflow is well defined.
{{INT}} r = ({{INT}}) ((unsigned {{INT}}) a + (unsigned {{INT}}) b);
// Overflow happened if the operands have the same sign, but the result
// has opposite sign.
// sign(a) == sign(b) != sign(r)
{{INT}} sign_a = __PYX_SIGN_BIT({{INT}}) & a;
{{INT}} sign_b = __PYX_SIGN_BIT({{INT}}) & b;
{{INT}} sign_r = __PYX_SIGN_BIT({{INT}}) & r;
*overflow |= (sign_a == sign_b) & (sign_a != sign_r);
return r;
}
}
static CYTHON_INLINE {{INT}} __Pyx_add_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
if (b > 0) {
*overflow |= a > __PYX_MAX({{INT}}) - b;
} else if (b < 0) {
*overflow |= a < __PYX_MIN({{INT}}) - b;
}
return a + b;
}
static CYTHON_INLINE {{INT}} __Pyx_sub_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
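// a - b is computed as a + (-b); negating __PYX_MIN itself would overflow,
// so that single case is flagged up front before delegating to the add helper.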
*overflow |= b == __PYX_MIN({{INT}});
return __Pyx_add_{{NAME}}_checking_overflow(a, -b, overflow);
}
static CYTHON_INLINE {{INT}} __Pyx_sub_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
*overflow |= b == __PYX_MIN({{INT}});
return __Pyx_add_const_{{NAME}}_checking_overflow(a, -b, overflow);
}
static CYTHON_INLINE {{INT}} __Pyx_mul_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
if ((sizeof({{INT}}) < sizeof(long))) {
long big_r = ((long) a) * ((long) b);
{{INT}} r = ({{INT}}) big_r;
*overflow |= big_r != r;
return ({{INT}}) r;
#ifdef HAVE_LONG_LONG
} else if ((sizeof({{INT}}) < sizeof(PY_LONG_LONG))) {
PY_LONG_LONG big_r = ((PY_LONG_LONG) a) * ((PY_LONG_LONG) b);
{{INT}} r = ({{INT}}) big_r;
*overflow |= big_r != r;
return ({{INT}}) r;
#endif
} else {
{{INT}} prod = a * b;
double dprod = ((double) a) * ((double) b);
// Overflow results in an error of at least 2^sizeof(INT),
// whereas rounding represents an error on the order of 2^(sizeof(INT)-53).
*overflow |= fabs(dprod - prod) > (__PYX_MAX({{INT}}) / 2);
return prod;
}
}
static CYTHON_INLINE {{INT}} __Pyx_mul_const_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
if (b > 1) {
*overflow |= a > __PYX_MAX({{INT}}) / b;
*overflow |= a < __PYX_MIN({{INT}}) / b;
} else if (b == -1) {
*overflow |= a == __PYX_MIN({{INT}});
} else if (b < -1) {
*overflow |= a > __PYX_MIN({{INT}}) / b;
*overflow |= a < __PYX_MAX({{INT}}) / b;
}
return a * b;
}
static CYTHON_INLINE {{INT}} __Pyx_div_{{NAME}}_checking_overflow({{INT}} a, {{INT}} b, int *overflow) {
if (b == 0) {
*overflow |= 1;
return 0;
}
*overflow |= (a == __PYX_MIN({{INT}})) & (b == -1);
return a / b;
}
/////////////// SizeCheck.init ///////////////
//@substitute: naming
// FIXME: Propagate the error here instead of just printing it.
if (unlikely(__Pyx_check_sane_{{NAME}}())) {
PyErr_WriteUnraisable($module_cname);
}
/////////////// SizeCheck.proto ///////////////
static int __Pyx_check_sane_{{NAME}}(void) {
if (((sizeof({{TYPE}}) <= sizeof(int)) ||
#ifdef HAVE_LONG_LONG
(sizeof({{TYPE}}) == sizeof(PY_LONG_LONG)) ||
#endif
(sizeof({{TYPE}}) == sizeof(long)))) {
return 0;
} else {
PyErr_Format(PyExc_RuntimeError, \
"Bad size for int type %.{{max(60, len(TYPE))}}s: %d", "{{TYPE}}", (int) sizeof({{TYPE}}));
return 1;
}
}
/////////////// Binop.proto ///////////////
static CYTHON_INLINE {{TYPE}} __Pyx_{{BINOP}}_{{NAME}}_checking_overflow({{TYPE}} a, {{TYPE}} b, int *overflow);
/////////////// Binop ///////////////
static CYTHON_INLINE {{TYPE}} __Pyx_{{BINOP}}_{{NAME}}_checking_overflow({{TYPE}} a, {{TYPE}} b, int *overflow) {
if ((sizeof({{TYPE}}) < sizeof(int))) {
return __Pyx_{{BINOP}}_no_overflow(a, b, overflow);
} else if (__PYX_IS_UNSIGNED({{TYPE}})) {
if ((sizeof({{TYPE}}) == sizeof(unsigned int))) {
return ({{TYPE}}) __Pyx_{{BINOP}}_unsigned_int_checking_overflow(a, b, overflow);
} else if ((sizeof({{TYPE}}) == sizeof(unsigned long))) {
return ({{TYPE}}) __Pyx_{{BINOP}}_unsigned_long_checking_overflow(a, b, overflow);
#ifdef HAVE_LONG_LONG
} else if ((sizeof({{TYPE}}) == sizeof(unsigned PY_LONG_LONG))) {
return ({{TYPE}}) __Pyx_{{BINOP}}_unsigned_long_long_checking_overflow(a, b, overflow);
#endif
} else {
abort(); return 0; /* handled elsewhere */
}
} else {
if ((sizeof({{TYPE}}) == sizeof(int))) {
return ({{TYPE}}) __Pyx_{{BINOP}}_int_checking_overflow(a, b, overflow);
} else if ((sizeof({{TYPE}}) == sizeof(long))) {
return ({{TYPE}}) __Pyx_{{BINOP}}_long_checking_overflow(a, b, overflow);
#ifdef HAVE_LONG_LONG
} else if ((sizeof({{TYPE}}) == sizeof(PY_LONG_LONG))) {
return ({{TYPE}}) __Pyx_{{BINOP}}_long_long_checking_overflow(a, b, overflow);
#endif
} else {
abort(); return 0; /* handled elsewhere */
}
}
}
/////////////// LeftShift.proto ///////////////
static CYTHON_INLINE {{TYPE}} __Pyx_lshift_{{NAME}}_checking_overflow({{TYPE}} a, {{TYPE}} b, int *overflow) {
*overflow |=
#if {{SIGNED}}
(b < 0) |
#endif
(b > ({{TYPE}}) (8 * sizeof({{TYPE}}))) | (a > (__PYX_MAX({{TYPE}}) >> b));
return a << b;
}
#define __Pyx_lshift_const_{{NAME}}_checking_overflow __Pyx_lshift_{{NAME}}_checking_overflow
/////////////// UnaryNegOverflows.proto ///////////////
//FIXME: shouldn't the macro name be prefixed by "__Pyx_" ? Too late now, I guess...
// from intobject.c
#define UNARY_NEG_WOULD_OVERFLOW(x) \
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
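// Illustrative note (not part of the original file): for x of type long, the
// test above is true exactly for LONG_MIN, the one value whose negation is not
// representable, e.g. UNARY_NEG_WOULD_OVERFLOW(LONG_MIN) != 0 while
// UNARY_NEG_WOULD_OVERFLOW(-5) == 0.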
// Cython-0.29.28/Cython/Utility/Printing.c
////////////////////// Print.proto //////////////////////
//@substitute: naming
static int __Pyx_Print(PyObject*, PyObject *, int); /*proto*/
#if CYTHON_COMPILING_IN_PYPY || PY_MAJOR_VERSION >= 3
static PyObject* $print_function = 0;
static PyObject* $print_function_kwargs = 0;
#endif
////////////////////// Print.cleanup //////////////////////
//@substitute: naming
#if CYTHON_COMPILING_IN_PYPY || PY_MAJOR_VERSION >= 3
Py_CLEAR($print_function);
Py_CLEAR($print_function_kwargs);
#endif
////////////////////// Print //////////////////////
//@substitute: naming
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3
static PyObject *__Pyx_GetStdout(void) {
PyObject *f = PySys_GetObject((char *)"stdout");
if (!f) {
PyErr_SetString(PyExc_RuntimeError, "lost sys.stdout");
}
return f;
}
static int __Pyx_Print(PyObject* f, PyObject *arg_tuple, int newline) {
int i;
if (!f) {
if (!(f = __Pyx_GetStdout()))
return -1;
}
Py_INCREF(f);
for (i=0; i < PyTuple_GET_SIZE(arg_tuple); i++) {
PyObject* v;
if (PyFile_SoftSpace(f, 1)) {
if (PyFile_WriteString(" ", f) < 0)
goto error;
}
v = PyTuple_GET_ITEM(arg_tuple, i);
if (PyFile_WriteObject(v, f, Py_PRINT_RAW) < 0)
goto error;
if (PyString_Check(v)) {
char *s = PyString_AsString(v);
Py_ssize_t len = PyString_Size(v);
if (len > 0) {
// append soft-space if necessary (not using isspace() due to C/C++ problem on MacOS-X)
switch (s[len-1]) {
case ' ': break;
case '\f': case '\r': case '\n': case '\t': case '\v':
PyFile_SoftSpace(f, 0);
break;
default: break;
}
}
}
}
if (newline) {
if (PyFile_WriteString("\n", f) < 0)
goto error;
PyFile_SoftSpace(f, 0);
}
Py_DECREF(f);
return 0;
error:
Py_DECREF(f);
return -1;
}
#else /* Python 3 has a print function */
static int __Pyx_Print(PyObject* stream, PyObject *arg_tuple, int newline) {
PyObject* kwargs = 0;
PyObject* result = 0;
PyObject* end_string;
if (unlikely(!$print_function)) {
$print_function = PyObject_GetAttr($builtins_cname, PYIDENT("print"));
if (!$print_function)
return -1;
}
if (stream) {
kwargs = PyDict_New();
if (unlikely(!kwargs))
return -1;
if (unlikely(PyDict_SetItem(kwargs, PYIDENT("file"), stream) < 0))
goto bad;
if (!newline) {
end_string = PyUnicode_FromStringAndSize(" ", 1);
if (unlikely(!end_string))
goto bad;
if (PyDict_SetItem(kwargs, PYIDENT("end"), end_string) < 0) {
Py_DECREF(end_string);
goto bad;
}
Py_DECREF(end_string);
}
} else if (!newline) {
if (unlikely(!$print_function_kwargs)) {
$print_function_kwargs = PyDict_New();
if (unlikely(!$print_function_kwargs))
return -1;
end_string = PyUnicode_FromStringAndSize(" ", 1);
if (unlikely(!end_string))
return -1;
if (PyDict_SetItem($print_function_kwargs, PYIDENT("end"), end_string) < 0) {
Py_DECREF(end_string);
return -1;
}
Py_DECREF(end_string);
}
kwargs = $print_function_kwargs;
}
result = PyObject_Call($print_function, arg_tuple, kwargs);
if (unlikely(kwargs) && (kwargs != $print_function_kwargs))
Py_DECREF(kwargs);
if (!result)
return -1;
Py_DECREF(result);
return 0;
bad:
if (kwargs != $print_function_kwargs)
Py_XDECREF(kwargs);
return -1;
}
#endif
////////////////////// PrintOne.proto //////////////////////
//@requires: Print
static int __Pyx_PrintOne(PyObject* stream, PyObject *o); /*proto*/
////////////////////// PrintOne //////////////////////
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION < 3
static int __Pyx_PrintOne(PyObject* f, PyObject *o) {
if (!f) {
if (!(f = __Pyx_GetStdout()))
return -1;
}
Py_INCREF(f);
if (PyFile_SoftSpace(f, 0)) {
if (PyFile_WriteString(" ", f) < 0)
goto error;
}
if (PyFile_WriteObject(o, f, Py_PRINT_RAW) < 0)
goto error;
if (PyFile_WriteString("\n", f) < 0)
goto error;
Py_DECREF(f);
return 0;
error:
Py_DECREF(f);
return -1;
/* the line below is just to avoid C compiler
* warnings about unused functions */
return __Pyx_Print(f, NULL, 0);
}
#else /* Python 3 has a print function */
static int __Pyx_PrintOne(PyObject* stream, PyObject *o) {
int res;
PyObject* arg_tuple = PyTuple_Pack(1, o);
if (unlikely(!arg_tuple))
return -1;
res = __Pyx_Print(stream, arg_tuple, 1);
Py_DECREF(arg_tuple);
return res;
}
#endif
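// Usage sketch (illustrative, assuming an initialised interpreter and the
// module-level caches above; error checks shortened): a NULL stream routes
// to builtins.print / sys.stdout by default.
#if 0
PyObject *args = Py_BuildValue("(ss)", "hello", "world");
if (args) {
    __Pyx_Print(NULL, args, 1);    /* prints "hello world\n" */
    __Pyx_PrintOne(NULL, Py_None); /* single-object convenience: "None\n" */
    Py_DECREF(args);
}
#endif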
/* Cython/Utility/Profile.c */
/////////////// Profile.proto ///////////////
//@requires: Exceptions.c::PyErrFetchRestore
//@substitute: naming
// Note that CPython ignores PyTrace_EXCEPTION,
// but maybe some other profilers don't.
#ifndef CYTHON_PROFILE
#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON
#define CYTHON_PROFILE 0
#else
#define CYTHON_PROFILE 1
#endif
#endif
#ifndef CYTHON_TRACE_NOGIL
#define CYTHON_TRACE_NOGIL 0
#else
#if CYTHON_TRACE_NOGIL && !defined(CYTHON_TRACE)
#define CYTHON_TRACE 1
#endif
#endif
#ifndef CYTHON_TRACE
#define CYTHON_TRACE 0
#endif
#if CYTHON_TRACE
#undef CYTHON_PROFILE_REUSE_FRAME
#endif
#ifndef CYTHON_PROFILE_REUSE_FRAME
#define CYTHON_PROFILE_REUSE_FRAME 0
#endif
#if CYTHON_PROFILE || CYTHON_TRACE
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
#if CYTHON_PROFILE_REUSE_FRAME
#define CYTHON_FRAME_MODIFIER static
#define CYTHON_FRAME_DEL(frame)
#else
#define CYTHON_FRAME_MODIFIER
#define CYTHON_FRAME_DEL(frame) Py_CLEAR(frame)
#endif
#define __Pyx_TraceDeclarations \
static PyCodeObject *$frame_code_cname = NULL; \
CYTHON_FRAME_MODIFIER PyFrameObject *$frame_cname = NULL; \
int __Pyx_use_tracing = 0;
#define __Pyx_TraceFrameInit(codeobj) \
if (codeobj) $frame_code_cname = (PyCodeObject*) codeobj;
#if PY_VERSION_HEX >= 0x030b00a2
#define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \
(unlikely((tstate)->cframe->use_tracing) && \
(!(check_tracing) || !(tstate)->tracing) && \
(!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc)))
#define __Pyx_EnterTracing(tstate) PyThreadState_EnterTracing(tstate)
#define __Pyx_LeaveTracing(tstate) PyThreadState_LeaveTracing(tstate)
#elif PY_VERSION_HEX >= 0x030a00b1
#define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \
(unlikely((tstate)->cframe->use_tracing) && \
(!(check_tracing) || !(tstate)->tracing) && \
(!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc)))
#define __Pyx_EnterTracing(tstate) \
do { tstate->tracing++; tstate->cframe->use_tracing = 0; } while (0)
#define __Pyx_LeaveTracing(tstate) \
do { \
tstate->tracing--; \
tstate->cframe->use_tracing = ((CYTHON_TRACE && tstate->c_tracefunc != NULL) \
|| tstate->c_profilefunc != NULL); \
} while (0)
#else
#define __Pyx_IsTracing(tstate, check_tracing, check_funcs) \
(unlikely((tstate)->use_tracing) && \
(!(check_tracing) || !(tstate)->tracing) && \
(!(check_funcs) || (tstate)->c_profilefunc || (CYTHON_TRACE && (tstate)->c_tracefunc)))
#define __Pyx_EnterTracing(tstate) \
do { tstate->tracing++; tstate->use_tracing = 0; } while (0)
#define __Pyx_LeaveTracing(tstate) \
do { \
tstate->tracing--; \
tstate->use_tracing = ((CYTHON_TRACE && tstate->c_tracefunc != NULL) \
|| tstate->c_profilefunc != NULL); \
} while (0)
#endif
#ifdef WITH_THREAD
#define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \
if (nogil) { \
if (CYTHON_TRACE_NOGIL) { \
PyThreadState *tstate; \
PyGILState_STATE state = PyGILState_Ensure(); \
tstate = __Pyx_PyThreadState_Current; \
if (__Pyx_IsTracing(tstate, 1, 1)) { \
__Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \
} \
PyGILState_Release(state); \
if (unlikely(__Pyx_use_tracing < 0)) goto_error; \
} \
} else { \
PyThreadState* tstate = PyThreadState_GET(); \
if (__Pyx_IsTracing(tstate, 1, 1)) { \
__Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \
if (unlikely(__Pyx_use_tracing < 0)) goto_error; \
} \
}
#else
#define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) \
{ PyThreadState* tstate = PyThreadState_GET(); \
if (__Pyx_IsTracing(tstate, 1, 1)) { \
__Pyx_use_tracing = __Pyx_TraceSetupAndCall(&$frame_code_cname, &$frame_cname, tstate, funcname, srcfile, firstlineno); \
if (unlikely(__Pyx_use_tracing < 0)) goto_error; \
} \
}
#endif
#define __Pyx_TraceException() \
if (likely(!__Pyx_use_tracing)); else { \
PyThreadState* tstate = __Pyx_PyThreadState_Current; \
if (__Pyx_IsTracing(tstate, 0, 1)) { \
__Pyx_EnterTracing(tstate); \
PyObject *exc_info = __Pyx_GetExceptionTuple(tstate); \
if (exc_info) { \
if (CYTHON_TRACE && tstate->c_tracefunc) \
tstate->c_tracefunc( \
tstate->c_traceobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \
tstate->c_profilefunc( \
tstate->c_profileobj, $frame_cname, PyTrace_EXCEPTION, exc_info); \
Py_DECREF(exc_info); \
} \
__Pyx_LeaveTracing(tstate); \
} \
}
static void __Pyx_call_return_trace_func(PyThreadState *tstate, PyFrameObject *frame, PyObject *result) {
PyObject *type, *value, *traceback;
__Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
__Pyx_EnterTracing(tstate);
if (CYTHON_TRACE && tstate->c_tracefunc)
tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_RETURN, result);
if (tstate->c_profilefunc)
tstate->c_profilefunc(tstate->c_profileobj, frame, PyTrace_RETURN, result);
CYTHON_FRAME_DEL(frame);
__Pyx_LeaveTracing(tstate);
__Pyx_ErrRestoreInState(tstate, type, value, traceback);
}
#ifdef WITH_THREAD
#define __Pyx_TraceReturn(result, nogil) \
if (likely(!__Pyx_use_tracing)); else { \
if (nogil) { \
if (CYTHON_TRACE_NOGIL) { \
PyThreadState *tstate; \
PyGILState_STATE state = PyGILState_Ensure(); \
tstate = __Pyx_PyThreadState_Current; \
if (__Pyx_IsTracing(tstate, 0, 0)) { \
__Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \
} \
PyGILState_Release(state); \
} \
} else { \
PyThreadState* tstate = __Pyx_PyThreadState_Current; \
if (__Pyx_IsTracing(tstate, 0, 0)) { \
__Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \
} \
} \
}
#else
#define __Pyx_TraceReturn(result, nogil) \
if (likely(!__Pyx_use_tracing)); else { \
PyThreadState* tstate = __Pyx_PyThreadState_Current; \
if (__Pyx_IsTracing(tstate, 0, 0)) { \
__Pyx_call_return_trace_func(tstate, $frame_cname, (PyObject*)result); \
} \
}
#endif
static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno); /*proto*/
static int __Pyx_TraceSetupAndCall(PyCodeObject** code, PyFrameObject** frame, PyThreadState* tstate, const char *funcname, const char *srcfile, int firstlineno); /*proto*/
#else
#define __Pyx_TraceDeclarations
#define __Pyx_TraceFrameInit(codeobj)
// mark error label as used to avoid compiler warnings
#define __Pyx_TraceCall(funcname, srcfile, firstlineno, nogil, goto_error) if ((1)); else goto_error;
#define __Pyx_TraceException()
#define __Pyx_TraceReturn(result, nogil)
#endif /* CYTHON_PROFILE */
#if CYTHON_TRACE
// see call_trace_protected() in CPython's ceval.c
static int __Pyx_call_line_trace_func(PyThreadState *tstate, PyFrameObject *frame, int lineno) {
int ret;
PyObject *type, *value, *traceback;
__Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
__Pyx_PyFrame_SetLineNumber(frame, lineno);
__Pyx_EnterTracing(tstate);
ret = tstate->c_tracefunc(tstate->c_traceobj, frame, PyTrace_LINE, NULL);
__Pyx_LeaveTracing(tstate);
if (likely(!ret)) {
__Pyx_ErrRestoreInState(tstate, type, value, traceback);
} else {
Py_XDECREF(type);
Py_XDECREF(value);
Py_XDECREF(traceback);
}
return ret;
}
#ifdef WITH_THREAD
#define __Pyx_TraceLine(lineno, nogil, goto_error) \
if (likely(!__Pyx_use_tracing)); else { \
if (nogil) { \
if (CYTHON_TRACE_NOGIL) { \
int ret = 0; \
PyThreadState *tstate; \
PyGILState_STATE state = PyGILState_Ensure(); \
tstate = __Pyx_PyThreadState_Current; \
if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \
ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \
} \
PyGILState_Release(state); \
if (unlikely(ret)) goto_error; \
} \
} else { \
PyThreadState* tstate = __Pyx_PyThreadState_Current; \
if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \
int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \
if (unlikely(ret)) goto_error; \
} \
} \
}
#else
#define __Pyx_TraceLine(lineno, nogil, goto_error) \
if (likely(!__Pyx_use_tracing)); else { \
PyThreadState* tstate = __Pyx_PyThreadState_Current; \
if (__Pyx_IsTracing(tstate, 0, 0) && tstate->c_tracefunc && $frame_cname->f_trace) { \
int ret = __Pyx_call_line_trace_func(tstate, $frame_cname, lineno); \
if (unlikely(ret)) goto_error; \
} \
}
#endif
#else
// mark error label as used to avoid compiler warnings
#define __Pyx_TraceLine(lineno, nogil, goto_error) if ((1)); else goto_error;
#endif
/////////////// Profile ///////////////
//@substitute: naming
#if CYTHON_PROFILE
static int __Pyx_TraceSetupAndCall(PyCodeObject** code,
PyFrameObject** frame,
PyThreadState* tstate,
const char *funcname,
const char *srcfile,
int firstlineno) {
PyObject *type, *value, *traceback;
int retval;
if (*frame == NULL || !CYTHON_PROFILE_REUSE_FRAME) {
if (*code == NULL) {
*code = __Pyx_createFrameCodeObject(funcname, srcfile, firstlineno);
if (*code == NULL) return 0;
}
*frame = PyFrame_New(
tstate, /*PyThreadState *tstate*/
*code, /*PyCodeObject *code*/
$moddict_cname, /*PyObject *globals*/
0 /*PyObject *locals*/
);
if (*frame == NULL) return 0;
if (CYTHON_TRACE && (*frame)->f_trace == NULL) {
// this enables "f_lineno" lookup, at least in CPython ...
Py_INCREF(Py_None);
(*frame)->f_trace = Py_None;
}
#if PY_VERSION_HEX < 0x030400B1
} else {
(*frame)->f_tstate = tstate;
#endif
}
__Pyx_PyFrame_SetLineNumber(*frame, firstlineno);
retval = 1;
__Pyx_EnterTracing(tstate);
__Pyx_ErrFetchInState(tstate, &type, &value, &traceback);
#if CYTHON_TRACE
if (tstate->c_tracefunc)
retval = tstate->c_tracefunc(tstate->c_traceobj, *frame, PyTrace_CALL, NULL) == 0;
if (retval && tstate->c_profilefunc)
#endif
retval = tstate->c_profilefunc(tstate->c_profileobj, *frame, PyTrace_CALL, NULL) == 0;
__Pyx_LeaveTracing(tstate);
if (retval) {
__Pyx_ErrRestoreInState(tstate, type, value, traceback);
return __Pyx_IsTracing(tstate, 0, 0) && retval;
} else {
Py_XDECREF(type);
Py_XDECREF(value);
Py_XDECREF(traceback);
return -1;
}
}
static PyCodeObject *__Pyx_createFrameCodeObject(const char *funcname, const char *srcfile, int firstlineno) {
PyCodeObject *py_code = 0;
#if PY_MAJOR_VERSION >= 3
py_code = PyCode_NewEmpty(srcfile, funcname, firstlineno);
// make CPython use a fresh dict for "f_locals" at need (see GH #1836)
if (likely(py_code)) {
py_code->co_flags |= CO_OPTIMIZED | CO_NEWLOCALS;
}
#else
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
py_funcname = PyString_FromString(funcname);
if (unlikely(!py_funcname)) goto bad;
py_srcfile = PyString_FromString(srcfile);
if (unlikely(!py_srcfile)) goto bad;
py_code = PyCode_New(
0, /*int argcount,*/
0, /*int nlocals,*/
0, /*int stacksize,*/
// make CPython use a fresh dict for "f_locals" at need (see GH #1836)
CO_OPTIMIZED | CO_NEWLOCALS, /*int flags,*/
$empty_bytes, /*PyObject *code,*/
$empty_tuple, /*PyObject *consts,*/
$empty_tuple, /*PyObject *names,*/
$empty_tuple, /*PyObject *varnames,*/
$empty_tuple, /*PyObject *freevars,*/
$empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
firstlineno, /*int firstlineno,*/
$empty_bytes /*PyObject *lnotab*/
);
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
#endif
return py_code;
}
#endif /* CYTHON_PROFILE */
/* Cython/Utility/StringTools.c */
//////////////////// IncludeStringH.proto ////////////////////
#include <string.h>
//////////////////// IncludeCppStringH.proto ////////////////////
#include <string>
//////////////////// InitStrings.proto ////////////////////
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /*proto*/
//////////////////// InitStrings ////////////////////
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else /* Python 3+ has unicode identifiers */
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
// initialise cached hash value
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
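// Illustrative sketch (not from this file): a minimal string table as the
// loop above consumes it. The real __Pyx_StringTabEntry is declared in the
// generated module code; the field order here (p, s, n, encoding,
// is_unicode, is_str, intern) is an assumption inferred from the accesses
// above.
#if 0
static PyObject *__pyx_n_s_hello;
static __Pyx_StringTabEntry __pyx_string_tab[] = {
    {&__pyx_n_s_hello, "hello", sizeof("hello"), 0, 0, 1, 1},
    {0, 0, 0, 0, 0, 0, 0}  /* terminator: t->p == NULL stops the loop */
};
/* during module init: */
if (__Pyx_InitStrings(__pyx_string_tab) < 0) return -1;
#endif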
//////////////////// BytesContains.proto ////////////////////
static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character); /*proto*/
//////////////////// BytesContains ////////////////////
//@requires: IncludeStringH
static CYTHON_INLINE int __Pyx_BytesContains(PyObject* bytes, char character) {
const Py_ssize_t length = PyBytes_GET_SIZE(bytes);
char* char_start = PyBytes_AS_STRING(bytes);
return memchr(char_start, (unsigned char)character, (size_t)length) != NULL;
}
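// Usage sketch (illustrative): implements "c in b" for a bytes object and a
// single char via one memchr() scan.
#if 0
PyObject *b = PyBytes_FromString("abc");
int found = __Pyx_BytesContains(b, 'b');  /* -> 1 */
Py_DECREF(b);
#endif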
//////////////////// PyUCS4InUnicode.proto ////////////////////
static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 character); /*proto*/
//////////////////// PyUCS4InUnicode ////////////////////
#if PY_VERSION_HEX < 0x03090000 || (defined(PyUnicode_WCHAR_KIND) && defined(PyUnicode_AS_UNICODE))
#if PY_VERSION_HEX < 0x03090000
#define __Pyx_PyUnicode_AS_UNICODE(op) PyUnicode_AS_UNICODE(op)
#define __Pyx_PyUnicode_GET_SIZE(op) PyUnicode_GET_SIZE(op)
#else
// Avoid calling deprecated C-API functions in Py3.9+ that PEP-623 schedules for removal in Py3.12.
// https://www.python.org/dev/peps/pep-0623/
#define __Pyx_PyUnicode_AS_UNICODE(op) (((PyASCIIObject *)(op))->wstr)
#define __Pyx_PyUnicode_GET_SIZE(op) ((PyCompactUnicodeObject *)(op))->wstr_length
#endif
#if !defined(Py_UNICODE_SIZE) || Py_UNICODE_SIZE == 2
static int __Pyx_PyUnicodeBufferContainsUCS4_SP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) {
/* handle surrogate pairs for Py_UNICODE buffers in 16bit Unicode builds */
Py_UNICODE high_val, low_val;
Py_UNICODE* pos;
high_val = (Py_UNICODE) (0xD800 | (((character - 0x10000) >> 10) & ((1<<10)-1)));
low_val = (Py_UNICODE) (0xDC00 | ( (character - 0x10000) & ((1<<10)-1)));
for (pos=buffer; pos < buffer+length-1; pos++) {
if (unlikely((high_val == pos[0]) & (low_val == pos[1]))) return 1;
}
return 0;
}
#endif
static int __Pyx_PyUnicodeBufferContainsUCS4_BMP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) {
Py_UNICODE uchar;
Py_UNICODE* pos;
uchar = (Py_UNICODE) character;
for (pos=buffer; pos < buffer+length; pos++) {
if (unlikely(uchar == pos[0])) return 1;
}
return 0;
}
#endif
static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 character) {
#if CYTHON_PEP393_ENABLED
const int kind = PyUnicode_KIND(unicode);
#ifdef PyUnicode_WCHAR_KIND
if (likely(kind != PyUnicode_WCHAR_KIND))
#endif
{
Py_ssize_t i;
const void* udata = PyUnicode_DATA(unicode);
const Py_ssize_t length = PyUnicode_GET_LENGTH(unicode);
for (i=0; i < length; i++) {
if (unlikely(character == PyUnicode_READ(kind, udata, i))) return 1;
}
return 0;
}
#elif PY_VERSION_HEX >= 0x03090000
#error Cannot use "UChar in Unicode" in Python 3.9 without PEP-393 unicode strings.
#elif !defined(PyUnicode_AS_UNICODE)
#error Cannot use "UChar in Unicode" in Python < 3.9 without Py_UNICODE support.
#endif
#if PY_VERSION_HEX < 0x03090000 || (defined(PyUnicode_WCHAR_KIND) && defined(PyUnicode_AS_UNICODE))
#if !defined(Py_UNICODE_SIZE) || Py_UNICODE_SIZE == 2
if ((sizeof(Py_UNICODE) == 2) && unlikely(character > 65535)) {
return __Pyx_PyUnicodeBufferContainsUCS4_SP(
__Pyx_PyUnicode_AS_UNICODE(unicode),
__Pyx_PyUnicode_GET_SIZE(unicode),
character);
} else
#endif
{
return __Pyx_PyUnicodeBufferContainsUCS4_BMP(
__Pyx_PyUnicode_AS_UNICODE(unicode),
__Pyx_PyUnicode_GET_SIZE(unicode),
character);
}
#endif
}
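// Worked example (illustrative): the surrogate-pair split used above for a
// non-BMP character. For U+1F600, character - 0x10000 == 0xF600;
// high = 0xD800 | (0xF600 >> 10) == 0xD83D and
// low  = 0xDC00 | (0xF600 & 0x3FF) == 0xDE00,
// which is exactly the UTF-16 pair <D83D DE00> searched for in the buffer.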
//////////////////// PyUnicodeContains.proto ////////////////////
static CYTHON_INLINE int __Pyx_PyUnicode_ContainsTF(PyObject* substring, PyObject* text, int eq) {
int result = PyUnicode_Contains(text, substring);
return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
}
//////////////////// CStringEquals.proto ////////////////////
static CYTHON_INLINE int __Pyx_StrEq(const char *, const char *); /*proto*/
//////////////////// CStringEquals ////////////////////
static CYTHON_INLINE int __Pyx_StrEq(const char *s1, const char *s2) {
while (*s1 != '\0' && *s1 == *s2) { s1++; s2++; }
return *s1 == *s2;
}
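// Behaviour sketch (illustrative): equivalent to (strcmp(s1, s2) == 0), but
// returns as soon as the strings diverge or one terminates.
#if 0
#include <assert.h>
assert(__Pyx_StrEq("abc", "abc"));   /* 1 */
assert(!__Pyx_StrEq("abc", "abd"));  /* 0: stops at 'c' vs 'd' */
assert(!__Pyx_StrEq("abc", "abcd")); /* 0: '\0' vs 'd' */
#endif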
//////////////////// StrEquals.proto ////////////////////
//@requires: BytesEquals
//@requires: UnicodeEquals
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
//////////////////// UnicodeEquals.proto ////////////////////
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
//////////////////// UnicodeEquals ////////////////////
//@requires: BytesEquals
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
/* as done by PyObject_RichCompareBool(); also catches the (interned) empty string */
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
// len(s1) == len(s2) >= 1 (empty string is interned, and "s1 is not s2")
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
//////////////////// BytesEquals.proto ////////////////////
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /*proto*/
//////////////////// BytesEquals ////////////////////
//@requires: IncludeStringH
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
/* as done by PyObject_RichCompareBool(); also catches the (interned) empty string */
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
// len(s1) == len(s2) >= 1 (empty string is interned, and "s1 is not s2")
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
//////////////////// GetItemIntByteArray.proto ////////////////////
#define __Pyx_GetItemInt_ByteArray(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_ByteArray_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "bytearray index out of range"), -1))
static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i,
int wraparound, int boundscheck);
//////////////////// GetItemIntByteArray ////////////////////
static CYTHON_INLINE int __Pyx_GetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i,
int wraparound, int boundscheck) {
Py_ssize_t length;
if (wraparound | boundscheck) {
length = PyByteArray_GET_SIZE(string);
if (wraparound & unlikely(i < 0)) i += length;
if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
return (unsigned char) (PyByteArray_AS_STRING(string)[i]);
} else {
PyErr_SetString(PyExc_IndexError, "bytearray index out of range");
return -1;
}
} else {
return (unsigned char) (PyByteArray_AS_STRING(string)[i]);
}
}
//////////////////// SetItemIntByteArray.proto ////////////////////
#define __Pyx_SetItemInt_ByteArray(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_SetItemInt_ByteArray_Fast(o, (Py_ssize_t)i, v, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "bytearray index out of range"), -1))
static CYTHON_INLINE int __Pyx_SetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, unsigned char v,
int wraparound, int boundscheck);
//////////////////// SetItemIntByteArray ////////////////////
static CYTHON_INLINE int __Pyx_SetItemInt_ByteArray_Fast(PyObject* string, Py_ssize_t i, unsigned char v,
int wraparound, int boundscheck) {
Py_ssize_t length;
if (wraparound | boundscheck) {
length = PyByteArray_GET_SIZE(string);
if (wraparound & unlikely(i < 0)) i += length;
if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
PyByteArray_AS_STRING(string)[i] = (char) v;
return 0;
} else {
PyErr_SetString(PyExc_IndexError, "bytearray index out of range");
return -1;
}
} else {
PyByteArray_AS_STRING(string)[i] = (char) v;
return 0;
}
}
//////////////////// GetItemIntUnicode.proto ////////////////////
#define __Pyx_GetItemInt_Unicode(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck) \
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ? \
__Pyx_GetItemInt_Unicode_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) : \
(PyErr_SetString(PyExc_IndexError, "string index out of range"), (Py_UCS4)-1))
static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i,
int wraparound, int boundscheck);
//////////////////// GetItemIntUnicode ////////////////////
static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i,
int wraparound, int boundscheck) {
Py_ssize_t length;
if (unlikely(__Pyx_PyUnicode_READY(ustring) < 0)) return (Py_UCS4)-1;
if (wraparound | boundscheck) {
length = __Pyx_PyUnicode_GET_LENGTH(ustring);
if (wraparound & unlikely(i < 0)) i += length;
if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
return __Pyx_PyUnicode_READ_CHAR(ustring, i);
} else {
PyErr_SetString(PyExc_IndexError, "string index out of range");
return (Py_UCS4)-1;
}
} else {
return __Pyx_PyUnicode_READ_CHAR(ustring, i);
}
}
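// Usage sketch (illustrative): with wraparound and boundscheck enabled,
// index -1 maps to length-1, and an out-of-range index returns the
// (Py_UCS4)-1 error value with IndexError set.
#if 0
PyObject *u = PyUnicode_FromString("abc");
Py_UCS4 ch  = __Pyx_GetItemInt_Unicode_Fast(u, -1, 1, 1); /* 'c' */
Py_UCS4 bad = __Pyx_GetItemInt_Unicode_Fast(u, 7, 1, 1);  /* (Py_UCS4)-1 */
Py_DECREF(u);
#endif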
/////////////// decode_c_string_utf16.proto ///////////////
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
/////////////// decode_cpp_string.proto ///////////////
//@requires: IncludeCppStringH
//@requires: decode_c_bytes
static CYTHON_INLINE PyObject* __Pyx_decode_cpp_string(
std::string cppstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
return __Pyx_decode_c_bytes(
cppstring.data(), cppstring.size(), start, stop, encoding, errors, decode_func);
}
/////////////// decode_c_string.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/////////////// decode_c_string ///////////////
//@requires: IncludeStringH
//@requires: decode_c_string_utf16
//@substitute: naming
/* duplicate code to avoid calling strlen() if start >= 0 and stop >= 0 */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return __Pyx_NewRef($empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
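// Usage sketch (illustrative, assuming the generated module environment;
// error handling omitted): negative bounds take the strlen() branch and are
// interpreted relative to the C string's length, mirroring Python slice
// semantics. A NULL decode_func falls back to PyUnicode_Decode().
#if 0
PyObject *s = __Pyx_decode_c_string("hello world", 0, 5, "utf-8", NULL, NULL);
/* -> u"hello" */
PyObject *t = __Pyx_decode_c_string("hello world", -5, -1, "utf-8", NULL, NULL);
/* strlen == 11, so start=6, stop=10 -> u"worl" */
#endif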
/////////////// decode_c_bytes.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes(
const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/////////////// decode_c_bytes ///////////////
//@requires: decode_c_string_utf16
//@substitute: naming
static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes(
const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
if (unlikely((start < 0) | (stop < 0))) {
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (stop > length)
stop = length;
if (unlikely(stop <= start))
return __Pyx_NewRef($empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/////////////// decode_bytes.proto ///////////////
//@requires: decode_c_bytes
static CYTHON_INLINE PyObject* __Pyx_decode_bytes(
PyObject* string, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
return __Pyx_decode_c_bytes(
PyBytes_AS_STRING(string), PyBytes_GET_SIZE(string),
start, stop, encoding, errors, decode_func);
}
/////////////// decode_bytearray.proto ///////////////
//@requires: decode_c_bytes
static CYTHON_INLINE PyObject* __Pyx_decode_bytearray(
PyObject* string, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
return __Pyx_decode_c_bytes(
PyByteArray_AS_STRING(string), PyByteArray_GET_SIZE(string),
start, stop, encoding, errors, decode_func);
}
/////////////// PyUnicode_Substring.proto ///////////////
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring(
PyObject* text, Py_ssize_t start, Py_ssize_t stop);
/////////////// PyUnicode_Substring ///////////////
//@substitute: naming
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring(
PyObject* text, Py_ssize_t start, Py_ssize_t stop) {
Py_ssize_t length;
if (unlikely(__Pyx_PyUnicode_READY(text) == -1)) return NULL;
length = __Pyx_PyUnicode_GET_LENGTH(text);
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
else if (stop > length)
stop = length;
if (stop <= start)
return __Pyx_NewRef($empty_unicode);
#if CYTHON_PEP393_ENABLED
return PyUnicode_FromKindAndData(PyUnicode_KIND(text),
PyUnicode_1BYTE_DATA(text) + start*PyUnicode_KIND(text), stop-start);
#else
return PyUnicode_FromUnicode(PyUnicode_AS_UNICODE(text)+start, stop-start);
#endif
}
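// Behaviour sketch (illustrative; reference handling omitted): same index
// clamping as Python's text[start:stop].
#if 0
PyObject *u  = PyUnicode_FromString("abcdef");
PyObject *s1 = __Pyx_PyUnicode_Substring(u, 1, 4);    /* u"bcd" */
PyObject *s2 = __Pyx_PyUnicode_Substring(u, -2, 100); /* u"ef" (stop clamped) */
PyObject *s3 = __Pyx_PyUnicode_Substring(u, 4, 2);    /* empty string */
#endif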
/////////////// py_unicode_istitle.proto ///////////////
// Py_UNICODE_ISTITLE() doesn't match unicode.istitle() as the latter
// additionally allows characters that comply with Py_UNICODE_ISUPPER()
#if PY_VERSION_HEX < 0x030200A2
static CYTHON_INLINE int __Pyx_Py_UNICODE_ISTITLE(Py_UNICODE uchar)
#else
static CYTHON_INLINE int __Pyx_Py_UNICODE_ISTITLE(Py_UCS4 uchar)
#endif
{
return Py_UNICODE_ISTITLE(uchar) || Py_UNICODE_ISUPPER(uchar);
}
/////////////// unicode_tailmatch.proto ///////////////
static int __Pyx_PyUnicode_Tailmatch(
PyObject* s, PyObject* substr, Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/
/////////////// unicode_tailmatch ///////////////
// Python's unicode.startswith() and unicode.endswith() support a
// tuple of prefixes/suffixes, whereas it's much more common to
// test for a single unicode string.
static int __Pyx_PyUnicode_TailmatchTuple(PyObject* s, PyObject* substrings,
Py_ssize_t start, Py_ssize_t end, int direction) {
Py_ssize_t i, count = PyTuple_GET_SIZE(substrings);
for (i = 0; i < count; i++) {
Py_ssize_t result;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
result = PyUnicode_Tailmatch(s, PyTuple_GET_ITEM(substrings, i),
start, end, direction);
#else
PyObject* sub = PySequence_ITEM(substrings, i);
if (unlikely(!sub)) return -1;
result = PyUnicode_Tailmatch(s, sub, start, end, direction);
Py_DECREF(sub);
#endif
if (result) {
return (int) result;
}
}
return 0;
}
static int __Pyx_PyUnicode_Tailmatch(PyObject* s, PyObject* substr,
Py_ssize_t start, Py_ssize_t end, int direction) {
if (unlikely(PyTuple_Check(substr))) {
return __Pyx_PyUnicode_TailmatchTuple(s, substr, start, end, direction);
}
return (int) PyUnicode_Tailmatch(s, substr, start, end, direction);
}
/////////////// bytes_tailmatch.proto ///////////////
static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg,
Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/
static int __Pyx_PyBytes_Tailmatch(PyObject* self, PyObject* substr,
Py_ssize_t start, Py_ssize_t end, int direction); /*proto*/
/////////////// bytes_tailmatch ///////////////
static int __Pyx_PyBytes_SingleTailmatch(PyObject* self, PyObject* arg,
Py_ssize_t start, Py_ssize_t end, int direction) {
const char* self_ptr = PyBytes_AS_STRING(self);
Py_ssize_t self_len = PyBytes_GET_SIZE(self);
const char* sub_ptr;
Py_ssize_t sub_len;
int retval;
Py_buffer view;
view.obj = NULL;
if ( PyBytes_Check(arg) ) {
sub_ptr = PyBytes_AS_STRING(arg);
sub_len = PyBytes_GET_SIZE(arg);
}
#if PY_MAJOR_VERSION < 3
// Python 2.x allows mixing unicode and str
else if ( PyUnicode_Check(arg) ) {
return (int) PyUnicode_Tailmatch(self, arg, start, end, direction);
}
#endif
else {
if (unlikely(PyObject_GetBuffer(self, &view, PyBUF_SIMPLE) == -1))
return -1;
sub_ptr = (const char*) view.buf;
sub_len = view.len;
}
if (end > self_len)
end = self_len;
else if (end < 0)
end += self_len;
if (end < 0)
end = 0;
if (start < 0)
start += self_len;
if (start < 0)
start = 0;
if (direction > 0) {
/* endswith */
if (end-sub_len > start)
start = end - sub_len;
}
if (start + sub_len <= end)
retval = !memcmp(self_ptr+start, sub_ptr, (size_t)sub_len);
else
retval = 0;
if (view.obj)
PyBuffer_Release(&view);
return retval;
}
static int __Pyx_PyBytes_TailmatchTuple(PyObject* self, PyObject* substrings,
Py_ssize_t start, Py_ssize_t end, int direction) {
Py_ssize_t i, count = PyTuple_GET_SIZE(substrings);
for (i = 0; i < count; i++) {
int result;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
result = __Pyx_PyBytes_SingleTailmatch(self, PyTuple_GET_ITEM(substrings, i),
start, end, direction);
#else
PyObject* sub = PySequence_ITEM(substrings, i);
if (unlikely(!sub)) return -1;
result = __Pyx_PyBytes_SingleTailmatch(self, sub, start, end, direction);
Py_DECREF(sub);
#endif
if (result) {
return result;
}
}
return 0;
}
static int __Pyx_PyBytes_Tailmatch(PyObject* self, PyObject* substr,
Py_ssize_t start, Py_ssize_t end, int direction) {
if (unlikely(PyTuple_Check(substr))) {
return __Pyx_PyBytes_TailmatchTuple(self, substr, start, end, direction);
}
return __Pyx_PyBytes_SingleTailmatch(self, substr, start, end, direction);
}
/////////////// str_tailmatch.proto ///////////////
static CYTHON_INLINE int __Pyx_PyStr_Tailmatch(PyObject* self, PyObject* arg, Py_ssize_t start,
Py_ssize_t end, int direction); /*proto*/
/////////////// str_tailmatch ///////////////
//@requires: bytes_tailmatch
//@requires: unicode_tailmatch
static CYTHON_INLINE int __Pyx_PyStr_Tailmatch(PyObject* self, PyObject* arg, Py_ssize_t start,
Py_ssize_t end, int direction)
{
// We do not use a C compiler macro here to avoid "unused function"
// warnings for the *_Tailmatch() function that is not being used in
// the specific CPython version. The C compiler will generate the same
// code anyway, and will usually just remove the unused function.
if (PY_MAJOR_VERSION < 3)
return __Pyx_PyBytes_Tailmatch(self, arg, start, end, direction);
else
return __Pyx_PyUnicode_Tailmatch(self, arg, start, end, direction);
}
/////////////// bytes_index.proto ///////////////
static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds); /*proto*/
/////////////// bytes_index ///////////////
static CYTHON_INLINE char __Pyx_PyBytes_GetItemInt(PyObject* bytes, Py_ssize_t index, int check_bounds) {
if (index < 0)
index += PyBytes_GET_SIZE(bytes);
if (check_bounds) {
Py_ssize_t size = PyBytes_GET_SIZE(bytes);
if (unlikely(!__Pyx_is_valid_index(index, size))) {
PyErr_SetString(PyExc_IndexError, "string index out of range");
return (char) -1;
}
}
return PyBytes_AS_STRING(bytes)[index];
}
//////////////////// StringJoin.proto ////////////////////
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyString_Join __Pyx_PyBytes_Join
#define __Pyx_PyBaseString_Join(s, v) (PyUnicode_CheckExact(s) ? PyUnicode_Join(s, v) : __Pyx_PyBytes_Join(s, v))
#else
#define __Pyx_PyString_Join PyUnicode_Join
#define __Pyx_PyBaseString_Join PyUnicode_Join
#endif
#if CYTHON_COMPILING_IN_CPYTHON
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyBytes_Join _PyString_Join
#else
#define __Pyx_PyBytes_Join _PyBytes_Join
#endif
#else
static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values); /*proto*/
#endif
//////////////////// StringJoin ////////////////////
#if !CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyBytes_Join(PyObject* sep, PyObject* values) {
return PyObject_CallMethodObjArgs(sep, PYIDENT("join"), values, NULL);
}
#endif
/////////////// JoinPyUnicode.proto ///////////////
static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength,
Py_UCS4 max_char);
/////////////// JoinPyUnicode ///////////////
//@requires: IncludeStringH
//@substitute: naming
static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength,
CYTHON_UNUSED Py_UCS4 max_char) {
#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
PyObject *result_uval;
int result_ukind;
Py_ssize_t i, char_pos;
void *result_udata;
#if CYTHON_PEP393_ENABLED
// Py 3.3+ (post PEP-393)
result_uval = PyUnicode_New(result_ulength, max_char);
if (unlikely(!result_uval)) return NULL;
result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND;
result_udata = PyUnicode_DATA(result_uval);
#else
// Py 2.x/3.2 (pre PEP-393)
result_uval = PyUnicode_FromUnicode(NULL, result_ulength);
if (unlikely(!result_uval)) return NULL;
result_ukind = sizeof(Py_UNICODE);
result_udata = PyUnicode_AS_UNICODE(result_uval);
#endif
char_pos = 0;
for (i=0; i < value_count; i++) {
int ukind;
Py_ssize_t ulength;
void *udata;
PyObject *uval = PyTuple_GET_ITEM(value_tuple, i);
if (unlikely(__Pyx_PyUnicode_READY(uval)))
goto bad;
ulength = __Pyx_PyUnicode_GET_LENGTH(uval);
if (unlikely(!ulength))
continue;
if (unlikely(char_pos + ulength < 0))
goto overflow;
ukind = __Pyx_PyUnicode_KIND(uval);
udata = __Pyx_PyUnicode_DATA(uval);
if (!CYTHON_PEP393_ENABLED || ukind == result_ukind) {
memcpy((char *)result_udata + char_pos * result_ukind, udata, (size_t) (ulength * result_ukind));
} else {
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030300F0 || defined(_PyUnicode_FastCopyCharacters)
_PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength);
#else
Py_ssize_t j;
for (j=0; j < ulength; j++) {
Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j);
__Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar);
}
#endif
}
char_pos += ulength;
}
return result_uval;
overflow:
PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string");
bad:
Py_DECREF(result_uval);
return NULL;
#else
// non-CPython fallback
result_ulength++;
value_count++;
return PyUnicode_Join($empty_unicode, value_tuple);
#endif
}
/////////////// BuildPyUnicode.proto ///////////////
static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength,
int prepend_sign, char padding_char);
/////////////// BuildPyUnicode ///////////////
// Create a PyUnicode object from an ASCII char*, e.g. a formatted number.
static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength,
int prepend_sign, char padding_char) {
PyObject *uval;
Py_ssize_t uoffset = ulength - clength;
#if CYTHON_USE_UNICODE_INTERNALS
Py_ssize_t i;
#if CYTHON_PEP393_ENABLED
// Py 3.3+ (post PEP-393)
void *udata;
uval = PyUnicode_New(ulength, 127);
if (unlikely(!uval)) return NULL;
udata = PyUnicode_DATA(uval);
#else
// Py 2.x/3.2 (pre PEP-393)
Py_UNICODE *udata;
uval = PyUnicode_FromUnicode(NULL, ulength);
if (unlikely(!uval)) return NULL;
udata = PyUnicode_AS_UNICODE(uval);
#endif
if (uoffset > 0) {
i = 0;
if (prepend_sign) {
__Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, 0, '-');
i++;
}
for (; i < uoffset; i++) {
__Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, i, padding_char);
}
}
for (i=0; i < clength; i++) {
__Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, uoffset+i, chars[i]);
}
#else
// non-CPython
{
PyObject *sign = NULL, *padding = NULL;
uval = NULL;
if (uoffset > 0) {
prepend_sign = !!prepend_sign;
if (uoffset > prepend_sign) {
padding = PyUnicode_FromOrdinal(padding_char);
if (likely(padding) && uoffset > prepend_sign + 1) {
PyObject *tmp;
PyObject *repeat = PyInt_FromSize_t(uoffset - prepend_sign);
if (unlikely(!repeat)) goto done_or_error;
tmp = PyNumber_Multiply(padding, repeat);
Py_DECREF(repeat);
Py_DECREF(padding);
padding = tmp;
}
if (unlikely(!padding)) goto done_or_error;
}
if (prepend_sign) {
sign = PyUnicode_FromOrdinal('-');
if (unlikely(!sign)) goto done_or_error;
}
}
uval = PyUnicode_DecodeASCII(chars, clength, NULL);
if (likely(uval) && padding) {
PyObject *tmp = PyNumber_Add(padding, uval);
Py_DECREF(uval);
uval = tmp;
}
if (likely(uval) && sign) {
PyObject *tmp = PyNumber_Add(sign, uval);
Py_DECREF(uval);
uval = tmp;
}
done_or_error:
Py_XDECREF(padding);
Py_XDECREF(sign);
}
#endif
return uval;
}
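// Worked example (illustrative): building u"-0042" from the digit buffer
// "42" with ulength=5, clength=2, prepend_sign=1, padding_char='0'.
// uoffset == 5 - 2 == 3, so the writer emits '-' at position 0, '0' at
// positions 1..2, then copies "42" into positions 3..4.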
//////////////////// ByteArrayAppendObject.proto ////////////////////
static CYTHON_INLINE int __Pyx_PyByteArray_AppendObject(PyObject* bytearray, PyObject* value);
//////////////////// ByteArrayAppendObject ////////////////////
//@requires: ByteArrayAppend
static CYTHON_INLINE int __Pyx_PyByteArray_AppendObject(PyObject* bytearray, PyObject* value) {
Py_ssize_t ival;
#if PY_MAJOR_VERSION < 3
if (unlikely(PyString_Check(value))) {
if (unlikely(PyString_GET_SIZE(value) != 1)) {
PyErr_SetString(PyExc_ValueError, "string must be of size 1");
return -1;
}
ival = (unsigned char) (PyString_AS_STRING(value)[0]);
} else
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(value)) && likely(Py_SIZE(value) == 1 || Py_SIZE(value) == 0)) {
if (Py_SIZE(value) == 0) {
ival = 0;
} else {
ival = ((PyLongObject*)value)->ob_digit[0];
if (unlikely(ival > 255)) goto bad_range;
}
} else
#endif
{
// CPython calls PyNumber_Index() internally
ival = __Pyx_PyIndex_AsSsize_t(value);
if (unlikely(!__Pyx_is_valid_index(ival, 256))) {
if (ival == -1 && PyErr_Occurred())
return -1;
goto bad_range;
}
}
return __Pyx_PyByteArray_Append(bytearray, ival);
bad_range:
PyErr_SetString(PyExc_ValueError, "byte must be in range(0, 256)");
return -1;
}
//////////////////// ByteArrayAppend.proto ////////////////////
static CYTHON_INLINE int __Pyx_PyByteArray_Append(PyObject* bytearray, int value);
//////////////////// ByteArrayAppend ////////////////////
//@requires: ObjectHandling.c::PyObjectCallMethod1
static CYTHON_INLINE int __Pyx_PyByteArray_Append(PyObject* bytearray, int value) {
PyObject *pyval, *retval;
#if CYTHON_COMPILING_IN_CPYTHON
if (likely(__Pyx_is_valid_index(value, 256))) {
Py_ssize_t n = Py_SIZE(bytearray);
if (likely(n != PY_SSIZE_T_MAX)) {
if (unlikely(PyByteArray_Resize(bytearray, n + 1) < 0))
return -1;
PyByteArray_AS_STRING(bytearray)[n] = value;
return 0;
}
} else {
PyErr_SetString(PyExc_ValueError, "byte must be in range(0, 256)");
return -1;
}
#endif
pyval = PyInt_FromLong(value);
if (unlikely(!pyval))
return -1;
retval = __Pyx_PyObject_CallMethod1(bytearray, PYIDENT("append"), pyval);
Py_DECREF(pyval);
if (unlikely(!retval))
return -1;
Py_DECREF(retval);
return 0;
}
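// Usage sketch (illustrative): in CPython the fast path resizes the
// bytearray in place and pokes the byte directly; elsewhere it degrades to
// calling bytearray.append(int).
#if 0
PyObject *ba = PyByteArray_FromStringAndSize("", 0);
__Pyx_PyByteArray_Append(ba, 65);  /* ba == bytearray(b"A") */
__Pyx_PyByteArray_Append(ba, 300); /* -1, ValueError: byte must be in range(0, 256) */
Py_DECREF(ba);
#endif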
//////////////////// PyObjectFormat.proto ////////////////////
#if CYTHON_USE_UNICODE_WRITER
static PyObject* __Pyx_PyObject_Format(PyObject* s, PyObject* f);
#else
#define __Pyx_PyObject_Format(s, f) PyObject_Format(s, f)
#endif
//////////////////// PyObjectFormat ////////////////////
#if CYTHON_USE_UNICODE_WRITER
static PyObject* __Pyx_PyObject_Format(PyObject* obj, PyObject* format_spec) {
int ret;
_PyUnicodeWriter writer;
if (likely(PyFloat_CheckExact(obj))) {
// copied from CPython 3.5 "float__format__()" in floatobject.c
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000
_PyUnicodeWriter_Init(&writer, 0);
#else
_PyUnicodeWriter_Init(&writer);
#endif
ret = _PyFloat_FormatAdvancedWriter(
&writer,
obj,
format_spec, 0, PyUnicode_GET_LENGTH(format_spec));
} else if (likely(PyLong_CheckExact(obj))) {
// copied from CPython 3.5 "long__format__()" in longobject.c
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x03040000
_PyUnicodeWriter_Init(&writer, 0);
#else
_PyUnicodeWriter_Init(&writer);
#endif
ret = _PyLong_FormatAdvancedWriter(
&writer,
obj,
format_spec, 0, PyUnicode_GET_LENGTH(format_spec));
} else {
return PyObject_Format(obj, format_spec);
}
if (unlikely(ret == -1)) {
_PyUnicodeWriter_Dealloc(&writer);
return NULL;
}
return _PyUnicodeWriter_Finish(&writer);
}
#endif
//////////////////// PyObjectFormatSimple.proto ////////////////////
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyObject_FormatSimple(s, f) ( \
likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \
PyObject_Format(s, f))
#elif PY_MAJOR_VERSION < 3
// str is common in Py2, but formatting must return a Unicode string
#define __Pyx_PyObject_FormatSimple(s, f) ( \
likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \
likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") : \
PyObject_Format(s, f))
#elif CYTHON_USE_TYPE_SLOTS
// Py3 nicely returns unicode strings from str() which makes this quite efficient for builtin types
#define __Pyx_PyObject_FormatSimple(s, f) ( \
likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \
likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_str(s) : \
likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_str(s) : \
PyObject_Format(s, f))
#else
#define __Pyx_PyObject_FormatSimple(s, f) ( \
likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) : \
PyObject_Format(s, f))
#endif
//////////////////// PyObjectFormatAndDecref.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f);
static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f);
//////////////////// PyObjectFormatAndDecref ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatSimpleAndDecref(PyObject* s, PyObject* f) {
if (unlikely(!s)) return NULL;
if (likely(PyUnicode_CheckExact(s))) return s;
#if PY_MAJOR_VERSION < 3
// str is common in Py2, but formatting must return a Unicode string
if (likely(PyString_CheckExact(s))) {
PyObject *result = PyUnicode_FromEncodedObject(s, NULL, "strict");
Py_DECREF(s);
return result;
}
#endif
return __Pyx_PyObject_FormatAndDecref(s, f);
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_FormatAndDecref(PyObject* s, PyObject* f) {
PyObject *result = PyObject_Format(s, f);
Py_DECREF(s);
return result;
}
//////////////////// PyUnicode_Unicode.proto ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj);/*proto*/
//////////////////// PyUnicode_Unicode ////////////////////
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Unicode(PyObject *obj) {
if (unlikely(obj == Py_None))
obj = PYUNICODE("None");
return __Pyx_NewRef(obj);
}
//////////////////// PyObject_Unicode.proto ////////////////////
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyObject_Unicode(obj) \
(likely(PyUnicode_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Str(obj))
#else
#define __Pyx_PyObject_Unicode(obj) \
(likely(PyUnicode_CheckExact(obj)) ? __Pyx_NewRef(obj) : PyObject_Unicode(obj))
#endif
# Cython/Utility/TestCyUtilityLoader.pyx
########## TestCyUtilityLoader ##########
#@requires: OtherUtility
test {{cy_loader}} impl
########## OtherUtility ##########
req {{cy_loader}} impl
# Cython/Utility/TestCythonScope.pyx
########## TestClass ##########
# These utilities are for testing purposes
cdef extern from *:
cdef object __pyx_test_dep(object)
@cname('__pyx_TestClass')
cdef class TestClass(object):
cdef public int value
def __init__(self, int value):
self.value = value
def __str__(self):
return 'TestClass(%d)' % self.value
cdef cdef_method(self, int value):
print 'Hello from cdef_method', value
cpdef cpdef_method(self, int value):
print 'Hello from cpdef_method', value
def def_method(self, int value):
print 'Hello from def_method', value
@cname('cdef_cname')
cdef cdef_cname_method(self, int value):
print "Hello from cdef_cname_method", value
@cname('cpdef_cname')
cpdef cpdef_cname_method(self, int value):
print "Hello from cpdef_cname_method", value
@cname('def_cname')
def def_cname_method(self, int value):
print "Hello from def_cname_method", value
@cname('__pyx_test_call_other_cy_util')
cdef test_call(obj):
print 'test_call'
__pyx_test_dep(obj)
@cname('__pyx_TestClass_New')
cdef _testclass_new(int value):
return TestClass(value)
########### TestDep ##########
@cname('__pyx_test_dep')
cdef test_dep(obj):
print 'test_dep', obj
########## TestScope ##########
@cname('__pyx_testscope')
cdef object _testscope(int value):
return "hello from cython scope, value=%d" % value
########## View.TestScope ##########
@cname('__pyx_view_testscope')
cdef object _testscope(int value):
return "hello from cython.view scope, value=%d" % value
/* Cython/Utility/TestUtilityLoader.c */
////////// TestUtilityLoader.proto //////////
test {{loader}} prototype
////////// TestUtilityLoader //////////
//@requires: OtherUtility
test {{loader}} impl
////////// OtherUtility.proto //////////
req {{loader}} proto
////////// OtherUtility //////////
req {{loader}} impl
/* Cython/Utility/TypeConversion.c */
/////////////// TypeConversions.proto ///////////////
/* Type Conversion Predeclarations */
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \
(sizeof(type) < sizeof(Py_ssize_t)) || \
(sizeof(type) > sizeof(Py_ssize_t) && \
likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX) && \
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \
v == (type)PY_SSIZE_T_MIN))) || \
(sizeof(type) == sizeof(Py_ssize_t) && \
(is_signed || likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
// Optimisation from Section 14.2 "Bounds Checking" in
// https://www.agner.org/optimize/optimizing_cpp.pdf
// See https://bugs.python.org/issue28397
// The cast to unsigned effectively tests for "0 <= i < limit".
return (size_t) i < (size_t) limit;
}
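// Worked example (illustrative): the single unsigned comparison covers both
// bounds at once. For limit == 10: i == -1 becomes a huge unsigned value
// after the cast, so it is >= 10 and invalid; i == 3 stays 3 < 10 and is
// valid; i == 10 is rejected as well.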
// fast and unsafe abs(Py_ssize_t) that ignores the overflow for (-PY_SSIZE_T_MAX-1)
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
// abs() is defined for long, but the 64-bit type on MSVC is long long.
// Use MS-specific _abs64() instead.
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
// gcc or clang on 64-bit Windows.
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
// There used to be a Py_UNICODE_strlen() in CPython 3.x, but it is deprecated since Py3.3.
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj) \
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
// __PYX_DEFAULT_STRING_ENCODING is either a user provided string constant
// or we need to look it up here
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/////////////// TypeConversions ///////////////
/* Type Conversion Functions */
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
// Py3.7 returns a "const char*" for unicode strings
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
// borrowed reference, cached internally in 'o' by CPython
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
// raise the error
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif /*__PYX_DEFAULT_STRING_ENCODING_IS_ASCII*/
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else /* CYTHON_PEP393_ENABLED: */
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
// cached for the lifetime of the object
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
// raise the error
PyUnicode_AsASCIIString(o);
return NULL;
}
#else /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
return PyUnicode_AsUTF8AndSize(o, length);
#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII */
}
#endif /* CYTHON_PEP393_ENABLED */
#endif
// Py3.7 returns a "const char*" for unicode strings
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif /* __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT */
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
/* Note: __Pyx_PyObject_IsTrue is written to minimize branching. */
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
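// The bitwise '|' above folds the three singleton identity tests into a single
// branch; only non-singleton objects pay for the PyObject_IsTrue() call.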
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
// CPython issue #17576: warn if 'result' not of exact type int.
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
{{py: from Cython.Utility import pylong_join }}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
// handle most common case first to avoid indirect branch and optimise branch prediction
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
{{for _size in (2, 3, 4)}}
{{for _case in (_size, -_size)}}
case {{_case}}:
if (8 * sizeof(Py_ssize_t) > {{_size}} * PyLong_SHIFT) {
return {{'-' if _case < 0 else ''}}(Py_ssize_t) {{pylong_join(_size, 'digits', 'size_t')}};
}
break;
{{endfor}}
{{endfor}}
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) {
if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) {
return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o);
#if PY_MAJOR_VERSION < 3
} else if (likely(PyInt_CheckExact(o))) {
return PyInt_AS_LONG(o);
#endif
} else {
Py_ssize_t ival;
PyObject *x;
x = PyNumber_Index(o);
if (!x) return -1;
ival = PyInt_AsLong(x);
Py_DECREF(x);
return ival;
}
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
/////////////// GCCDiagnostics.proto ///////////////
// GCC diagnostic pragmas were introduced in GCC 4.6
// Used to silence conversion warnings that are ok but cannot be avoided.
#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
#define __Pyx_HAS_GCC_DIAGNOSTIC
#endif
/////////////// ToPyCTupleUtility.proto ///////////////
static PyObject* {{funcname}}({{struct_type_decl}});
/////////////// ToPyCTupleUtility ///////////////
static PyObject* {{funcname}}({{struct_type_decl}} value) {
PyObject* item = NULL;
PyObject* result = PyTuple_New({{size}});
if (!result) goto bad;
{{for ix, component in enumerate(components):}}
{{py:attr = "value.f%s" % ix}}
item = {{component.to_py_function}}({{attr}});
if (!item) goto bad;
PyTuple_SET_ITEM(result, {{ix}}, item);
{{endfor}}
return result;
bad:
Py_XDECREF(item);
Py_XDECREF(result);
return NULL;
}
/////////////// FromPyCTupleUtility.proto ///////////////
static {{struct_type_decl}} {{funcname}}(PyObject *);
/////////////// FromPyCTupleUtility ///////////////
static {{struct_type_decl}} {{funcname}}(PyObject * o) {
{{struct_type_decl}} result;
if (!PyTuple_Check(o) || PyTuple_GET_SIZE(o) != {{size}}) {
PyErr_Format(PyExc_TypeError, "Expected %.16s of size %d, got %.200s", "a tuple", {{size}}, Py_TYPE(o)->tp_name);
goto bad;
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
{{for ix, component in enumerate(components):}}
{{py:attr = "result.f%s" % ix}}
{{attr}} = {{component.from_py_function}}(PyTuple_GET_ITEM(o, {{ix}}));
if ({{component.error_condition(attr)}}) goto bad;
{{endfor}}
#else
{
PyObject *item;
{{for ix, component in enumerate(components):}}
{{py:attr = "result.f%s" % ix}}
item = PySequence_ITEM(o, {{ix}}); if (unlikely(!item)) goto bad;
{{attr}} = {{component.from_py_function}}(item);
Py_DECREF(item);
if ({{component.error_condition(attr)}}) goto bad;
{{endfor}}
}
#endif
return result;
bad:
return result;
}
/////////////// UnicodeAsUCS4.proto ///////////////
static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*);
/////////////// UnicodeAsUCS4 ///////////////
static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) {
Py_ssize_t length;
#if CYTHON_PEP393_ENABLED
length = PyUnicode_GET_LENGTH(x);
if (likely(length == 1)) {
return PyUnicode_READ_CHAR(x, 0);
}
#else
length = PyUnicode_GET_SIZE(x);
if (likely(length == 1)) {
return PyUnicode_AS_UNICODE(x)[0];
}
#if Py_UNICODE_SIZE == 2
else if (PyUnicode_GET_SIZE(x) == 2) {
Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0];
if (high_val >= 0xD800 && high_val <= 0xDBFF) {
Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1];
if (low_val >= 0xDC00 && low_val <= 0xDFFF) {
return 0x10000 + (((high_val & ((1<<10)-1)) << 10) | (low_val & ((1<<10)-1)));
}
}
}
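// Worked example: U+1F600 is stored as the surrogate pair D83D/DE00;
// (0xD83D & 0x3FF) << 10 = 0xF400, 0xDE00 & 0x3FF = 0x200, and
// 0x10000 + (0xF400 | 0x200) = 0x1F600 recovers the code point.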
#endif
#endif
PyErr_Format(PyExc_ValueError,
"only single character unicode strings can be converted to Py_UCS4, "
"got length %" CYTHON_FORMAT_SSIZE_T "d", length);
return (Py_UCS4)-1;
}
/////////////// ObjectAsUCS4.proto ///////////////
//@requires: UnicodeAsUCS4
#define __Pyx_PyObject_AsPy_UCS4(x) \
(likely(PyUnicode_Check(x)) ? __Pyx_PyUnicode_AsPy_UCS4(x) : __Pyx__PyObject_AsPy_UCS4(x))
static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject*);
/////////////// ObjectAsUCS4 ///////////////
static Py_UCS4 __Pyx__PyObject_AsPy_UCS4_raise_error(long ival) {
if (ival < 0) {
if (!PyErr_Occurred())
PyErr_SetString(PyExc_OverflowError,
"cannot convert negative value to Py_UCS4");
} else {
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to Py_UCS4");
}
return (Py_UCS4)-1;
}
static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject* x) {
long ival;
ival = __Pyx_PyInt_As_long(x);
if (unlikely(!__Pyx_is_valid_index(ival, 1114111 + 1))) {
return __Pyx__PyObject_AsPy_UCS4_raise_error(ival);
}
return (Py_UCS4)ival;
}
/////////////// ObjectAsPyUnicode.proto ///////////////
static CYTHON_INLINE Py_UNICODE __Pyx_PyObject_AsPy_UNICODE(PyObject*);
/////////////// ObjectAsPyUnicode ///////////////
static CYTHON_INLINE Py_UNICODE __Pyx_PyObject_AsPy_UNICODE(PyObject* x) {
long ival;
#if CYTHON_PEP393_ENABLED
#if Py_UNICODE_SIZE > 2
const long maxval = 1114111;
#else
const long maxval = 65535;
#endif
#else
static long maxval = 0;
#endif
if (PyUnicode_Check(x)) {
if (unlikely(__Pyx_PyUnicode_GET_LENGTH(x) != 1)) {
PyErr_Format(PyExc_ValueError,
"only single character unicode strings can be converted to Py_UNICODE, "
"got length %" CYTHON_FORMAT_SSIZE_T "d", __Pyx_PyUnicode_GET_LENGTH(x));
return (Py_UNICODE)-1;
}
#if CYTHON_PEP393_ENABLED
ival = PyUnicode_READ_CHAR(x, 0);
#else
return PyUnicode_AS_UNICODE(x)[0];
#endif
} else {
#if !CYTHON_PEP393_ENABLED
if (unlikely(!maxval))
maxval = (long)PyUnicode_GetMax();
#endif
ival = __Pyx_PyInt_As_long(x);
}
if (unlikely(!__Pyx_is_valid_index(ival, maxval + 1))) {
if (ival < 0) {
if (!PyErr_Occurred())
PyErr_SetString(PyExc_OverflowError,
"cannot convert negative value to Py_UNICODE");
return (Py_UNICODE)-1;
} else {
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to Py_UNICODE");
}
return (Py_UNICODE)-1;
}
return (Py_UNICODE)ival;
}
/////////////// CIntToPy.proto ///////////////
static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value);
/////////////// CIntToPy ///////////////
//@requires: GCCDiagnostics
static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof({{TYPE}}) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof({{TYPE}}) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof({{TYPE}}) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof({{TYPE}}) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof({{TYPE}}) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
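// '(int)*(unsigned char *)&one' is 1 on little-endian hosts and 0 on
// big-endian ones, selecting the byte order for _PyLong_FromByteArray().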
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof({{TYPE}}),
little, !is_unsigned);
}
}
/////////////// CIntToDigits ///////////////
static const char DIGIT_PAIRS_10[2*10*10+1] = {
"00010203040506070809"
"10111213141516171819"
"20212223242526272829"
"30313233343536373839"
"40414243444546474849"
"50515253545556575859"
"60616263646566676869"
"70717273747576777879"
"80818283848586878889"
"90919293949596979899"
};
static const char DIGIT_PAIRS_8[2*8*8+1] = {
"0001020304050607"
"1011121314151617"
"2021222324252627"
"3031323334353637"
"4041424344454647"
"5051525354555657"
"6061626364656667"
"7071727374757677"
};
static const char DIGITS_HEX[2*16+1] = {
"0123456789abcdef"
"0123456789ABCDEF"
};
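// Layout note: DIGIT_PAIRS_10 + 2*i holds the two ASCII digits of i for
// 0 <= i < 100 (e.g. offset 2*42 points at "42"); DIGIT_PAIRS_8 does the
// same for 0 <= i < 64. This lets the formatting loop below emit two
// digits per division instead of one.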
/////////////// CIntToPyUnicode.proto ///////////////
static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value, Py_ssize_t width, char padding_char, char format_char);
/////////////// CIntToPyUnicode ///////////////
//@requires: StringTools.c::IncludeStringH
//@requires: StringTools.c::BuildPyUnicode
//@requires: CIntToDigits
//@requires: GCCDiagnostics
// NOTE: inlining because most arguments are constant, which collapses lots of code below
static CYTHON_INLINE PyObject* {{TO_PY_FUNCTION}}({{TYPE}} value, Py_ssize_t width, char padding_char, char format_char) {
// simple and conservative C string allocation on the stack: each byte gives at most 3 digits, plus sign
char digits[sizeof({{TYPE}})*3+2];
// 'dpos' points to end of digits array + 1 initially to allow for pre-decrement looping
char *dpos, *end = digits + sizeof({{TYPE}})*3+2;
const char *hex_digits = DIGITS_HEX;
Py_ssize_t length, ulength;
int prepend_sign, last_one_off;
{{TYPE}} remaining;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
if (format_char == 'X') {
hex_digits += 16;
format_char = 'x';
}
// surprise: even trivial sprintf() calls don't get optimised in gcc (4.8)
remaining = value; /* not using abs(value) to avoid overflow problems */
last_one_off = 0;
dpos = end;
do {
int digit_pos;
switch (format_char) {
case 'o':
digit_pos = abs((int)(remaining % (8*8)));
remaining = ({{TYPE}}) (remaining / (8*8));
dpos -= 2;
memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2); /* copy 2 digits at a time, unaligned */
last_one_off = (digit_pos < 8);
break;
case 'd':
digit_pos = abs((int)(remaining % (10*10)));
remaining = ({{TYPE}}) (remaining / (10*10));
dpos -= 2;
memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2); /* copy 2 digits at a time, unaligned */
last_one_off = (digit_pos < 10);
break;
case 'x':
*(--dpos) = hex_digits[abs((int)(remaining % 16))];
remaining = ({{TYPE}}) (remaining / 16);
break;
default:
assert(0);
break;
}
} while (unlikely(remaining != 0));
if (last_one_off) {
assert(*dpos == '0');
dpos++;
}
length = end - dpos;
ulength = length;
prepend_sign = 0;
if (!is_unsigned && value <= neg_one) {
if (padding_char == ' ' || width <= length + 1) {
*(--dpos) = '-';
++length;
} else {
prepend_sign = 1;
}
++ulength;
}
if (width > ulength) {
ulength = width;
}
// single character unicode strings are cached in CPython => use PyUnicode_FromOrdinal() for them
if (ulength == 1) {
return PyUnicode_FromOrdinal(*dpos);
}
return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char);
}
/////////////// CBIntToPyUnicode.proto ///////////////
#define {{TO_PY_FUNCTION}}(value) \
((value) ? __Pyx_NewRef({{TRUE_CONST}}) : __Pyx_NewRef({{FALSE_CONST}}))
/////////////// PyIntFromDouble.proto ///////////////
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value);
#else
#define __Pyx_PyInt_FromDouble(value) PyLong_FromDouble(value)
#endif
/////////////// PyIntFromDouble ///////////////
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE PyObject* __Pyx_PyInt_FromDouble(double value) {
if (value >= (double)LONG_MIN && value <= (double)LONG_MAX) {
return PyInt_FromLong((long)value);
}
return PyLong_FromDouble(value);
}
#endif
/////////////// CIntFromPyVerify ///////////////
// see CIntFromPy
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value) \
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value) \
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc) \
{ \
func_type value = func_value; \
if (sizeof(target_type) < sizeof(func_type)) { \
if (unlikely(value != (func_type) (target_type) value)) { \
func_type zero = 0; \
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred())) \
return (target_type) -1; \
if (is_unsigned && unlikely(value < zero)) \
goto raise_neg_overflow; \
else \
goto raise_overflow; \
} \
} \
return (target_type) value; \
}
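// Example: __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) performs a
// round-trip cast check: if (long)(char)value != value, the value cannot be
// represented and the matching overflow label is taken (sign-aware through
// the enclosing function's 'is_unsigned').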
/////////////// CIntFromPy.proto ///////////////
static CYTHON_INLINE {{TYPE}} {{FROM_PY_FUNCTION}}(PyObject *);
/////////////// CIntFromPy ///////////////
//@requires: CIntFromPyVerify
//@requires: GCCDiagnostics
{{py: from Cython.Utility import pylong_join }}
static CYTHON_INLINE {{TYPE}} {{FROM_PY_FUNCTION}}(PyObject *x) {
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#endif
const {{TYPE}} neg_one = ({{TYPE}}) -1, const_zero = ({{TYPE}}) 0;
#ifdef __Pyx_HAS_GCC_DIAGNOSTIC
#pragma GCC diagnostic pop
#endif
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof({{TYPE}}) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT({{TYPE}}, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return ({{TYPE}}) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return ({{TYPE}}) 0;
case 1: __PYX_VERIFY_RETURN_INT({{TYPE}}, digit, digits[0])
{{for _size in (2, 3, 4)}}
case {{_size}}:
if (8 * sizeof({{TYPE}}) > {{_size-1}} * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT({{TYPE}}, unsigned long, {{pylong_join(_size, 'digits')}})
} else if (8 * sizeof({{TYPE}}) >= {{_size}} * PyLong_SHIFT) {
return ({{TYPE}}) {{pylong_join(_size, 'digits', TYPE)}};
}
}
break;
{{endfor}}
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
// misuse Py_False as a quick way to compare to a '0' int object in PyPy
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return ({{TYPE}}) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof({{TYPE}}) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof({{TYPE}}) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
// signed
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return ({{TYPE}}) 0;
case -1: __PYX_VERIFY_RETURN_INT({{TYPE}}, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT({{TYPE}}, digit, +digits[0])
{{for _size in (2, 3, 4)}}
{{for _case in (-_size, _size)}}
case {{_case}}:
if (8 * sizeof({{TYPE}}){{' - 1' if _case < 0 else ''}} > {{_size-1}} * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > {{_size}} * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT({{TYPE}}, {{'long' if _case < 0 else 'unsigned long'}}, {{'-(long) ' if _case < 0 else ''}}{{pylong_join(_size, 'digits')}})
} else if (8 * sizeof({{TYPE}}) - 1 > {{_size}} * PyLong_SHIFT) {
return ({{TYPE}}) ({{'((%s)-1)*' % TYPE if _case < 0 else ''}}{{pylong_join(_size, 'digits', TYPE)}});
}
}
break;
{{endfor}}
{{endfor}}
}
#endif
if (sizeof({{TYPE}}) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof({{TYPE}}) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC({{TYPE}}, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
{{TYPE}} val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return ({{TYPE}}) -1;
}
} else {
{{TYPE}} val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return ({{TYPE}}) -1;
val = {{FROM_PY_FUNCTION}}(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to {{TYPE}}");
return ({{TYPE}}) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to {{TYPE}}");
return ({{TYPE}}) -1;
}
# Cython-0.29.28/Cython/Utility/__init__.py
def pylong_join(count, digits_ptr='digits', join_type='unsigned long'):
"""
Generate an unrolled shift-then-or expression over the first 'count' digits.
Assumes that they fit into 'join_type'.
(((d[2] << n) | d[1]) << n) | d[0]
"""
return ('(' * (count * 2) + ' | '.join(
"(%s)%s[%d])%s)" % (join_type, digits_ptr, _i, " << PyLong_SHIFT" if _i else '')
for _i in range(count-1, -1, -1)))
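# For example, pylong_join(2) (with the defaults above) returns the C expression
#     (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))
# i.e. a fully parenthesised two-digit reconstruction of the PyLong value.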
# although it could potentially make use of data independence,
# this implementation is a bit slower than the simpler one above
def _pylong_join(count, digits_ptr='digits', join_type='unsigned long'):
"""
Generate an or-ed series of shifts for the first 'count' digits.
Assumes that they fit into 'join_type'.
(d[2] << 2*n) | (d[1] << 1*n) | d[0]
"""
def shift(n):
# avoid compiler warnings for overly large shifts that will be discarded anyway
return " << (%d * PyLong_SHIFT < 8 * sizeof(%s) ? %d * PyLong_SHIFT : 0)" % (n, join_type, n) if n else ''
return '(%s)' % ' | '.join(
"(((%s)%s[%d])%s)" % (join_type, digits_ptr, i, shift(i))
for i in range(count-1, -1, -1))
// Cython-0.29.28/Cython/Utility/arrayarray.h
/////////////// ArrayAPI.proto ///////////////
// arrayarray.h
//
// Artificial C-API for Python's array.array type,
// used by array.pxd
//
// last changes: 2009-05-15 rk
// 2012-05-02 andreasvc
// (see revision control)
//
#ifndef _ARRAYARRAY_H
#define _ARRAYARRAY_H
// These two forward declarations are explicitly handled in the type
// declaration code, as including them here is too late for cython-defined
// types to use them.
// struct arrayobject;
// typedef struct arrayobject arrayobject;
// All possible arraydescr values are defined in the vector "descriptors"
// below. That's defined later because the appropriate get and set
// functions aren't visible yet.
typedef struct arraydescr {
int typecode;
int itemsize;
PyObject * (*getitem)(struct arrayobject *, Py_ssize_t);
int (*setitem)(struct arrayobject *, Py_ssize_t, PyObject *);
#if PY_MAJOR_VERSION >= 3
char *formats;
#endif
} arraydescr;
struct arrayobject {
PyObject_HEAD
Py_ssize_t ob_size;
union {
char *ob_item;
float *as_floats;
double *as_doubles;
int *as_ints;
unsigned int *as_uints;
unsigned char *as_uchars;
signed char *as_schars;
char *as_chars;
unsigned long *as_ulongs;
long *as_longs;
#if PY_MAJOR_VERSION >= 3
unsigned long long *as_ulonglongs;
long long *as_longlongs;
#endif
short *as_shorts;
unsigned short *as_ushorts;
Py_UNICODE *as_pyunicodes;
void *as_voidptr;
} data;
Py_ssize_t allocated;
struct arraydescr *ob_descr;
PyObject *weakreflist; /* List of weak references */
#if PY_MAJOR_VERSION >= 3
int ob_exports; /* Number of exported buffers */
#endif
};
#ifndef NO_NEWARRAY_INLINE
// fast creation of a new array
static CYTHON_INLINE PyObject * newarrayobject(PyTypeObject *type, Py_ssize_t size,
struct arraydescr *descr) {
arrayobject *op;
size_t nbytes;
if (size < 0) {
PyErr_BadInternalCall();
return NULL;
}
nbytes = size * descr->itemsize;
// Check for overflow
if (nbytes / descr->itemsize != (size_t)size) {
return PyErr_NoMemory();
}
op = (arrayobject *) type->tp_alloc(type, 0);
if (op == NULL) {
return NULL;
}
op->ob_descr = descr;
op->allocated = size;
op->weakreflist = NULL;
__Pyx_SET_SIZE(op, size);
if (size <= 0) {
op->data.ob_item = NULL;
}
else {
op->data.ob_item = PyMem_NEW(char, nbytes);
if (op->data.ob_item == NULL) {
Py_DECREF(op);
return PyErr_NoMemory();
}
}
return (PyObject *) op;
}
#else
PyObject* newarrayobject(PyTypeObject *type, Py_ssize_t size,
struct arraydescr *descr);
#endif /* ifndef NO_NEWARRAY_INLINE */
// fast resize (reallocates to exactly the requested size)
// not designed for filling in small increments (but for fast opaque array use);
// see resize_smart() below for incremental growth
static CYTHON_INLINE int resize(arrayobject *self, Py_ssize_t n) {
void *items = (void*) self->data.ob_item;
PyMem_Resize(items, char, (size_t)(n * self->ob_descr->itemsize));
if (items == NULL) {
PyErr_NoMemory();
return -1;
}
self->data.ob_item = (char*) items;
__Pyx_SET_SIZE(self, n);
self->allocated = n;
return 0;
}
// suitable for small increments; over-allocates by about 50%
static CYTHON_INLINE int resize_smart(arrayobject *self, Py_ssize_t n) {
void *items = (void*) self->data.ob_item;
Py_ssize_t newsize;
if (n < self->allocated && n*4 > self->allocated) {
__Pyx_SET_SIZE(self, n);
return 0;
}
newsize = n + (n / 2) + 1;
if (newsize <= n) { /* overflow */
PyErr_NoMemory();
return -1;
}
PyMem_Resize(items, char, (size_t)(newsize * self->ob_descr->itemsize));
if (items == NULL) {
PyErr_NoMemory();
return -1;
}
self->data.ob_item = (char*) items;
__Pyx_SET_SIZE(self, n);
self->allocated = newsize;
return 0;
}
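// Growth example: n = 8 gives newsize = 8 + 8/2 + 1 = 13; shrinking merely
// updates ob_size as long as more than a quarter of the allocation stays used.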
#endif
/* _ARRAYARRAY_H */
# Cython-0.29.28/Cython/Utils.py
#
# Cython -- Things that don't belong
# anywhere else in particular
#
from __future__ import absolute_import
try:
from __builtin__ import basestring
except ImportError:
basestring = str
try:
FileNotFoundError
except NameError:
FileNotFoundError = OSError
import os
import sys
import re
import io
import codecs
import shutil
import tempfile
from contextlib import contextmanager
modification_time = os.path.getmtime
_function_caches = []
def clear_function_caches():
for cache in _function_caches:
cache.clear()
def cached_function(f):
cache = {}
_function_caches.append(cache)
uncomputed = object()
def wrapper(*args):
res = cache.get(args, uncomputed)
if res is uncomputed:
res = cache[args] = f(*args)
return res
wrapper.uncached = f
return wrapper
def cached_method(f):
cache_name = '__%s_cache' % f.__name__
def wrapper(self, *args):
cache = getattr(self, cache_name, None)
if cache is None:
cache = {}
setattr(self, cache_name, cache)
if args in cache:
return cache[args]
res = cache[args] = f(self, *args)
return res
return wrapper
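# Usage sketch: decorating a function with @cached_function memoises it on its
# positional arguments; cached_method does the same but stores the cache as an
# attribute on the instance, so it is dropped together with the object.
#
#     @cached_function
#     def find_root_package_dir(file_path): ...   # see the real uses below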
def replace_suffix(path, newsuf):
base, _ = os.path.splitext(path)
return base + newsuf
def open_new_file(path):
if os.path.exists(path):
# Make sure to create a new file here so we can
# safely hard link the output files.
os.unlink(path)
# we use the ISO-8859-1 encoding here because we only write pure
# ASCII strings or (e.g. for file names) byte encoded strings as
# Unicode, so we need a direct mapping from the first 256 Unicode
# characters to a byte sequence, which ISO-8859-1 provides
# note: can't use io.open() in Py2 as we may be writing str objects
return codecs.open(path, "w", encoding="ISO-8859-1")
def castrate_file(path, st):
# Remove junk contents from an output file after a
# failed compilation.
# Also sets access and modification times back to
# those specified by st (a stat struct).
try:
f = open_new_file(path)
except EnvironmentError:
pass
else:
f.write(
"#error Do not use this file, it is the result of a failed Cython compilation.\n")
f.close()
if st:
os.utime(path, (st.st_atime, st.st_mtime-1))
def file_newer_than(path, time):
ftime = modification_time(path)
return ftime > time
def safe_makedirs(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def copy_file_to_dir_if_newer(sourcefile, destdir):
"""
Copy file sourcefile to directory destdir (creating it if needed),
preserving metadata. If the destination file exists and is not
older than the source file, the copying is skipped.
"""
destfile = os.path.join(destdir, os.path.basename(sourcefile))
try:
desttime = modification_time(destfile)
except OSError:
# New file does not exist, destdir may or may not exist
safe_makedirs(destdir)
else:
# New file already exists
if not file_newer_than(sourcefile, desttime):
return
shutil.copy2(sourcefile, destfile)
@cached_function
def find_root_package_dir(file_path):
dir = os.path.dirname(file_path)
if file_path == dir:
return dir
elif is_package_dir(dir):
return find_root_package_dir(dir)
else:
return dir
@cached_function
def check_package_dir(dir, package_names):
for dirname in package_names:
dir = os.path.join(dir, dirname)
if not is_package_dir(dir):
return None
return dir
@cached_function
def is_package_dir(dir_path):
for filename in ("__init__.py",
"__init__.pyc",
"__init__.pyx",
"__init__.pxd"):
path = os.path.join(dir_path, filename)
if path_exists(path):
return 1
@cached_function
def path_exists(path):
# try on the filesystem first
if os.path.exists(path):
return True
# figure out if a PEP 302 loader is around
try:
loader = __loader__
# XXX the code below assumes a 'zipimport.zipimporter' instance
# XXX should be easy to generalize, but too lazy right now to write it
archive_path = getattr(loader, 'archive', None)
if archive_path:
normpath = os.path.normpath(path)
if normpath.startswith(archive_path):
arcname = normpath[len(archive_path)+1:]
try:
loader.get_data(arcname)
return True
except IOError:
return False
except NameError:
pass
return False
# file name encodings
def decode_filename(filename):
if isinstance(filename, bytes):
try:
filename_encoding = sys.getfilesystemencoding()
if filename_encoding is None:
filename_encoding = sys.getdefaultencoding()
filename = filename.decode(filename_encoding)
except UnicodeDecodeError:
pass
return filename
# support for source file encoding detection
_match_file_encoding = re.compile(br"(\w*coding)[:=]\s*([-\w.]+)").search
def detect_opened_file_encoding(f):
# PEPs 263 and 3120
# Most of the time the first two lines fall in the first couple of hundred chars,
# and this bulk read/split is much faster.
lines = ()
start = b''
while len(lines) < 3:
data = f.read(500)
start += data
lines = start.split(b"\n")
if not data:
break
m = _match_file_encoding(lines[0])
if m and m.group(1) != b'c_string_encoding':
return m.group(2).decode('iso8859-1')
elif len(lines) > 1:
m = _match_file_encoding(lines[1])
if m:
return m.group(2).decode('iso8859-1')
return "UTF-8"
def skip_bom(f):
"""
Read past a BOM at the beginning of a source file.
This could be added to the scanner, but it's *substantially* easier
to keep it at this level.
"""
if f.read(1) != u'\uFEFF':
f.seek(0)
def open_source_file(source_filename, encoding=None, error_handling=None):
stream = None
try:
if encoding is None:
# Most of the time the encoding is not specified, so try hard to open the file only once.
f = io.open(source_filename, 'rb')
encoding = detect_opened_file_encoding(f)
f.seek(0)
stream = io.TextIOWrapper(f, encoding=encoding, errors=error_handling)
else:
stream = io.open(source_filename, encoding=encoding, errors=error_handling)
except OSError:
if os.path.exists(source_filename):
raise # File is there, but something went wrong reading from it.
# Allow source files to be in zip files etc.
try:
loader = __loader__
if source_filename.startswith(loader.archive):
stream = open_source_from_loader(
loader, source_filename,
encoding, error_handling)
except (NameError, AttributeError):
pass
if stream is None:
raise FileNotFoundError(source_filename)
skip_bom(stream)
return stream
def open_source_from_loader(loader,
source_filename,
encoding=None, error_handling=None):
nrmpath = os.path.normpath(source_filename)
arcname = nrmpath[len(loader.archive)+1:]
data = loader.get_data(arcname)
return io.TextIOWrapper(io.BytesIO(data),
encoding=encoding,
errors=error_handling)
def str_to_number(value):
# note: this expects a string as input that was accepted by the
# parser already, with an optional "-" sign in front
is_neg = False
if value[:1] == '-':
is_neg = True
value = value[1:]
if len(value) < 2:
value = int(value, 0)
elif value[0] == '0':
literal_type = value[1] # 'o', 'b' or 'x' (any case) after the leading '0'
if literal_type in 'xX':
# hex notation ('0x1AF')
value = int(value[2:], 16)
elif literal_type in 'oO':
# Py3 octal notation ('0o136')
value = int(value[2:], 8)
elif literal_type in 'bB':
# Py3 binary notation ('0b101')
value = int(value[2:], 2)
else:
# Py2 octal notation ('0136')
value = int(value, 8)
else:
value = int(value, 0)
return -value if is_neg else value
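# Examples: str_to_number('0x1AF') == 431, str_to_number('0136') == 94
# (Py2-style octal), str_to_number('-0b101') == -5.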
def long_literal(value):
if isinstance(value, basestring):
value = str_to_number(value)
return not -2**31 <= value < 2**31
@cached_function
def get_cython_cache_dir():
r"""
Return the base directory containing Cython's caches.
Priority:
1. CYTHON_CACHE_DIR
2. (OS X): ~/Library/Caches/Cython
(posix not OS X): XDG_CACHE_HOME/cython if XDG_CACHE_HOME defined
3. ~/.cython
"""
if 'CYTHON_CACHE_DIR' in os.environ:
return os.environ['CYTHON_CACHE_DIR']
parent = None
if os.name == 'posix':
if sys.platform == 'darwin':
parent = os.path.expanduser('~/Library/Caches')
else:
# this could fallback on ~/.cache
parent = os.environ.get('XDG_CACHE_HOME')
if parent and os.path.isdir(parent):
return os.path.join(parent, 'cython')
# last fallback: ~/.cython
return os.path.expanduser(os.path.join('~', '.cython'))
@contextmanager
def captured_fd(stream=2, encoding=None):
orig_stream = os.dup(stream) # keep copy of original stream
try:
with tempfile.TemporaryFile(mode="a+b") as temp_file:
def read_output(_output=[b'']):
if not temp_file.closed:
temp_file.seek(0)
_output[0] = temp_file.read()
return _output[0]
os.dup2(temp_file.fileno(), stream) # replace stream by copy of pipe
try:
def get_output():
result = read_output()
return result.decode(encoding) if encoding else result
yield get_output
finally:
os.dup2(orig_stream, stream) # restore original stream
read_output() # keep the output in case it's used after closing the context manager
finally:
os.close(orig_stream)
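# Usage sketch (assuming a POSIX-like platform where fd 2 is stderr):
#
#     with captured_fd(2) as get_stderr:
#         os.write(2, b"oops")
#     assert get_stderr() == b"oops"
#
# The mutable default argument in read_output() deliberately keeps the last
# captured content accessible even after the temporary file is closed.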
def print_bytes(s, header_text=None, end=b'\n', file=sys.stdout, flush=True):
if header_text:
file.write(header_text) # note: text! => file.write() instead of out.write()
file.flush()
try:
out = file.buffer # Py3
except AttributeError:
out = file # Py2
out.write(s)
if end:
out.write(end)
if flush:
out.flush()
class LazyStr:
def __init__(self, callback):
self.callback = callback
def __str__(self):
return self.callback()
def __repr__(self):
return self.callback()
def __add__(self, right):
return self.callback() + right
def __radd__(self, left):
return left + self.callback()
class OrderedSet(object):
def __init__(self, elements=()):
self._list = []
self._set = set()
self.update(elements)
def __iter__(self):
return iter(self._list)
def update(self, elements):
for e in elements:
self.add(e)
def add(self, e):
if e not in self._set:
self._list.append(e)
self._set.add(e)
# Class decorator that adds a metaclass and recreates the class with it.
# Copied from 'six'.
def add_metaclass(metaclass):
"""Class decorator for creating a class with a metaclass."""
def wrapper(cls):
orig_vars = cls.__dict__.copy()
slots = orig_vars.get('__slots__')
if slots is not None:
if isinstance(slots, str):
slots = [slots]
for slots_var in slots:
orig_vars.pop(slots_var)
orig_vars.pop('__dict__', None)
orig_vars.pop('__weakref__', None)
return metaclass(cls.__name__, cls.__bases__, orig_vars)
return wrapper
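# Usage sketch:
#
#     @add_metaclass(Meta)
#     class Foo(Base): ...
#
# behaves like 'class Foo(Base, metaclass=Meta)' in Py3 syntax while staying
# importable on Py2.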
def raise_error_if_module_name_forbidden(full_module_name):
# it is a bad idea to name the pyx file "cython.pyx", so fail early
if full_module_name == 'cython' or full_module_name.startswith('cython.'):
raise ValueError('cython is a special module, cannot be used as a module name')
def build_hex_version(version_string):
"""
Parse and translate '4.3a1' into the readable hex representation '0x040300A1' (like PY_VERSION_HEX).
"""
# First, parse '4.12a1' into [4, 12, 0, 0xA1].
digits = []
release_status = 0xF0
for digit in re.split('([.abrc]+)', version_string):
if digit in ('a', 'b', 'rc'):
release_status = {'a': 0xA0, 'b': 0xB0, 'rc': 0xC0}[digit]
digits = (digits + [0, 0])[:3] # 1.2a1 -> 1.2.0a1
elif digit != '.':
digits.append(int(digit))
digits = (digits + [0] * 3)[:4]
digits[3] += release_status
# Then, build a single hex value, two hex digits per version part.
hexversion = 0
for digit in digits:
hexversion = (hexversion << 8) + digit
return '0x%08X' % hexversion
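# For instance, build_hex_version('0.29.28') returns '0x001D1CF0' (0xF0 marks a
# final release), consistent with the '4.3a1' -> '0x040300A1' example above.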
# Cython-0.29.28/Cython/__init__.py
from __future__ import absolute_import
from .Shadow import __version__
# Void cython.* directives (for case insensitive operating systems).
from .Shadow import *
def load_ipython_extension(ip):
"""Load the extension in IPython."""
from .Build.IpythonMagic import CythonMagics # pylint: disable=cyclic-import
ip.register_magics(CythonMagics)
# Cython-0.29.28/Demos/Makefile
all:
python setup.py build_ext --inplace
test: all
python run_primes.py 20
python run_numeric_demo.py
python run_spam.py
python integrate_timing.py
cd callback; $(MAKE) test
cd embed; $(MAKE) test
clean:
@echo Cleaning Demos
@rm -f *.c *.o *.so *~ core
@rm -rf build
@cd callback; $(MAKE) clean
@cd embed; $(MAKE) clean
# Cython-0.29.28/Demos/Makefile.nodistutils
PYHOME = $(HOME)/pkg/python/version
PYINCLUDE = \
-I$(PYHOME)/include/python2.2 \
-I$(PYHOME)/$(ARCH)/include/python2.2
%.c: %.pyx
../bin/cython $<
%.o: %.c
gcc -c -fPIC $(PYINCLUDE) $<
%.so: %.o
gcc -shared $< -lm -o $@
all: primes.so spam.so numeric_demo.so
clean:
@echo Cleaning Demos
@rm -f *.c *.o *.so *~ core core.*
@cd callback; $(MAKE) clean
@cd embed; $(MAKE) clean
# Cython-0.29.28/Demos/benchmarks/bpnn3.pxd
cimport cython
cdef double rand(double a, double b, random=*)
@cython.locals(i=Py_ssize_t)
cdef list makeMatrix(Py_ssize_t I, Py_ssize_t J, fill=*)
cdef class NN:
cdef Py_ssize_t ni, nh, no
cdef list ai, ah, ao
cdef list wi, wo
cdef list ci, co
@cython.locals(i=Py_ssize_t, j=Py_ssize_t, k=Py_ssize_t)
cpdef update(self, list inputs)
@cython.locals(i=Py_ssize_t, j=Py_ssize_t, k=Py_ssize_t, change=double)
cpdef double backPropagate(self, list targets, double N, M)
@cython.locals(i=Py_ssize_t, p=list, error=double)
cpdef train(self, list patterns, Py_ssize_t iterations=*, double N=*, M=*)
# Cython-0.29.28/Demos/benchmarks/bpnn3.py
#!/usr/bin/python
# Back-Propagation Neural Networks
#
# Written in Python. See http://www.python.org/
#
# Neil Schemenauer
import math
import random as random
# Local imports
import util
random.seed(0)
# calculate a random number where: a <= rand < b
def rand(a, b, random=random.random):
return (b-a)*random() + a
# Make a matrix (we could use NumPy to speed this up)
def makeMatrix(I, J, fill=0.0):
m = []
for i in range(I):
m.append([fill]*J)
return m
class NN(object):
# print 'class NN'
def __init__(self, ni, nh, no):
# number of input, hidden, and output nodes
self.ni = ni + 1 # +1 for bias node
self.nh = nh
self.no = no
# activations for nodes
self.ai = [1.0]*self.ni
self.ah = [1.0]*self.nh
self.ao = [1.0]*self.no
# create weights
self.wi = makeMatrix(self.ni, self.nh)
self.wo = makeMatrix(self.nh, self.no)
# set them to random values
for i in range(self.ni):
for j in range(self.nh):
self.wi[i][j] = rand(-2.0, 2.0)
for j in range(self.nh):
for k in range(self.no):
self.wo[j][k] = rand(-2.0, 2.0)
# last change in weights for momentum
self.ci = makeMatrix(self.ni, self.nh)
self.co = makeMatrix(self.nh, self.no)
def update(self, inputs):
# print 'update', inputs
if len(inputs) != self.ni-1:
raise ValueError('wrong number of inputs')
# input activations
for i in range(self.ni-1):
#self.ai[i] = 1.0/(1.0+math.exp(-inputs[i]))
self.ai[i] = inputs[i]
# hidden activations
for j in range(self.nh):
sum = 0.0
for i in range(self.ni):
sum = sum + self.ai[i] * self.wi[i][j]
self.ah[j] = 1.0/(1.0+math.exp(-sum))
# output activations
for k in range(self.no):
sum = 0.0
for j in range(self.nh):
sum = sum + self.ah[j] * self.wo[j][k]
self.ao[k] = 1.0/(1.0+math.exp(-sum))
return self.ao[:]
def backPropagate(self, targets, N, M):
# print N, M
if len(targets) != self.no:
raise ValueError('wrong number of target values')
# calculate error terms for output
output_deltas = [0.0] * self.no
# print self.no
for k in range(self.no):
ao = self.ao[k]
output_deltas[k] = ao*(1-ao)*(targets[k]-ao)
# calculate error terms for hidden
hidden_deltas = [0.0] * self.nh
for j in range(self.nh):
sum = 0.0
for k in range(self.no):
sum = sum + output_deltas[k]*self.wo[j][k]
hidden_deltas[j] = self.ah[j]*(1-self.ah[j])*sum
# update output weights
for j in range(self.nh):
for k in range(self.no):
change = output_deltas[k]*self.ah[j]
self.wo[j][k] = self.wo[j][k] + N*change + M*self.co[j][k]
self.co[j][k] = change
# update input weights
for i in range(self.ni):
for j in range(self.nh):
change = hidden_deltas[j]*self.ai[i]
self.wi[i][j] = self.wi[i][j] + N*change + M*self.ci[i][j]
self.ci[i][j] = change
# calculate error
error = 0.0
for k in range(len(targets)):
error = error + 0.5*(targets[k]-self.ao[k])**2
return error
def test(self, patterns):
for p in patterns:
print('%s -> %s' % (p[0], self.update(p[0])))
def weights(self):
print('Input weights:')
for i in range(self.ni):
print(self.wi[i])
print('')
print('Output weights:')
for j in range(self.nh):
print(self.wo[j])
def train(self, patterns, iterations=2000, N=0.5, M=0.1):
# N: learning rate
# M: momentum factor
for i in range(iterations):
error = 0.0
for p in patterns:
inputs = p[0]
targets = p[1]
self.update(inputs)
error = error + self.backPropagate(targets, N, M)
#if i % 100 == 0:
# print i, 'error %-14f' % error
def demo():
# Teach network XOR function
pat = [
[[0,0], [0]],
[[0,1], [1]],
[[1,0], [1]],
[[1,1], [0]]
]
# create a network with two inputs, three hidden nodes, and one output node
n = NN(2, 3, 1)
# train it with some patterns
n.train(pat, 5000)
# test it
#n.test(pat)
def time(fn, *args):
import time, traceback
begin = time.time()
result = fn(*args)
end = time.time()
return result, end-begin
def test_bpnn(iterations):
times = []
for _ in range(iterations):
result, t = time(demo)
times.append(t)
return times
main = test_bpnn
if __name__ == "__main__":
import optparse
parser = optparse.OptionParser(
usage="%prog [options]",
description=("Test the performance of a neural network."))
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, test_bpnn)
# Cython-0.29.28/Demos/benchmarks/chaos.pxd
cimport cython
cdef extern from "math.h":
cpdef double sqrt(double x)
@cython.final
cdef class GVector:
cdef public double x, y, z
cpdef double Mag(self)
cpdef double dist(self, GVector other)
cpdef list GetKnots(list points, long degree)
@cython.final
cdef class Spline:
cdef list knots
cdef list points
cdef long degree
cpdef (long, long) GetDomain(self)
cpdef long GetIndex(self, u)
@cython.final
cdef class Chaosgame:
cdef list splines
cdef double thickness
cdef double minx, miny, maxx, maxy, height, width
cdef list num_trafos
cdef double num_total
cpdef tuple get_random_trafo(self)
cpdef GVector transform_point(self, GVector point, trafo=*)
cpdef truncate(self, GVector point)
cpdef create_image_chaos(self, timer, long w, long h, long n)
# Cython-0.29.28/Demos/benchmarks/chaos.py
# Copyright (C) 2005 Carl Friedrich Bolz
"""create chaosgame-like fractals
"""
from __future__ import division, print_function
import cython
import time
import operator
import optparse
import random
random.seed(1234)
from functools import reduce
if not cython.compiled:
from math import sqrt
class GVector(object):
def __init__(self, x = 0, y = 0, z = 0):
self.x = x
self.y = y
self.z = z
def Mag(self):
return sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)
def dist(self, other):
return sqrt((self.x - other.x) ** 2 +
(self.y - other.y) ** 2 +
(self.z - other.z) ** 2)
@cython.locals(self="GVector", other="GVector")
def __add__(self, other):
if not isinstance(other, GVector):
raise ValueError("Can't add GVector to " + str(type(other)))
v = GVector(self.x + other.x, self.y + other.y, self.z + other.z)
return v
@cython.locals(self="GVector", other="GVector")
def __sub__(self, other):
return self + other * -1
@cython.locals(self="GVector", other=cython.double)
def __mul__(self, other):
v = GVector(self.x * other, self.y * other, self.z * other)
return v
__rmul__ = __mul__
@cython.locals(other="GVector", l1=cython.double, l2_=cython.double)
def linear_combination(self, other, l1, l2=None):
l2_ = 1 - l1 if l2 is None else l2
v = GVector(self.x * l1 + other.x * l2_,
self.y * l1 + other.y * l2_,
self.z * l1 + other.z * l2_)
return v
def __str__(self):
return "<%f, %f, %f>" % (self.x, self.y, self.z)
def __repr__(self):
return "GVector(%f, %f, %f)" % (self.x, self.y, self.z)
def GetKnots(points, degree):
knots = [0] * degree + list(range(1, len(points) - degree))  # list() keeps this valid on Py3, where range() is lazy
knots += [len(points) - degree] * degree
return knots
class Spline(object):
"""Class for representing B-Splines and NURBS of arbitrary degree"""
def __init__(self, points, degree = 3, knots = None):
"""Creates a Spline. points is a list of GVector, degree is the degree of the Spline."""
if knots is None:
self.knots = GetKnots(points, degree)
else:
if len(points) > len(knots) - degree + 1:
raise ValueError("too many control points")
elif len(points) < len(knots) - degree + 1:
raise ValueError("not enough control points")
last = knots[0]
for cur in knots[1:]:
if cur < last:
raise ValueError("knots not strictly increasing")
last = cur
self.knots = knots
self.points = points
self.degree = degree
def GetDomain(self):
"""Returns the domain of the B-Spline"""
return (self.knots[self.degree - 1],
self.knots[len(self.knots) - self.degree])
@cython.locals(ik=cython.long, ii=cython.long, I=cython.long,
ua=cython.long, ub=cython.long, u=cython.double,
dom=(cython.long, cython.long))
def __call__(self, u):
"""Calculates a point of the B-Spline using de Boors Algorithm"""
dom = self.GetDomain()
if u < dom[0] or u > dom[1]:
raise ValueError("Function value not in domain")
if u == dom[0]:
return self.points[0]
if u == dom[1]:
return self.points[-1]
I = self.GetIndex(u)
d = [self.points[I - self.degree + 1 + ii]
for ii in range(self.degree + 1)]
U = self.knots
for ik in range(1, self.degree + 1):
for ii in range(I - self.degree + ik + 1, I + 2):
ua = U[ii + self.degree - ik]
ub = U[ii - 1]
co1 = (ua - u) / (ua - ub)
co2 = (u - ub) / (ua - ub)
index = ii - I + self.degree - ik - 1
d[index] = d[index].linear_combination(d[index + 1], co1, co2)
return d[0]
@cython.locals(ii=cython.long, I=cython.long, dom=(cython.long, cython.long))
def GetIndex(self, u):
dom = self.GetDomain()
for ii in range(self.degree - 1, len(self.knots) - self.degree):
if self.knots[ii] <= u < self.knots[ii + 1]:
I = ii
break
else:
I = dom[1] - 1
return I
def __len__(self):
return len(self.points)
def __repr__(self):
return "Spline(%r, %r, %r)" % (self.points, self.degree, self.knots)
class Chaosgame(object):
@cython.locals(splines=list, thickness=cython.double, maxlength=cython.double, length=cython.double,
curr=GVector, last=GVector, p=GVector, spl=Spline, t=cython.double, i=int)
def __init__(self, splines, thickness=0.1):
self.splines = splines
self.thickness = thickness
self.minx = min([p.x for spl in splines for p in spl.points])
self.miny = min([p.y for spl in splines for p in spl.points])
self.maxx = max([p.x for spl in splines for p in spl.points])
self.maxy = max([p.y for spl in splines for p in spl.points])
self.height = self.maxy - self.miny
self.width = self.maxx - self.minx
self.num_trafos = []
maxlength = thickness * self.width / self.height
for spl in splines:
length = 0
curr = spl(0)
for i in range(1, 1000):
last = curr
t = 1 / 999 * i
curr = spl(t)
length += curr.dist(last)
self.num_trafos.append(max(1, int(length / maxlength * 1.5)))
self.num_total = reduce(operator.add, self.num_trafos, 0)
def get_random_trafo(self):
r = random.randrange(int(self.num_total) + 1)
l = 0
for i in range(len(self.num_trafos)):
if l <= r < l + self.num_trafos[i]:
return i, random.randrange(self.num_trafos[i])
l += self.num_trafos[i]
return len(self.num_trafos) - 1, random.randrange(self.num_trafos[-1])
@cython.locals(neighbour="GVector", basepoint="GVector", derivative="GVector",
seg_length=cython.double, start=cython.double, end=cython.double,
t=cython.double)
def transform_point(self, point, trafo=None):
x = (point.x - self.minx) / self.width
y = (point.y - self.miny) / self.height
if trafo is None:
trafo = self.get_random_trafo()
start, end = self.splines[trafo[0]].GetDomain()
length = end - start
seg_length = length / self.num_trafos[trafo[0]]
t = start + seg_length * trafo[1] + seg_length * x
basepoint = self.splines[trafo[0]](t)
if t + 1/50000 > end:
neighbour = self.splines[trafo[0]](t - 1/50000)
derivative = neighbour - basepoint
else:
neighbour = self.splines[trafo[0]](t + 1/50000)
derivative = basepoint - neighbour
if derivative.Mag() != 0:
basepoint.x += derivative.y / derivative.Mag() * (y - 0.5) * \
self.thickness
basepoint.y += -derivative.x / derivative.Mag() * (y - 0.5) * \
self.thickness
else:
print("r", end='')
self.truncate(basepoint)
return basepoint
def truncate(self, point):
if point.x >= self.maxx:
point.x = self.maxx
if point.y >= self.maxy:
point.y = self.maxy
if point.x < self.minx:
point.x = self.minx
if point.y < self.miny:
point.y = self.miny
@cython.locals(x=cython.long, y=cython.long)
def create_image_chaos(self, timer, w, h, n):
im = [[1] * h for i in range(w)]
point = GVector((self.maxx + self.minx) / 2,
(self.maxy + self.miny) / 2, 0)
times = []
for _ in range(n):
t1 = timer()
for i in range(5000):
point = self.transform_point(point)
x = int((point.x - self.minx) / self.width * w)
y = int((point.y - self.miny) / self.height * h)
if x == w:
x -= 1
if y == h:
y -= 1
im[x][h - y - 1] = 0
t2 = timer()
times.append(t2 - t1)
return times
def main(n, timer=time.time):
splines = [
Spline([
GVector(1.597350, 3.304460, 0.000000),
GVector(1.575810, 4.123260, 0.000000),
GVector(1.313210, 5.288350, 0.000000),
GVector(1.618900, 5.329910, 0.000000),
GVector(2.889940, 5.502700, 0.000000),
GVector(2.373060, 4.381830, 0.000000),
GVector(1.662000, 4.360280, 0.000000)],
3, [0, 0, 0, 1, 1, 1, 2, 2, 2]),
Spline([
GVector(2.804500, 4.017350, 0.000000),
GVector(2.550500, 3.525230, 0.000000),
GVector(1.979010, 2.620360, 0.000000),
GVector(1.979010, 2.620360, 0.000000)],
3, [0, 0, 0, 1, 1, 1]),
Spline([
GVector(2.001670, 4.011320, 0.000000),
GVector(2.335040, 3.312830, 0.000000),
GVector(2.366800, 3.233460, 0.000000),
GVector(2.366800, 3.233460, 0.000000)],
3, [0, 0, 0, 1, 1, 1])
]
c = Chaosgame(splines, 0.25)
return c.create_image_chaos(timer, 1000, 1200, n)
if __name__ == "__main__":
import util
parser = optparse.OptionParser(
usage="%prog [options]",
description="Test the performance of the Chaos benchmark")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, main)
===== Cython-0.29.28/Demos/benchmarks/coroutines.py =====
#!/usr/bin/python3
# cython: language_level=3
# micro benchmarks for coroutines
COUNT = 100000
import cython
async def done(n):
return n
@cython.locals(N=cython.Py_ssize_t)
async def count_to(N):
count = 0
for i in range(N):
count += await done(i)
return count
async def await_all(*coroutines):
count = 0
for coro in coroutines:
count += await coro
return count
@cython.locals(N=cython.Py_ssize_t)
def bm_await_nested(N):
return await_all(
count_to(N),
await_all(
count_to(N),
await_all(*[count_to(COUNT // i) for i in range(1, N+1)]),
count_to(N)),
count_to(N))
def await_one(coro):
a = coro.__await__()
try:
while True:
await_one(next(a))
except StopIteration as exc:
result = exc.args[0] if exc.args else None
else:
result = 0
return result
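# (await_one drives a coroutine to completion by hand: it keeps resuming the
# underlying generator and unwraps the value carried by the final
# StopIteration, so the benchmark needs no event loop.)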
def time(fn, *args):
from time import time
begin = time()
result = await_one(fn(*args))
end = time()
return result, end-begin
def benchmark(N):
times = []
for _ in range(N):
result, t = time(bm_await_nested, 1000)
times.append(t)
assert result == 8221043302, result
return times
main = benchmark
if __name__ == "__main__":
import optparse
parser = optparse.OptionParser(
usage="%prog [options]",
description="Micro benchmarks for generators.")
import util
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, benchmark)
===== Cython-0.29.28/Demos/benchmarks/fstrings.py =====
# coding=utf-8
# NOTE: requires Python 3.6 or later if not compiled with Cython
from time import time
import cython
@cython.locals(x=int, n=int)
def run():
t0 = time()
f = 1.0
x = 2
n = 5
i = 12345678
s = 'abc'
u = u'üöä'
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
# repeat without fast looping ...
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:.2}--{n:2}{n:5}oo{i}"
f"{n}oo{n*10}{f:3.2}--{n:2}{n:5}oo{i}{s}"
f"{n}oo{n*10}{f:5.2}--{n:2}{n:5}oo{i}{u}"
f"{n}oo{n*10}{f:2.2}--{n:2}{n:5}oo{i}{s}xx{u}"
tk = time()
return tk - t0
def main(n):
run() # warmup
times = []
for i in range(n):
times.append(run())
return times
if __name__ == "__main__":
import optparse
import util
parser = optparse.OptionParser(
usage="%prog [options]",
description="Test the performance of fstring literal formatting")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, main)
===== Cython-0.29.28/Demos/benchmarks/generators.py =====
#!/usr/bin/python
# micro benchmarks for generators
COUNT = 10000
import cython
@cython.locals(N=cython.Py_ssize_t)
def count_to(N):
for i in range(N):
yield i
@cython.locals(i=cython.Py_ssize_t)
def round_robin(*_iterators):
iterators = list(_iterators)
to_drop = []
while iterators:
for i, it in enumerate(iterators):
try:
value = next(it)
except StopIteration:
to_drop.append(i)
else:
yield value
if to_drop:
for i in reversed(to_drop):
del iterators[i]
del to_drop[:]
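# For illustration: round_robin(iter([1, 4]), iter([2, 5, 6]), iter([3]))
# yields 1, 2, 3, 4, 5, 6 -- one item from each iterator per round, dropping
# exhausted iterators along the way.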
def yield_from(*iterators):
for it in iterators:
yield from it
def bm_plain(N):
return count_to(COUNT * N)
def bm_round_robin(N):
return round_robin(*[ count_to(COUNT // i) for i in range(1,N+1) ])
def bm_yield_from(N):
return yield_from(count_to(N),
round_robin(*[ yield_from(count_to(COUNT // i))
for i in range(1,N+1) ]),
count_to(N))
def bm_yield_from_nested(N):
return yield_from(count_to(N),
yield_from(count_to(N),
round_robin(*[ yield_from(count_to(COUNT // i))
for i in range(1,N+1) ]),
count_to(N)),
count_to(N))
def time(fn, *args):
from time import time
begin = time()
result = list(fn(*args))
end = time()
return result, end-begin
def benchmark(N):
times = []
for _ in range(N):
result, t = time(bm_yield_from_nested, 10)
times.append(t)
return times
main = benchmark
if __name__ == "__main__":
import optparse
parser = optparse.OptionParser(
usage="%prog [options]",
description=("Micro benchmarks for generators."))
import util
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, benchmark)
===== Cython-0.29.28/Demos/benchmarks/hexiom2.pxd =====
cimport cython
cdef object EMPTY
cdef int IMPOSSIBLE, SOLVED, OPEN
cdef int ASCENDING, DESCENDING
cdef class Dir:
cdef public int x, y
@cython.final
cdef class Done:
cdef public int count
cdef public list cells
cdef Done clone(self)
cdef inline int set_done(self, int i, v) except -123
cdef inline bint already_done(self, int i) except -123
cdef inline bint remove(self, int i, v) except -123
cdef inline bint remove_unfixed(self, v) except -123
cdef int next_cell(self, Pos pos, int strategy=*) except -123
cdef int filter_tiles(self, int* tiles) except -123
cdef int next_cell_min_choice(self) except -123
cdef int next_cell_max_choice(self) except -123
cdef int next_cell_highest_value(self) except -123
cdef int next_cell_first(self) except -123
cdef int next_cell_max_neighbors(self, Pos pos) except -123
cdef int next_cell_min_neighbors(self, Pos pos) except -123
@cython.final
cdef class Node:
cdef public tuple pos
cdef public int id
cdef public list links
@cython.final
cdef class Hex:
cdef public list nodes_by_id
cdef public dict nodes_by_pos
cdef public int size
cdef public int count
cdef int link_nodes(self) except -123
cdef bint contains_pos(self, tuple pos)
cdef Node get_by_pos(self, tuple pos)
cdef Node get_by_id(self, int id)
@cython.final
cdef class Pos:
cdef public Hex hex
cdef public Done done
cdef public int[8] tiles
cdef Pos clone(self)
cdef bint constraint_pass(Pos pos, last_move=*) except -123
cdef list find_moves(Pos pos, int strategy, int order)
cdef inline int play_move(Pos pos, tuple move) except -123
cdef print_pos(Pos pos, output)
cdef int solved(Pos pos, output, bint verbose=*) except -123
cdef int solve_step(Pos prev, int strategy, order, output, bint first=*) except -123
cdef check_valid(Pos pos)
===== Cython-0.29.28/Demos/benchmarks/hexiom2.py =====
"""Benchmark from Laurent Vaucher.
Source: https://github.com/slowfrog/hexiom : hexiom2.py, level36.txt
(Main function tweaked by Armin Rigo.)
"""
from __future__ import division, print_function
import time
from io import StringIO
import cython
##################################
class Dir(object):
def __init__(self, x, y):
self.x = x
self.y = y
DIRS = [ Dir(1, 0),
Dir(-1, 0),
Dir(0, 1),
Dir(0, -1),
Dir(1, 1),
Dir(-1, -1) ]
EMPTY = 7
##################################
class Done(object):
MIN_CHOICE_STRATEGY = 0
MAX_CHOICE_STRATEGY = 1
HIGHEST_VALUE_STRATEGY = 2
FIRST_STRATEGY = 3
MAX_NEIGHBORS_STRATEGY = 4
MIN_NEIGHBORS_STRATEGY = 5
def __init__(self, count, empty=False):
self.count = count
self.cells = None if empty else [[0, 1, 2, 3, 4, 5, 6, EMPTY] for i in range(count)]
def clone(self):
ret = Done(self.count, True)
ret.cells = [self.cells[i][:] for i in range(self.count)]
return ret
def __getitem__(self, i):
return self.cells[i]
def set_done(self, i, v):
self.cells[i] = [v]
def already_done(self, i):
return len(self.cells[i]) == 1
def remove(self, i, v):
if v in self.cells[i]:
self.cells[i].remove(v)
return True
else:
return False
def remove_all(self, v):
for i in range(self.count):
self.remove(i, v)
def remove_unfixed(self, v):
changed = False
for i in range(self.count):
if not self.already_done(i):
if self.remove(i, v):
changed = True
return changed
def filter_tiles(self, tiles):
for v in range(8):
if tiles[v] == 0:
self.remove_all(v)
@cython.locals(i=cython.int)
def next_cell_min_choice(self):
minlen = 10
mini = -1
for i in range(self.count):
if 1 < len(self.cells[i]) < minlen:
minlen = len(self.cells[i])
mini = i
return mini
@cython.locals(i=cython.int)
def next_cell_max_choice(self):
maxlen = 1
maxi = -1
for i in range(self.count):
if maxlen < len(self.cells[i]):
maxlen = len(self.cells[i])
maxi = i
return maxi
@cython.locals(i=cython.int)
def next_cell_highest_value(self):
maxval = -1
maxi = -1
for i in range(self.count):
if (not self.already_done(i)):
maxvali = max([k for k in self.cells[i] if k != EMPTY])
if maxval < maxvali:
maxval = maxvali
maxi = i
return maxi
@cython.locals(i=cython.int)
def next_cell_first(self):
for i in range(self.count):
if (not self.already_done(i)):
return i
return -1
@cython.locals(i=cython.int)
def next_cell_max_neighbors(self, pos):
maxn = -1
maxi = -1
for i in range(self.count):
if not self.already_done(i):
cells_around = pos.hex.get_by_id(i).links
n = sum([1 if (self.already_done(nid) and (self[nid][0] != EMPTY)) else 0
for nid in cells_around])
if n > maxn:
maxn = n
maxi = i
return maxi
@cython.locals(i=cython.int)
def next_cell_min_neighbors(self, pos):
minn = 7
mini = -1
for i in range(self.count):
if not self.already_done(i):
cells_around = pos.hex.get_by_id(i).links
n = sum([1 if (self.already_done(nid) and (self[nid][0] != EMPTY)) else 0
for nid in cells_around])
if n < minn:
minn = n
mini = i
return mini
def next_cell(self, pos, strategy=HIGHEST_VALUE_STRATEGY):
if strategy == Done.HIGHEST_VALUE_STRATEGY:
return self.next_cell_highest_value()
elif strategy == Done.MIN_CHOICE_STRATEGY:
return self.next_cell_min_choice()
elif strategy == Done.MAX_CHOICE_STRATEGY:
return self.next_cell_max_choice()
elif strategy == Done.FIRST_STRATEGY:
return self.next_cell_first()
elif strategy == Done.MAX_NEIGHBORS_STRATEGY:
return self.next_cell_max_neighbors(pos)
elif strategy == Done.MIN_NEIGHBORS_STRATEGY:
return self.next_cell_min_neighbors(pos)
else:
raise Exception("Wrong strategy: %d" % strategy)
##################################
class Node(object):
def __init__(self, pos, id, links):
self.pos = pos
self.id = id
self.links = links
##################################
class Hex(object):
@cython.locals(size=cython.int, id=cython.int, x=cython.int, y=cython.int)
def __init__(self, size):
self.size = size
self.count = 3 * size * (size - 1) + 1
self.nodes_by_id = self.count * [None]
self.nodes_by_pos = {}
id = 0
for y in range(size):
for x in range(size + y):
pos = (x, y)
node = Node(pos, id, [])
self.nodes_by_pos[pos] = node
self.nodes_by_id[node.id] = node
id += 1
for y in range(1, size):
for x in range(y, size * 2 - 1):
ry = size + y - 1
pos = (x, ry)
node = Node(pos, id, [])
self.nodes_by_pos[pos] = node
self.nodes_by_id[node.id] = node
id += 1
@cython.locals(dir=Dir, x=cython.int, y=cython.int, nx=cython.int, ny=cython.int, node=Node)
def link_nodes(self):
for node in self.nodes_by_id:
(x, y) = node.pos
for dir in DIRS:
nx = x + dir.x
ny = y + dir.y
if self.contains_pos((nx, ny)):
node.links.append(self.nodes_by_pos[(nx, ny)].id)
def contains_pos(self, pos):
return pos in self.nodes_by_pos
def get_by_pos(self, pos):
return self.nodes_by_pos[pos]
def get_by_id(self, id):
return self.nodes_by_id[id]
##################################
class Pos(object):
def __init__(self, hex, tiles, done = None):
self.hex = hex
self.tiles = tiles
self.done = Done(hex.count) if done is None else done
def clone(self):
return Pos(self.hex, self.tiles, self.done.clone())
##################################
@cython.locals(pos=Pos, i=cython.long, v=cython.int,
nid=cython.int, num=cython.int,
empties=cython.int, filled=cython.int,
vmax=cython.int, vmin=cython.int, cell=list, left=cython.int[8])
def constraint_pass(pos, last_move=None):
changed = False
left = pos.tiles[:]
done = pos.done
# Remove impossible values from free cells
free_cells = (range(done.count) if last_move is None
else pos.hex.get_by_id(last_move).links)
for i in free_cells:
if not done.already_done(i):
vmax = 0
vmin = 0
cells_around = pos.hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] != EMPTY:
vmin += 1
vmax += 1
else:
vmax += 1
for num in range(7):
if (num < vmin) or (num > vmax):
if done.remove(i, num):
changed = True
# Computes how many of each value is still free
for cell in done.cells:
if len(cell) == 1:
left[cell[0]] -= 1
for v in range(8):
# If there is none, remove the possibility from all tiles
if (pos.tiles[v] > 0) and (left[v] == 0):
if done.remove_unfixed(v):
changed = True
else:
possible = sum([(1 if v in cell else 0) for cell in done.cells])
# If the number of possible cells for a value is exactly the number of available tiles
# put a tile in each cell
if pos.tiles[v] == possible:
for i in range(done.count):
cell = done.cells[i]
if (not done.already_done(i)) and (v in cell):
done.set_done(i, v)
changed = True
# Force empty or non-empty around filled cells
filled_cells = (range(done.count) if last_move is None
else [last_move])
for i in filled_cells:
if done.already_done(i):
num = done[i][0]
empties = 0
filled = 0
unknown = []
cells_around = pos.hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] == EMPTY:
empties += 1
else:
filled += 1
else:
unknown.append(nid)
if len(unknown) > 0:
if num == filled:
for u in unknown:
if EMPTY in done[u]:
done.set_done(u, EMPTY)
changed = True
#else:
# raise Exception("Houston, we've got a problem")
elif num == filled + len(unknown):
for u in unknown:
if done.remove(u, EMPTY):
changed = True
return changed
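# (constraint_pass performs a single round of constraint propagation; the
# solver below calls it in a loop until it reports that nothing changed.)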
ASCENDING = 1
DESCENDING = -1
def find_moves(pos, strategy, order):
done = pos.done
cell_id = done.next_cell(pos, strategy)
if cell_id < 0:
return []
if order == ASCENDING:
return [(cell_id, v) for v in done[cell_id]]
else:
# Try higher values first and EMPTY last
moves = list(reversed([(cell_id, v) for v in done[cell_id] if v != EMPTY]))
if EMPTY in done[cell_id]:
moves.append((cell_id, EMPTY))
return moves
def play_move(pos, move):
(cell_id, i) = move
pos.done.set_done(cell_id, i)
@cython.locals(x=cython.int, y=cython.int, ry=cython.int, id=cython.int)
def print_pos(pos, output):
hex = pos.hex
done = pos.done
size = hex.size
for y in range(size):
print(u" " * (size - y - 1), end=u"", file=output)
for x in range(size + y):
pos2 = (x, y)
id = hex.get_by_pos(pos2).id
if done.already_done(id):
c = str(done[id][0]) if done[id][0] != EMPTY else u"."
else:
c = u"?"
print(u"%s " % c, end=u"", file=output)
print(end=u"\n", file=output)
for y in range(1, size):
print(u" " * y, end=u"", file=output)
for x in range(y, size * 2 - 1):
ry = size + y - 1
pos2 = (x, ry)
id = hex.get_by_pos(pos2).id
if done.already_done(id):
c = str(done[id][0]) if done[id][0] != EMPTY else (u".")
else:
c = u"?"
print(u"%s " % c, end=u"", file=output)
print(end=u"\n", file=output)
OPEN = 0
SOLVED = 1
IMPOSSIBLE = -1
@cython.locals(i=cython.int, num=cython.int, nid=cython.int,
vmin=cython.int, vmax=cython.int, tiles=cython.int[8])
def solved(pos, output, verbose=False):
hex = pos.hex
tiles = pos.tiles[:]
done = pos.done
exact = True
all_done = True
for i in range(hex.count):
if len(done[i]) == 0:
return IMPOSSIBLE
elif done.already_done(i):
num = done[i][0]
tiles[num] -= 1
if (tiles[num] < 0):
return IMPOSSIBLE
vmax = 0
vmin = 0
if num != EMPTY:
cells_around = hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] != EMPTY:
vmin += 1
vmax += 1
else:
vmax += 1
if (num < vmin) or (num > vmax):
return IMPOSSIBLE
if num != vmin:
exact = False
else:
all_done = False
if (not all_done) or (not exact):
return OPEN
print_pos(pos, output)
return SOLVED
@cython.locals(move=tuple)
def solve_step(prev, strategy, order, output, first=False):
if first:
pos = prev.clone()
while constraint_pass(pos):
pass
else:
pos = prev
moves = find_moves(pos, strategy, order)
if len(moves) == 0:
return solved(pos, output)
else:
for move in moves:
#print("Trying (%d, %d)" % (move[0], move[1]))
ret = OPEN
new_pos = pos.clone()
play_move(new_pos, move)
#print_pos(new_pos)
while constraint_pass(new_pos, move[0]):
pass
cur_status = solved(new_pos, output)
if cur_status != OPEN:
ret = cur_status
else:
ret = solve_step(new_pos, strategy, order, output)
if ret == SOLVED:
return SOLVED
return IMPOSSIBLE
@cython.locals(tot=cython.int, tiles=cython.int[8])
def check_valid(pos):
hex = pos.hex
tiles = pos.tiles
done = pos.done
# fill missing entries in tiles
tot = 0
for i in range(8):
if tiles[i] > 0:
tot += tiles[i]
else:
tiles[i] = 0
# check total
if tot != hex.count:
raise Exception("Invalid input. Expected %d tiles, got %d." % (hex.count, tot))
def solve(pos, strategy, order, output):
check_valid(pos)
return solve_step(pos, strategy, order, output, first=True)
# TODO Write an 'iterator' to go over all x,y positions
@cython.locals(x=cython.int, y=cython.int, p=cython.int, tiles=cython.int[8],
size=cython.int, inctile=cython.int, linei=cython.int)
def read_file(file):
lines = [line.strip("\r\n") for line in file.splitlines()]
size = int(lines[0])
hex = Hex(size)
linei = 1
tiles = 8 * [0]
done = Done(hex.count)
for y in range(size):
line = lines[linei][size - y - 1:]
p = 0
for x in range(size + y):
tile = line[p:p + 2]
p += 2
if tile[1] == ".":
inctile = EMPTY
else:
inctile = int(tile)
tiles[inctile] += 1
# Look for locked tiles
if tile[0] == "+":
print("Adding locked tile: %d at pos %d, %d, id=%d" %
(inctile, x, y, hex.get_by_pos((x, y)).id))
done.set_done(hex.get_by_pos((x, y)).id, inctile)
linei += 1
for y in range(1, size):
ry = size - 1 + y
line = lines[linei][y:]
p = 0
for x in range(y, size * 2 - 1):
tile = line[p:p + 2]
p += 2
if tile[1] == ".":
inctile = EMPTY
else:
inctile = int(tile)
tiles[inctile] += 1
# Look for locked tiles
if tile[0] == "+":
print("Adding locked tile: %d at pos %d, %d, id=%d" %
(inctile, x, ry, hex.get_by_pos((x, ry)).id))
done.set_done(hex.get_by_pos((x, ry)).id, inctile)
linei += 1
hex.link_nodes()
done.filter_tiles(tiles)
return Pos(hex, tiles, done)
def solve_file(file, strategy, order, output):
pos = read_file(file)
solve(pos, strategy, order, output)
def run_level36():
f = """\
4
2 1 1 2
3 3 3 . .
2 3 3 . 4 .
. 2 . 2 4 3 2
2 2 . . . 2
4 3 4 . .
3 2 3 3
"""
order = DESCENDING
strategy = Done.FIRST_STRATEGY
output = StringIO()
solve_file(f, strategy, order, output)
expected = """\
3 4 3 2
3 4 4 . 3
2 . . 3 4 3
2 . 1 . 3 . 2
3 3 . 2 . 2
3 . 2 . 2
2 2 . 1
"""
if output.getvalue() != expected:
raise AssertionError("got a wrong answer:\n%s" % output.getvalue())
def main(n):
    # Solve the level-36 puzzle once per requested iteration, timing each run.
l = []
for i in range(n):
t0 = time.time()
run_level36()
time_elapsed = time.time() - t0
l.append(time_elapsed)
return l
if __name__ == "__main__":
import util, optparse
parser = optparse.OptionParser(
usage="%prog [options]",
description="Test the performance of the hexiom2 benchmark")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, main)
===== Cython-0.29.28/Demos/benchmarks/meteor_contest.pxd =====
cimport cython
cdef list rotate(list ido, dict rd=*)
cdef list flip(list ido, dict fd=*)
cdef list permute(list ido, list r_ido)
@cython.locals(n_i_min=long)
cpdef solve(long n, long i_min, free, list curr_board, list pieces_left, list solutions,
list fps=*, list se_nh=*, bisect=*)
===== Cython-0.29.28/Demos/benchmarks/meteor_contest.py =====
# The Computer Language Benchmarks Game
# http://shootout.alioth.debian.org/
#
# contributed by Daniel Nanz, 2008-08-21
import optparse
import time
from bisect import bisect
import util
w, h = 5, 10
dir_no = 6
S, E = w * h, 2
SE = S + (E // 2)  # integer division keeps the direction offsets ints under Python 3
SW = SE - E
W, NW, NE = -E, -SE, -SW
def rotate(ido, rd={E: NE, NE: NW, NW: W, W: SW, SW: SE, SE: E}):
return [rd[o] for o in ido]
def flip(ido, fd={E: E, NE: SE, NW: SW, W: W, SW: NW, SE: NE}):
return [fd[o] for o in ido]
def permute(ido, r_ido):
ps = [ido]
for r in range(dir_no - 1):
ps.append(rotate(ps[-1]))
if ido == r_ido: # C2-symmetry
ps = ps[0:dir_no//2]
for pp in ps[:]:
ps.append(flip(pp))
return ps
def convert(ido):
'''incremental direction offsets -> "coordinate offsets" '''
out = [0]
for o in ido:
out.append(out[-1] + o)
return list(set(out))
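# For illustration: with E == 2, convert([E, E]) accumulates [0, 2, 4] and
# returns those distinct coordinate offsets (as a list, order unspecified).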
def get_footprints(board, cti, pieces):
fps = [[[] for p in range(len(pieces))] for ci in range(len(board))]
for c in board:
for pi, p in enumerate(pieces):
for pp in p:
fp = frozenset([cti[c + o] for o in pp if (c + o) in cti])
if len(fp) == 5:
fps[min(fp)][pi].append(fp)
return fps
def get_senh(board, cti):
'''-> south-east neighborhood'''
se_nh = []
nh = [E, SW, SE]
for c in board:
se_nh.append(frozenset([cti[c + o] for o in nh if (c + o) in cti]))
return se_nh
def get_puzzle(w=w, h=h):
board = [E*x + S*y + (y%2) for y in range(h) for x in range(w)]
cti = dict((board[i], i) for i in range(len(board)))
idos = [[E, E, E, SE], # incremental direction offsets
[SE, SW, W, SW],
[W, W, SW, SE],
[E, E, SW, SE],
[NW, W, NW, SE, SW],
[E, E, NE, W],
[NW, NE, NE, W],
[NE, SE, E, NE],
[SE, SE, E, SE],
[E, NW, NW, NW]]
perms = (permute(p, idos[3]) for p in idos) # restrict piece 4
pieces = [[convert(pp) for pp in p] for p in perms]
return (board, cti, pieces)
def print_board(board, w=w, h=h):
    for y in range(h):
        for x in range(w):
            print(board[x + y * w], end=' ')
        print('')
        if y % 2 == 0:
            print('', end=' ')
    print()
board, cti, pieces = get_puzzle()
fps = get_footprints(board, cti, pieces)
se_nh = get_senh(board, cti)
def solve(n, i_min, free, curr_board, pieces_left, solutions,
fps=fps, se_nh=se_nh, bisect=bisect):
fp_i_cands = fps[i_min]
for p in pieces_left:
fp_cands = fp_i_cands[p]
for fp in fp_cands:
if fp <= free:
n_curr_board = curr_board[:]
for ci in fp:
n_curr_board[ci] = p
if len(pieces_left) > 1:
n_free = free - fp
n_i_min = min(n_free)
if len(n_free & se_nh[n_i_min]) > 0:
n_pieces_left = pieces_left[:]
n_pieces_left.remove(p)
solve(n, n_i_min, n_free, n_curr_board,
n_pieces_left, solutions)
else:
s = ''.join(map(str, n_curr_board))
solutions.insert(bisect(solutions, s), s)
rs = s[::-1]
solutions.insert(bisect(solutions, rs), rs)
if len(solutions) >= n:
return
if len(solutions) >= n:
return
return
SOLVE_ARG = 60
def main(n):
times = []
for i in range(n):
t0 = time.time()
free = frozenset(range(len(board)))
curr_board = [-1] * len(board)
pieces_left = list(range(len(pieces)))
solutions = []
solve(SOLVE_ARG, 0, free, curr_board, pieces_left, solutions)
#print len(solutions), 'solutions found\n'
#for i in (0, -1): print_board(solutions[i])
tk = time.time()
times.append(tk - t0)
return times
if __name__ == "__main__":
parser = optparse.OptionParser(
usage="%prog [options]",
description="Test the performance of the Float benchmark")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, main)
===== Cython-0.29.28/Demos/benchmarks/nbody.pxd =====
cimport cython
@cython.locals(x=Py_ssize_t)
cdef combinations(list l)
@cython.locals(x1=double, x2=double, y1=double, y2=double, z1=double, z2=double,
m1=double, m2=double, vx=double, vy=double, vz=double, i=long)
cdef advance(double dt, long n, list bodies=*, list pairs=*)
@cython.locals(x1=double, x2=double, y1=double, y2=double, z1=double, z2=double,
m=double, m1=double, m2=double, vx=double, vy=double, vz=double)
cdef report_energy(list bodies=*, list pairs=*, double e=*)
@cython.locals(vx=double, vy=double, vz=double, m=double)
cdef offset_momentum(tuple ref, list bodies=*, double px=*, double py=*, double pz=*)
cpdef test_nbody(long iterations)
===== Cython-0.29.28/Demos/benchmarks/nbody.py =====
#!/usr/bin/env python
"""N-body benchmark from the Computer Language Benchmarks Game.
This is intended to support Unladen Swallow's perf.py. Accordingly, it has been
modified from the Shootout version:
- Accept standard Unladen Swallow benchmark options.
- Run report_energy()/advance() in a loop.
- Reimplement itertools.combinations() to work with older Python versions.
"""
# Pulled from http://shootout.alioth.debian.org/u64q/benchmark.php?test=nbody&lang=python&id=4
# Contributed by Kevin Carson.
# Modified by Tupteq, Fredrik Johansson, and Daniel Nanz.
__contact__ = "collinwinter@google.com (Collin Winter)"
# Python imports
import optparse
import sys
from time import time
# Local imports
import util
def combinations(l):
"""Pure-Python implementation of itertools.combinations(l, 2)."""
result = []
for x in range(len(l) - 1):
ls = l[x+1:]
for y in ls:
result.append((l[x],y))
return result
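# For illustration: combinations([1, 2, 3]) == [(1, 2), (1, 3), (2, 3)].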
PI = 3.14159265358979323
SOLAR_MASS = 4 * PI * PI
DAYS_PER_YEAR = 365.24
BODIES = {
'sun': ([0.0, 0.0, 0.0], [0.0, 0.0, 0.0], SOLAR_MASS),
'jupiter': ([4.84143144246472090e+00,
-1.16032004402742839e+00,
-1.03622044471123109e-01],
[1.66007664274403694e-03 * DAYS_PER_YEAR,
7.69901118419740425e-03 * DAYS_PER_YEAR,
-6.90460016972063023e-05 * DAYS_PER_YEAR],
9.54791938424326609e-04 * SOLAR_MASS),
'saturn': ([8.34336671824457987e+00,
4.12479856412430479e+00,
-4.03523417114321381e-01],
[-2.76742510726862411e-03 * DAYS_PER_YEAR,
4.99852801234917238e-03 * DAYS_PER_YEAR,
2.30417297573763929e-05 * DAYS_PER_YEAR],
2.85885980666130812e-04 * SOLAR_MASS),
'uranus': ([1.28943695621391310e+01,
-1.51111514016986312e+01,
-2.23307578892655734e-01],
[2.96460137564761618e-03 * DAYS_PER_YEAR,
2.37847173959480950e-03 * DAYS_PER_YEAR,
-2.96589568540237556e-05 * DAYS_PER_YEAR],
4.36624404335156298e-05 * SOLAR_MASS),
'neptune': ([1.53796971148509165e+01,
-2.59193146099879641e+01,
1.79258772950371181e-01],
[2.68067772490389322e-03 * DAYS_PER_YEAR,
1.62824170038242295e-03 * DAYS_PER_YEAR,
-9.51592254519715870e-05 * DAYS_PER_YEAR],
5.15138902046611451e-05 * SOLAR_MASS) }
SYSTEM = list(BODIES.values())
PAIRS = combinations(SYSTEM)
def advance(dt, n, bodies=SYSTEM, pairs=PAIRS):
for i in range(n):
for (([x1, y1, z1], v1, m1),
([x2, y2, z2], v2, m2)) in pairs:
dx = x1 - x2
dy = y1 - y2
dz = z1 - z2
mag = dt * ((dx * dx + dy * dy + dz * dz) ** (-1.5))
b1m = m1 * mag
b2m = m2 * mag
v1[0] -= dx * b2m
v1[1] -= dy * b2m
v1[2] -= dz * b2m
v2[0] += dx * b1m
v2[1] += dy * b1m
v2[2] += dz * b1m
for (r, [vx, vy, vz], m) in bodies:
r[0] += dt * vx
r[1] += dt * vy
r[2] += dt * vz
def report_energy(bodies=SYSTEM, pairs=PAIRS, e=0.0):
for (((x1, y1, z1), v1, m1),
((x2, y2, z2), v2, m2)) in pairs:
dx = x1 - x2
dy = y1 - y2
dz = z1 - z2
e -= (m1 * m2) / ((dx * dx + dy * dy + dz * dz) ** 0.5)
for (r, [vx, vy, vz], m) in bodies:
e += m * (vx * vx + vy * vy + vz * vz) / 2.
return e
def offset_momentum(ref, bodies=SYSTEM, px=0.0, py=0.0, pz=0.0):
for (r, [vx, vy, vz], m) in bodies:
px -= vx * m
py -= vy * m
pz -= vz * m
(r, v, m) = ref
v[0] = px / m
v[1] = py / m
v[2] = pz / m
def test_nbody(iterations):
# Warm-up runs.
report_energy()
advance(0.01, 20000)
report_energy()
times = []
for _ in range(iterations):
t0 = time()
report_energy()
advance(0.01, 20000)
report_energy()
t1 = time()
times.append(t1 - t0)
return times
main = test_nbody
if __name__ == '__main__':
parser = optparse.OptionParser(
usage="%prog [options]",
description=("Run the n-body benchmark."))
util.add_standard_options_to(parser)
options, args = parser.parse_args()
offset_momentum(BODIES['sun']) # Set up global state
util.run_benchmark(options, options.num_runs, test_nbody)
===== Cython-0.29.28/Demos/benchmarks/nqueens.py =====
#!/usr/bin/env python
"""Simple, brute-force N-Queens solver."""
__author__ = "collinwinter@google.com (Collin Winter)"
# Python imports
import optparse
import re
import string
from time import time
# Local imports
import util
import cython
try:
from builtins import range as _xrange
except ImportError:
from __builtin__ import xrange as _xrange
# Pure-Python implementation of itertools.permutations().
@cython.locals(n=int, i=int, j=int)
def permutations(iterable):
"""permutations(range(3), 2) --> (0,1) (0,2) (1,0) (1,2) (2,0) (2,1)"""
pool = tuple(iterable)
n = len(pool)
indices = list(range(n))
cycles = list(range(1, n+1))[::-1]
yield [ pool[i] for i in indices ]
while n:
for i in reversed(range(n)):
j = cycles[i] - 1
if j == 0:
indices[i:] = indices[i+1:] + indices[i:i+1]
cycles[i] = n - i
else:
cycles[i] = j
indices[i], indices[-j] = indices[-j], indices[i]
yield [ pool[i] for i in indices ]
break
else:
return
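# For illustration: permutations([0, 1, 2]) yields the six orderings
# [0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0], [2, 0, 1], [2, 1, 0].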
# From http://code.activestate.com/recipes/576647/
@cython.locals(queen_count=int, i=int, vec=list)
def n_queens(queen_count):
"""N-Queens solver.
Args:
queen_count: the number of queens to solve for. This is also the
board size.
Yields:
        Solutions to the problem. Each yielded value looks like
        [3, 8, 2, 1, 4, ..., 6] where each number is the column position for the
        queen, and the index into the list indicates the row.
"""
cols = list(range(queen_count))
for vec in permutations(cols):
if (queen_count == len({ vec[i]+i for i in cols })
== len({ vec[i]-i for i in cols })):
yield vec
def test_n_queens(iterations):
# Warm-up runs.
list(n_queens(8))
list(n_queens(8))
times = []
for _ in _xrange(iterations):
t0 = time()
list(n_queens(8))
t1 = time()
times.append(t1 - t0)
return times
main = test_n_queens
if __name__ == "__main__":
parser = optparse.OptionParser(
usage="%prog [options]",
description=("Test the performance of an N-Queens solvers."))
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, test_n_queens)
===== Cython-0.29.28/Demos/benchmarks/richards.pxd =====
cimport cython
@cython.final
cdef class Packet:
cdef public object link
cdef public object ident
cdef public object kind
cdef public Py_ssize_t datum
cdef public list data
cpdef append_to(self,lst)
cdef class TaskRec:
pass
@cython.final
cdef class DeviceTaskRec(TaskRec):
cdef public object pending
@cython.final
cdef class IdleTaskRec(TaskRec):
cdef public long control
cdef public Py_ssize_t count
@cython.final
cdef class HandlerTaskRec(TaskRec):
cdef public object work_in # = None
cdef public object device_in # = None
cpdef workInAdd(self, Packet p)
cpdef deviceInAdd(self, Packet p)
@cython.final
cdef class WorkerTaskRec(TaskRec):
cdef public object destination # = I_HANDLERA
cdef public Py_ssize_t count
cdef class TaskState:
cdef public bint packet_pending # = True
cdef public bint task_waiting # = False
cdef public bint task_holding # = False
cpdef packetPending(self)
cpdef waiting(self)
cpdef running(self)
cpdef waitingWithPacket(self)
cpdef bint isPacketPending(self)
cpdef bint isTaskWaiting(self)
cpdef bint isTaskHolding(self)
cpdef bint isTaskHoldingOrWaiting(self)
cpdef bint isWaitingWithPacket(self)
cdef class TaskWorkArea:
cdef public list taskTab # = [None] * TASKTABSIZE
cdef public object taskList # = None
cdef public Py_ssize_t holdCount # = 0
cdef public Py_ssize_t qpktCount # = 0
cdef class Task(TaskState):
cdef public Task link # = taskWorkArea.taskList
cdef public object ident # = i
cdef public object priority # = p
cdef public object input # = w
cdef public object handle # = r
cpdef addPacket(self,Packet p,Task old)
cpdef runTask(self)
cpdef waitTask(self)
cpdef hold(self)
cpdef release(self,i)
cpdef qpkt(self,Packet pkt)
cpdef findtcb(self,id)
cdef class DeviceTask(Task):
@cython.locals(d=DeviceTaskRec)
cpdef fn(self,Packet pkt,DeviceTaskRec r)
cdef class HandlerTask(Task):
@cython.locals(h=HandlerTaskRec)
cpdef fn(self,Packet pkt,HandlerTaskRec r)
cdef class IdleTask(Task):
@cython.locals(i=IdleTaskRec)
cpdef fn(self,Packet pkt,IdleTaskRec r)
cdef class WorkTask(Task):
@cython.locals(w=WorkerTaskRec)
cpdef fn(self,Packet pkt,WorkerTaskRec r)
@cython.locals(t=Task)
cpdef schedule()
cdef class Richards:
cpdef run(self, iterations)
===== Cython-0.29.28/Demos/benchmarks/richards.py =====
# based on a Java version:
# Based on original version written in BCPL by Dr Martin Richards
# in 1981 at Cambridge University Computer Laboratory, England
# and a C++ version derived from a Smalltalk version written by
# L Peter Deutsch.
# Java version: Copyright (C) 1995 Sun Microsystems, Inc.
# Translation from C++, Mario Wolczko
# Outer loop added by Alex Jacoby
# Task IDs
I_IDLE = 1
I_WORK = 2
I_HANDLERA = 3
I_HANDLERB = 4
I_DEVA = 5
I_DEVB = 6
# Packet types
K_DEV = 1000
K_WORK = 1001
# Packet
BUFSIZE = 4
BUFSIZE_RANGE = range(BUFSIZE)
class Packet(object):
def __init__(self,l,i,k):
self.link = l
self.ident = i
self.kind = k
self.datum = 0
self.data = [0] * BUFSIZE
def append_to(self,lst):
self.link = None
if lst is None:
return self
else:
p = lst
next = p.link
while next is not None:
p = next
next = p.link
p.link = self
return lst
# Task Records
class TaskRec(object):
pass
class DeviceTaskRec(TaskRec):
def __init__(self):
self.pending = None
class IdleTaskRec(TaskRec):
def __init__(self):
self.control = 1
self.count = 10000
class HandlerTaskRec(TaskRec):
def __init__(self):
self.work_in = None
self.device_in = None
def workInAdd(self,p):
self.work_in = p.append_to(self.work_in)
return self.work_in
def deviceInAdd(self,p):
self.device_in = p.append_to(self.device_in)
return self.device_in
class WorkerTaskRec(TaskRec):
def __init__(self):
self.destination = I_HANDLERA
self.count = 0
# Task
class TaskState(object):
def __init__(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
def packetPending(self):
self.packet_pending = True
self.task_waiting = False
self.task_holding = False
return self
def waiting(self):
self.packet_pending = False
self.task_waiting = True
self.task_holding = False
return self
def running(self):
self.packet_pending = False
self.task_waiting = False
self.task_holding = False
return self
def waitingWithPacket(self):
self.packet_pending = True
self.task_waiting = True
self.task_holding = False
return self
def isPacketPending(self):
return self.packet_pending
def isTaskWaiting(self):
return self.task_waiting
def isTaskHolding(self):
return self.task_holding
def isTaskHoldingOrWaiting(self):
return self.task_holding or (not self.packet_pending and self.task_waiting)
def isWaitingWithPacket(self):
return self.packet_pending and self.task_waiting and not self.task_holding
tracing = False
layout = 0
def trace(a):
global layout
layout -= 1
if layout <= 0:
print()
layout = 50
print(a, end='')
TASKTABSIZE = 10
class TaskWorkArea(object):
def __init__(self):
self.taskTab = [None] * TASKTABSIZE
self.taskList = None
self.holdCount = 0
self.qpktCount = 0
taskWorkArea = TaskWorkArea()
class Task(TaskState):
def __init__(self,i,p,w,initialState,r):
self.link = taskWorkArea.taskList
self.ident = i
self.priority = p
self.input = w
self.packet_pending = initialState.isPacketPending()
self.task_waiting = initialState.isTaskWaiting()
self.task_holding = initialState.isTaskHolding()
self.handle = r
taskWorkArea.taskList = self
taskWorkArea.taskTab[i] = self
def fn(self,pkt,r):
raise NotImplementedError
def addPacket(self,p,old):
if self.input is None:
self.input = p
self.packet_pending = True
if self.priority > old.priority:
return self
else:
p.append_to(self.input)
return old
def runTask(self):
if self.isWaitingWithPacket():
msg = self.input
self.input = msg.link
if self.input is None:
self.running()
else:
self.packetPending()
else:
msg = None
return self.fn(msg,self.handle)
def waitTask(self):
self.task_waiting = True
return self
def hold(self):
taskWorkArea.holdCount += 1
self.task_holding = True
return self.link
def release(self,i):
t = self.findtcb(i)
t.task_holding = False
if t.priority > self.priority:
return t
else:
return self
def qpkt(self,pkt):
t = self.findtcb(pkt.ident)
taskWorkArea.qpktCount += 1
pkt.link = None
pkt.ident = self.ident
return t.addPacket(pkt,self)
def findtcb(self,id):
t = taskWorkArea.taskTab[id]
if t is None:
raise Exception("Bad task id %d" % id)
return t
# DeviceTask
class DeviceTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
d = r
assert isinstance(d, DeviceTaskRec)
if pkt is None:
pkt = d.pending
if pkt is None:
return self.waitTask()
else:
d.pending = None
return self.qpkt(pkt)
else:
d.pending = pkt
if tracing: trace(pkt.datum)
return self.hold()
class HandlerTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
h = r
assert isinstance(h, HandlerTaskRec)
if pkt is not None:
if pkt.kind == K_WORK:
h.workInAdd(pkt)
else:
h.deviceInAdd(pkt)
work = h.work_in
if work is None:
return self.waitTask()
count = work.datum
if count >= BUFSIZE:
h.work_in = work.link
return self.qpkt(work)
dev = h.device_in
if dev is None:
return self.waitTask()
h.device_in = dev.link
dev.datum = work.data[count]
work.datum = count + 1
return self.qpkt(dev)
# IdleTask
class IdleTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,0,None,s,r)
def fn(self,pkt,r):
i = r
assert isinstance(i, IdleTaskRec)
i.count -= 1
if i.count == 0:
return self.hold()
elif i.control & 1 == 0:
i.control //= 2
return self.release(I_DEVA)
else:
i.control = i.control//2 ^ 0xd008
return self.release(I_DEVB)
# WorkTask
A = ord('A')
class WorkTask(Task):
def __init__(self,i,p,w,s,r):
Task.__init__(self,i,p,w,s,r)
def fn(self,pkt,r):
w = r
assert isinstance(w, WorkerTaskRec)
if pkt is None:
return self.waitTask()
if w.destination == I_HANDLERA:
dest = I_HANDLERB
else:
dest = I_HANDLERA
w.destination = dest
pkt.ident = dest
pkt.datum = 0
for i in BUFSIZE_RANGE: # range(BUFSIZE)
w.count += 1
if w.count > 26:
w.count = 1
pkt.data[i] = A + w.count - 1
return self.qpkt(pkt)
import time
def schedule():
t = taskWorkArea.taskList
while t is not None:
pkt = None
if tracing:
print("tcb =", t.ident)
if t.isTaskHoldingOrWaiting():
t = t.link
else:
if tracing: trace(chr(ord("0")+t.ident))
t = t.runTask()
class Richards(object):
def run(self, iterations):
for i in range(iterations):
taskWorkArea.holdCount = 0
taskWorkArea.qpktCount = 0
IdleTask(I_IDLE, 1, 10000, TaskState().running(), IdleTaskRec())
wkq = Packet(None, 0, K_WORK)
wkq = Packet(wkq , 0, K_WORK)
WorkTask(I_WORK, 1000, wkq, TaskState().waitingWithPacket(), WorkerTaskRec())
wkq = Packet(None, I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
wkq = Packet(wkq , I_DEVA, K_DEV)
HandlerTask(I_HANDLERA, 2000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
wkq = Packet(None, I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
wkq = Packet(wkq , I_DEVB, K_DEV)
HandlerTask(I_HANDLERB, 3000, wkq, TaskState().waitingWithPacket(), HandlerTaskRec())
            wkq = None
            DeviceTask(I_DEVA, 4000, wkq, TaskState().waiting(), DeviceTaskRec())
            DeviceTask(I_DEVB, 5000, wkq, TaskState().waiting(), DeviceTaskRec())
schedule()
if taskWorkArea.holdCount == 9297 and taskWorkArea.qpktCount == 23246:
pass
else:
return False
return True
def entry_point(iterations):
r = Richards()
startTime = time.time()
result = r.run(iterations)
endTime = time.time()
return result, startTime, endTime
def main(iterations = 10, entry_point = entry_point):
print("Richards benchmark (Python) starting... [%r]" % entry_point)
result, startTime, endTime = entry_point(iterations)
if not result:
print("Incorrect results!")
return -1
print("finished.")
total_s = endTime - startTime
print("Total time for %d iterations: %.2f secs" % (iterations, total_s))
print("Average time per iteration: %.2f ms" % (total_s*1000/iterations))
return 42
try:
import sys
if '-nojit' in sys.argv:
sys.argv.remove('-nojit')
raise ImportError
import pypyjit
except ImportError:
pass
else:
import types
for item in globals().values():
if isinstance(item, types.FunctionType):
pypyjit.enable(item.func_code)
elif isinstance(item, type):
for it in item.__dict__.values():
if isinstance(it, types.FunctionType):
pypyjit.enable(it.func_code)
if __name__ == '__main__':
import sys
if len(sys.argv) >= 2:
main(iterations = int(sys.argv[1]))
else:
main()
===== Cython-0.29.28/Demos/benchmarks/setup.py =====
from distutils.core import setup
from Cython.Build import cythonize
directives = {
'optimize.inline_defnode_calls': True
}
setup(
name = 'benchmarks',
ext_modules = cythonize("*.py", language_level=3, annotate=True,
compiler_directives=directives,
exclude=["setup.py"]),
)
===== Cython-0.29.28/Demos/benchmarks/spectralnorm.pxd =====
cimport cython
cdef inline double eval_A(double i, double j)
@cython.locals(i=long)
cdef list eval_A_times_u(list u)
@cython.locals(i=long)
cdef list eval_At_times_u(list u)
cdef list eval_AtA_times_u(list u)
@cython.locals(j=long, u_j=double, partial_sum=double)
cdef double part_A_times_u(double i, list u)
@cython.locals(j=long, u_j=double, partial_sum=double)
cdef double part_At_times_u(double i, list u)
===== Cython-0.29.28/Demos/benchmarks/spectralnorm.py =====
# -*- coding: utf-8 -*-
# The Computer Language Benchmarks Game
# http://shootout.alioth.debian.org/
# Contributed by Sebastien Loisel
# Fixed by Isaac Gouy
# Sped up by Josh Goldfoot
# Dirtily sped up by Simon Descarpentries
# Concurrency by Jason Stitt
from time import time
import util
import optparse
def eval_A (i, j):
return 1.0 / ((i + j) * (i + j + 1) / 2 + i + 1)
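# For illustration: eval_A(0, 0) == 1.0, eval_A(0, 1) == 0.5 and
# eval_A(1, 0) == 1.0 / 3.0 -- entries of the infinite matrix A whose
# spectral norm the benchmark approximates.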
def eval_A_times_u (u):
return [ part_A_times_u(i,u) for i in range(len(u)) ]
def eval_At_times_u (u):
return [ part_At_times_u(i,u) for i in range(len(u)) ]
def eval_AtA_times_u (u):
return eval_At_times_u (eval_A_times_u (u))
def part_A_times_u(i, u):
partial_sum = 0
for j, u_j in enumerate(u):
partial_sum += eval_A (i, j) * u_j
return partial_sum
def part_At_times_u(i, u):
partial_sum = 0
for j, u_j in enumerate(u):
partial_sum += eval_A (j, i) * u_j
return partial_sum
DEFAULT_N = 130
def main(n):
times = []
for i in range(n):
t0 = time()
u = [1] * DEFAULT_N
for dummy in range (10):
v = eval_AtA_times_u (u)
u = eval_AtA_times_u (v)
vBv = vv = 0
for ue, ve in zip (u, v):
vBv += ue * ve
vv += ve * ve
tk = time()
times.append(tk - t0)
return times
if __name__ == "__main__":
parser = optparse.OptionParser(
usage="%prog [options]",
description="Test the performance of the spectralnorm benchmark")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, main)
===== Cython-0.29.28/Demos/benchmarks/util.py =====
#!/usr/bin/env python
"""Utility code for benchmark scripts."""
__author__ = "collinwinter@google.com (Collin Winter)"
import math
import operator
try:
reduce
except NameError:
from functools import reduce
def run_benchmark(options, num_runs, bench_func, *args):
"""Run the given benchmark, print results to stdout.
Args:
options: optparse.Values instance.
num_runs: number of times to run the benchmark
bench_func: benchmark function. `num_runs, *args` will be passed to this
function. This should return a list of floats (benchmark execution
times).
"""
if options.profile:
import cProfile
prof = cProfile.Profile()
prof.runcall(bench_func, num_runs, *args)
prof.print_stats(sort=options.profile_sort)
else:
data = bench_func(num_runs, *args)
if options.take_geo_mean:
product = reduce(operator.mul, data, 1)
print(math.pow(product, 1.0 / len(data)))
else:
for x in data:
print(x)
def add_standard_options_to(parser):
"""Add a bunch of common command-line flags to an existing OptionParser.
This function operates on `parser` in-place.
Args:
parser: optparse.OptionParser instance.
"""
parser.add_option("-n", action="store", type="int", default=100,
dest="num_runs", help="Number of times to run the test.")
parser.add_option("--profile", action="store_true",
help="Run the benchmark through cProfile.")
parser.add_option("--profile_sort", action="store", type="str",
default="time", help="Column to sort cProfile output by.")
parser.add_option("--take_geo_mean", action="store_true",
help="Return the geo mean, rather than individual data.")
===== Cython-0.29.28/Demos/callback/Makefile =====
all:
python Setup.py build_ext --inplace
test: all
python run_cheese.py
clean:
@echo Cleaning Demos/callback
@rm -f cheese.c *.o *.so *~ core
@rm -rf build
===== Cython-0.29.28/Demos/callback/Makefile.nodistutils =====
PYHOME = $(HOME)/pkg/python/version
PYINCLUDE = \
-I$(PYHOME)/include/python2.2 \
-I$(PYHOME)/$(ARCH)/include/python2.2
%.c: %.pyx
../../bin/cython $<
%.o: %.c
gcc -c -fPIC $(PYINCLUDE) $<
%.so: %.o
gcc -shared $< -lm -o $@
all: cheese.so
clean:
@echo Cleaning Demos/callback
@rm -f *.c *.o *.so *~ core core.*
===== Cython-0.29.28/Demos/callback/README.rst =====
This example demonstrates how you can wrap a C API
that has a callback interface, so that you can
pass Python functions to it as callbacks.
The files ``cheesefinder.h`` and ``cheesefinder.c``
represent the C library to be wrapped.
The file ``cheese.pyx`` is the Cython module
which wraps it.
The file ``run_cheese.py`` demonstrates how to
call the wrapper.
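After building the extension (e.g. with ``make`` or
``python Setup.py build_ext --inplace``), running ``python run_cheese.py``
should print one line per cheese reported by the C library::

    Found cheese: cheddar
    Found cheese: camembert
    Found cheese: that runny one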
././@PaxHeader 0000000 0000000 0000000 00000000034 00000000000 011452 x ustar 00 0000000 0000000 28 mtime=1645055911.5234332

Cython-0.29.28/Demos/callback/Setup.py

from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize

setup(
    name = 'callback',
    ext_modules=cythonize([
        Extension("cheese", ["cheese.pyx", "cheesefinder.c"]),
    ]),
)

Cython-0.29.28/Demos/callback/cheese.pyx

#
# Cython wrapper for the cheesefinder API
#

cdef extern from "cheesefinder.h":
    ctypedef void (*cheesefunc)(char *name, void *user_data)
    void find_cheeses(cheesefunc user_func, void *user_data)

def find(f):
    find_cheeses(callback, <void*>f)

cdef void callback(char *name, void *f):
    (<object>f)(name.decode('utf-8'))

Cython-0.29.28/Demos/callback/cheesefinder.c

/*
 * An example of a C API that provides a callback mechanism.
 */

#include "cheesefinder.h"

static char *cheeses[] = {
    "cheddar",
    "camembert",
    "that runny one",
    0
};

void find_cheeses(cheesefunc user_func, void *user_data) {
    char **p = cheeses;
    while (*p) {
        user_func(*p, user_data);
        ++p;
    }
}

Cython-0.29.28/Demos/callback/cheesefinder.h

typedef void (*cheesefunc)(char *name, void *user_data);
void find_cheeses(cheesefunc user_func, void *user_data);

Cython-0.29.28/Demos/callback/run_cheese.py

import cheese

def report_cheese(name):
    print("Found cheese: " + name)

cheese.find(report_cheese)
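
Given the static cheese list in ``cheesefinder.c`` above, building the
extension and running ``run_cheese.py`` should print::

    Found cheese: cheddar
    Found cheese: camembert
    Found cheese: that runny one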

Cython-0.29.28/Demos/embed/Makefile

# Makefile for creating our standalone Cython program
PYTHON := python
PYVERSION := $(shell $(PYTHON) -c "import sys; print(sys.version[:3])")
PYPREFIX := $(shell $(PYTHON) -c "import sys; print(sys.prefix)")

INCDIR := $(shell $(PYTHON) -c "from distutils import sysconfig; print(sysconfig.get_python_inc())")
PLATINCDIR := $(shell $(PYTHON) -c "from distutils import sysconfig; print(sysconfig.get_python_inc(plat_specific=True))")
LIBDIR1 := $(shell $(PYTHON) -c "from distutils import sysconfig; print(sysconfig.get_config_var('LIBDIR'))")
LIBDIR2 := $(shell $(PYTHON) -c "from distutils import sysconfig; print(sysconfig.get_config_var('LIBPL'))")
PYLIB := $(shell $(PYTHON) -c "from distutils import sysconfig; print(sysconfig.get_config_var('LIBRARY')[3:-2])")

CC := $(shell $(PYTHON) -c "import distutils.sysconfig; print(distutils.sysconfig.get_config_var('CC'))")
LINKCC := $(shell $(PYTHON) -c "import distutils.sysconfig; print(distutils.sysconfig.get_config_var('LINKCC'))")
LINKFORSHARED := $(shell $(PYTHON) -c "import distutils.sysconfig; print(distutils.sysconfig.get_config_var('LINKFORSHARED'))")
LIBS := $(shell $(PYTHON) -c "import distutils.sysconfig; print(distutils.sysconfig.get_config_var('LIBS'))")
SYSLIBS := $(shell $(PYTHON) -c "import distutils.sysconfig; print(distutils.sysconfig.get_config_var('SYSLIBS'))")

.PHONY: paths all clean test

paths:
	@echo "PYTHON=$(PYTHON)"
	@echo "PYVERSION=$(PYVERSION)"
	@echo "PYPREFIX=$(PYPREFIX)"
	@echo "INCDIR=$(INCDIR)"
	@echo "PLATINCDIR=$(PLATINCDIR)"
	@echo "LIBDIR1=$(LIBDIR1)"
	@echo "LIBDIR2=$(LIBDIR2)"
	@echo "PYLIB=$(PYLIB)"
	@echo "CC=$(CC)"
	@echo "LINKCC=$(LINKCC)"
	@echo "LINKFORSHARED=$(LINKFORSHARED)"
	@echo "LIBS=$(LIBS)"
	@echo "SYSLIBS=$(SYSLIBS)"

embedded: embedded.o
	$(LINKCC) -o $@ $^ -L$(LIBDIR1) -L$(LIBDIR2) -l$(PYLIB) $(LIBS) $(SYSLIBS) $(LINKFORSHARED)

embedded.o: embedded.c
	$(CC) -c $^ -I$(INCDIR) -I$(PLATINCDIR)

CYTHON := ../../cython.py
embedded.c: embedded.pyx
	@$(PYTHON) $(CYTHON) --embed embedded.pyx

all: embedded

clean:
	@echo Cleaning Demos/embed
	@rm -f *~ *.o *.so core core.* *.c embedded test.output

test: clean all
	LD_LIBRARY_PATH=$(LIBDIR1):$$LD_LIBRARY_PATH ./embedded > test.output
	$(PYTHON) assert_equal.py embedded.output test.output

Cython-0.29.28/Demos/embed/Makefile.msc

# Makefile for Microsoft C Compiler, building a DLL
PYVERSION = 2.2
PYHOME = \Python$(PYVERSION:.=)
PYINCLUDE = -I$(PYHOME)\include
PYLIB = /LIBPATH:$(PYHOME)\libs
CFLAGS = $(PYINCLUDE) /Ox /W3 /GX -nologo

.SUFFIXES: .exe .dll .obj .c .cpp .pyx

.pyx.c:
	$(PYHOME)\Python.exe ../../cython.py $<

all: main.exe

clean:
	del /Q/F *.obj embedded.h embedded.c main.exe embedded.dll embedded.lib embedded.exp

# When linking the DLL we must explicitly list all of the exports
# There doesn't seem to be an easy way to get DL_EXPORT to have the correct definition
# to do the export for us without breaking the importing of symbols from the core
# python library.
embedded.dll: embedded.obj
	link /nologo /DLL /INCREMENTAL:NO $(PYLIB) $** /IMPLIB:$*.lib /DEF:<< /OUT:$*.dll
EXPORTS initembedded
EXPORTS spam
<<

main.exe: main.obj embedded.lib
	link /nologo $** $(PYLIB) /OUT:main.exe

embedded.h: embedded.c

main.obj: embedded.h

embedded.obj: embedded.c
	$(CC) /MD $(CFLAGS) -c $**

embedded.lib: embedded.dll

Cython-0.29.28/Demos/embed/Makefile.msc.static

# Makefile for Microsoft compiler statically linking
PYVERSION = 2.2
PYHOME = \Python$(PYVERSION:.=)
PYINCLUDE = -I$(PYHOME)\include
PYLIB = /LIBPATH:$(PYHOME)\libs python22.lib
CFLAGS = $(PYINCLUDE) /Ox /W3 /GX -nologo

.SUFFIXES: .exe .dll .obj .c .cpp .pyx

.pyx.c:
	$(PYHOME)\Python.exe ../../cython.py $<

all: main.exe

clean:
	-del /Q/F *.obj embedded.h embedded.c main.exe

main.exe: main.obj embedded.obj
	link /nologo $** $(PYLIB) /OUT:main.exe

embedded.h: embedded.c

main.obj: embedded.h

Cython-0.29.28/Demos/embed/Makefile.unix

# Makefile for creating our standalone Cython program
PYVERSION=2.3
PYPREFIX=/usr
INCLUDES=-I$(PYPREFIX)/include/python$(PYVERSION)

embedded: embedded.o
	gcc -o $@ $^ -lpython$(PYVERSION)

embedded.o: embedded.c
	gcc -c $^ $(INCLUDES)

embedded.c: embedded.pyx
	@python ../../cython.py --embed embedded.pyx

all: embedded

clean:
	@echo Cleaning Demos/embed
	@rm -f *~ *.o *.so core core.* *.c embedded

Cython-0.29.28/Demos/embed/README.rst

This example demonstrates how Cython-generated code
can be called directly from a main program written in C.
The Windows makefiles were contributed by
Duncan Booth: Duncan.Booth@SuttonCourtenay.org.uk.

Cython-0.29.28/Demos/embed/assert_equal.py

from __future__ import absolute_import, print_function

import sys

f1 = open(sys.argv[1])
f2 = open(sys.argv[2])
try:
    if f1.read() != f2.read():
        print("Files differ")
        sys.exit(1)
    else:
        print("Files identical")
finally:
    f1.close()
    f2.close()

Cython-0.29.28/Demos/embed/embedded.output

__main__
Hi, I'm embedded.

Cython-0.29.28/Demos/embed/embedded.pyx

# cython: language_level=3

print(__name__)

if __name__ == "__main__":
    print("Hi, I'm embedded.")
else:
    print("I'm being imported.")

Cython-0.29.28/Demos/freeze/Makefile

CC = gcc
CYTHON = ../../bin/cython
CYTHON_FREEZE = ../../bin/cython_freeze
PYTHON = python
RST2HTML = rst2html

PY_LDFLAGS = $(shell $(PYTHON) -c 'from distutils.sysconfig import get_config_var as g; import sys; sys.stdout.write(" ".join([g("LINKFORSHARED"), "-L"+g("LIBPL")]) + "\n")')
PY_CPPFLAGS = $(shell $(PYTHON) -c 'from distutils.sysconfig import *; import sys; sys.stdout.write("-I"+get_python_inc() + "\n")')
LIBDIR1 := $(shell $(PYTHON) -c "from distutils import sysconfig; print(sysconfig.get_config_var('LIBDIR'))")
LIBDIR2 := $(shell $(PYTHON) -c "from distutils import sysconfig; print(sysconfig.get_config_var('LIBPL'))")
PYLIB := $(shell $(PYTHON) -c "from distutils import sysconfig; print(sysconfig.get_config_var('LIBRARY')[3:-2])")
LIBS := $(shell $(PYTHON) -c "import distutils.sysconfig; print(distutils.sysconfig.get_config_var('LIBS'))")

CFLAGS = -fPIC -fno-strict-aliasing -g -O2 -Wall -Wextra
CPPFLAGS = $(PY_CPPFLAGS)
LDFLAGS = $(PY_LDFLAGS)
LDLIBS = -L$(LIBDIR1) -L$(LIBDIR2) -l$(PYLIB)

# Name of executable
TARGETS = nCr python

# List of Cython source files, with main module first.
CYTHON_SOURCE = combinatorics.pyx lcmath.pyx
CYTHON_SECONDARY = $(CYTHON_SOURCE:.pyx=.c) $(TARGETS:=.c)

all : $(TARGETS)
html : README.html

$(TARGETS) : % : %.o $(CYTHON_SOURCE:.pyx=.o)

nCr.c :
	$(CYTHON_FREEZE) $(CYTHON_SOURCE:.pyx=) > $@

python.c :
	$(CYTHON_FREEZE) --pymain $(CYTHON_SOURCE:.pyx=) > $@

%.c : %.pyx
	$(CYTHON) $(CYTHONFLAGS) $^

%.html : %.txt
	$(RST2HTML) $^ $@

clean:
	$(RM) *.o $(CYTHON_SECONDARY) $(TARGETS) README.html

.PHONY: clean
.SECONDARY: $(CYTHON_SECONDARY)

Cython-0.29.28/Demos/freeze/README.rst

NAME
====

**cython_freeze** - create a C file for embedding Cython modules

SYNOPSIS
========

::

    cython_freeze [-o outfile] [-p] module [...]

DESCRIPTION
===========

**cython_freeze** generates a C source file to embed a Python interpreter
with one or more Cython modules built in.  This allows one to create a single
executable from Cython code, without having to have separate shared objects
for each Cython module.  A major advantage of this approach is that it allows
debugging with gprof(1), which does not work with shared objects.

Unless ``-p`` is given, the first module's ``__name__`` is set to
``"__main__"`` and is imported on startup; if ``-p`` is given, a normal Python
interpreter is built, with the given modules built into the binary.

Note that this method differs from ``cython --embed``.  The ``--embed`` option
modifies the resulting C source file to include a ``main()`` function, so it
can only be used on a single Cython module.  The advantage of ``--embed`` is
simplicity.  This module, on the other hand, can be used with multiple
modules, but it requires another C source file to be created.

OPTIONS
=======

::

    -o FILE, --outfile=FILE   write output to FILE instead of standard output
    -p, --pymain              do not automatically run the first module as __main__

EXAMPLE
=======

In the ``Demos/freeze`` directory, there exist two Cython modules:

* ``lcmath.pyx``: A module that interfaces with the -lm library.
* ``combinatorics.pyx``: A module that implements n-choose-r using lcmath.

Both modules have the Python idiom ``if __name__ == "__main__"``, which only
executes if that module is the "main" module.  If run as main, lcmath prints
the factorial of the argument, while combinatorics prints n-choose-r.

The provided Makefile creates an executable, *nCr*, using combinatorics as the
"main" module.  It basically performs the following (ignoring the compiler
flags)::

    $ cython_freeze combinatorics lcmath > nCr.c
    $ cython combinatorics.pyx
    $ cython lcmath.pyx
    $ gcc -c nCr.c
    $ gcc -c combinatorics.c
    $ gcc -c lcmath.c
    $ gcc nCr.o combinatorics.o lcmath.o -o nCr

Because the combinatorics module was listed first, its ``__name__`` is set
to ``"__main__"``, while lcmath's is set to ``"lcmath"``.  The executable now
contains a Python interpreter and both Cython modules. ::

    $ ./nCr
    USAGE: ./nCr n r
    Prints n-choose-r.
    $ ./nCr 15812351235 12
    5.10028093999e+113

You may wish to build a normal Python interpreter, rather than having one
module as "main".  This may happen if you want to use your module from an
interactive shell or from another script, yet you still want it statically
linked so you can profile it with gprof.  To do this, add the ``--pymain``
flag to ``cython_freeze``.  In the Makefile, the *python* executable is built
like this. ::

    $ cython_freeze --pymain combinatorics lcmath -o python.c
    $ gcc -c python.c
    $ gcc python.o combinatorics.o lcmath.o -o python

Now ``python`` is a normal Python interpreter, but the lcmath and combinatorics
modules will be built into the executable. ::

    $ ./python
    Python 2.6.2 (release26-maint, Apr 19 2009, 01:58:18)
    [GCC 4.3.3] on linux2
    Type "help", "copyright", "credits" or "license" for more information.
    >>> import lcmath
    >>> lcmath.factorial(155)
    4.7891429014634364e+273

PREREQUISITES
=============

Cython 0.11.2 (or newer, assuming the API does not change)

SEE ALSO
========

* `Python `_
* `Cython `_
* `freeze.py `_

Cython-0.29.28/Demos/freeze/combinatorics.pyx

# cython: language_level=3

import lcmath

def nCr(n, r):
    """Return the number of ways to choose r elements of a set of n."""
    return lcmath.exp(
        lcmath.lfactorial(n) -
        lcmath.lfactorial(r) -
        lcmath.lfactorial(n-r)
    )

if __name__ == "__main__":
    import sys
    if len(sys.argv) != 3:
        sys.stderr.write("USAGE: %s n r\nPrints n-choose-r.\n" % sys.argv[0])
        sys.exit(2)
    n, r = map(float, sys.argv[1:])
    print(nCr(n, r))

Cython-0.29.28/Demos/freeze/lcmath.pyx

# cython: language_level=3

cdef extern from "math.h":
    double c_lgamma "lgamma" (double)
    double c_exp "exp" (double)

def exp(n):
    """Return e**n."""
    return c_exp(n)

def lfactorial(n):
    """Return an estimate of the log factorial of n."""
    return c_lgamma(n+1)

def factorial(n):
    """Return an estimate of the factorial of n."""
    return c_exp( c_lgamma(n+1) )

if __name__ == "__main__":
    import sys
    if len(sys.argv) != 2:
        sys.stderr.write("USAGE: %s n\nPrints n!.\n" % sys.argv[0])
        sys.exit(2)
    n, = map(float, sys.argv[1:])
    print(factorial(n))
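
The module relies on the identity ``n! == exp(lgamma(n + 1))``, which trades
exactness for a result that stays in floating point.  The same identity can be
checked in pure Python with the standard library (a quick sketch, not part of
the demo)::

    import math

    # lcmath.pyx computes n! as exp(lgamma(n + 1)); verify for small n.
    for n in range(1, 10):
        estimate = math.exp(math.lgamma(n + 1))
        exact = math.factorial(n)
        assert abs(estimate - exact) / exact < 1e-9, (n, estimate, exact)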

Cython-0.29.28/Demos/integrate0.py

def f(x):
    return x**2-x

def integrate_f(a, b, N):
    s = 0.0
    dx = (b-a)/N
    for i in range(N):
        s += f(a+i*dx)
    return s * dx

Cython-0.29.28/Demos/integrate1.pyx

# cython: language_level=3

def f(x):
    return x**2-x

def integrate_f(a, b, N):
    s = 0.0
    dx = (b-a)/N
    for i in range(N):
        s += f(a+i*dx)
    return s * dx

Cython-0.29.28/Demos/integrate2.pyx

# cython: language_level=3

cdef double f(double x) except? -2:
    return x**2-x

def integrate_f(double a, double b, int N):
    cdef int i
    s = 0.0
    dx = (b-a)/N
    for i in range(N):
        s += f(a+i*dx)
    return s * dx
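
All three variants compute the same left Riemann sum of ``f(x) = x**2 - x``.
For ``a=0, b=10`` the analytic value is ``10**3/3 - 10**2/2`` (about 283.333),
which gives a quick sanity check; a sketch in pure Python, assuming
``integrate0`` has been imported from this directory::

    from integrate0 import integrate_f  # any of the three modules works

    approx = integrate_f(0.0, 10.0, 100000)
    exact = 10.0**3 / 3 - 10.0**2 / 2  # integral of x**2 - x over [0, 10]
    # Left-rule error is roughly dx/2 * (f(b) - f(a)) = 5e-5 * 90 = 0.0045.
    assert abs(approx - exact) < 0.01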

Cython-0.29.28/Demos/integrate_timing.py

from __future__ import absolute_import, print_function

import timeit

import integrate0, integrate1, integrate2

number = 10
py_time = None
for m in ('integrate0', 'integrate1', 'integrate2'):
    print(m)
    t = min(timeit.repeat("integrate_f(0.0, 10.0, 100000)", "from %s import integrate_f" % m, number=number))
    if py_time is None:
        py_time = t
    print("  ", t / number, "s")
    print("  ", py_time / t)

Cython-0.29.28/Demos/libraries/call_mymath.pyx

cdef extern from "mymath.h":
    double sinc(double)

def call_sinc(x):
    return sinc(x)

Cython-0.29.28/Demos/libraries/mymath.c

#include "math.h"

double sinc(double x) {
    return x == 0 ? 1 : sin(x)/x;
}

Cython-0.29.28/Demos/libraries/mymath.h

double sinc(double);

Cython-0.29.28/Demos/libraries/setup.py

from __future__ import absolute_import, print_function

import os
import sys

from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize

# For demo purposes, we build our own tiny library.
try:
    print("building libmymath.a")
    assert os.system("gcc -shared -fPIC -c mymath.c -o mymath.o") == 0
    assert os.system("ar rcs libmymath.a mymath.o") == 0
except:
    if not os.path.exists("libmymath.a"):
        print("Error building external library, please create libmymath.a manually.")
        sys.exit(1)

# Here is how to use the library built above.
ext_modules = cythonize([
    Extension("call_mymath",
              sources=["call_mymath.pyx"],
              include_dirs=[os.getcwd()],  # path to .h file(s)
              library_dirs=[os.getcwd()],  # path to .a or .so file(s)
              libraries=['mymath'])
])

setup(
    name='Demos',
    ext_modules=ext_modules,
)
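
Once built (``python setup.py build_ext --inplace``), the wrapper can be
exercised from Python.  A small usage sketch, resting only on the sources
above (``sinc(0)`` is special-cased to 1 in ``mymath.c``)::

    import math
    import call_mymath

    assert call_mymath.call_sinc(0.0) == 1.0       # special case in mymath.c
    assert abs(call_mymath.call_sinc(math.pi)) < 1e-12  # sin(pi)/pi ~= 0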

Cython-0.29.28/Demos/numpy_demo.pyx

cimport numpy as cnp

def sum_of_squares(cnp.ndarray[double, ndim=1] arr):
    cdef long N = arr.shape[0]
    cdef double ss = 0
    for i in range(N):
        ss += arr[i]**2
    return ss

Cython-0.29.28/Demos/overflow_perf.pyx

# cython: language_level=3
# distutils: extra_compile_args = -O3

cimport cython

ctypedef fused INT:
    int
    long long
    unsigned int
    unsigned long long
    object

ctypedef fused C_INT:
    int
    long long
    unsigned int
    unsigned long long


@cython.overflowcheck(False)
def fib(INT n):
    """
    >>> [fib(k) for k in range(10)]
    [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    """
    cdef INT a, b, k
    a, b = 0, 1
    for k in range(n):
        a, b = b, a + b
    return int(b)


@cython.overflowcheck(True)
def fib_overflow(INT n):
    """
    >>> [fib_overflow(k) for k in range(10)]
    [1, 1, 2, 3, 5, 8, 13, 21, 34, 55]
    """
    cdef INT a, b, k
    a, b = 0, 1
    for k in range(n):
        a, b = b, a + b
    return int(b)


@cython.overflowcheck(False)
def collatz(INT n):
    """
    >>> collatz(1)
    0
    >>> collatz(5)
    5
    >>> collatz(10)
    6
    """
    cdef INT k = 0
    while n != 1:
        if n % 2 == 0:
            n //= 2
        else:
            n = 3*n + 1
        k += 1
    return int(k)


@cython.overflowcheck(True)
@cython.overflowcheck.fold(False)
def collatz_overflow(INT n):
    """
    >>> collatz_overflow(1)
    0
    >>> collatz_overflow(5)
    5
    >>> collatz_overflow(10)
    6
    """
    cdef INT k = 0
    while n != 1:
        if n % 2 == 0:
            n //= 2
        else:
            n = 3*n + 1
        k += 1
    return int(k)


@cython.overflowcheck(True)
@cython.overflowcheck.fold(True)
def collatz_overflow_fold(INT n):
    """
    >>> collatz_overflow_fold(1)
    0
    >>> collatz_overflow_fold(5)
    5
    >>> collatz_overflow_fold(10)
    6
    """
    cdef INT k = 0
    while n != 1:
        if n % 2 == 0:
            n //= 2
        else:
            n = 3*n + 1
        k += 1
    return int(k)


@cython.overflowcheck(False)
def factorial(INT n):
    """
    >>> factorial(2)
    2
    >>> factorial(5)
    120
    """
    cdef INT k, res = 1
    for k in range(2, n+1):
        res = res * k
    return int(res)


@cython.overflowcheck(True)
def factorial_overflow(INT n):
    """
    >>> factorial_overflow(2)
    2
    >>> factorial_overflow(5)
    120
    """
    cdef INT k, res = 1
    for k in range(2, n+1):
        res = res * k
    return int(res)


@cython.overflowcheck(False)
def most_orthogonal(C_INT[:,::1] vectors):
    cdef C_INT n = vectors.shape[0]
    cdef C_INT* a
    cdef C_INT* b
    cdef double min_dot = 2  # actual max is 1
    for i in range(n):
        for j in range(i):
            a = &vectors[i, 0]
            b = &vectors[j, 0]
            # A highly nested arithmetic expression...
            normalized_dot = (1.0 * (a[0]*b[0] + a[1]*b[1] + a[2]*b[2]) /
                ((a[0]*a[0] + a[1]*a[1] + a[2]*a[2]) * (b[0]*b[0] + b[1]*b[1] + b[2]*b[2])))
            if normalized_dot < min_dot:
                min_dot = normalized_dot
                min_pair = i, j
    return vectors[i], vectors[j]


@cython.overflowcheck(True)
@cython.overflowcheck.fold(False)
def most_orthogonal_overflow(C_INT[:,::1] vectors):
    cdef C_INT n = vectors.shape[0]
    cdef C_INT* a
    cdef C_INT* b
    cdef double min_dot = 2  # actual max is 1
    for i in range(n):
        for j in range(i):
            a = &vectors[i, 0]
            b = &vectors[j, 0]
            # A highly nested arithmetic expression...
            normalized_dot = ((a[0]*b[0] + a[1]*b[1] + a[2]*b[2]) /
                (1.0 * (a[0]*a[0] + a[1]*a[1] + a[2]*a[2]) * (b[0]*b[0] + b[1]*b[1] + b[2]*b[2])))
            if normalized_dot < min_dot:
                min_dot = normalized_dot
                min_pair = i, j
    return vectors[i], vectors[j]


@cython.overflowcheck(True)
@cython.overflowcheck.fold(True)
def most_orthogonal_overflow_fold(C_INT[:,::1] vectors):
    cdef C_INT n = vectors.shape[0]
    cdef C_INT* a
    cdef C_INT* b
    cdef double min_dot = 2  # actual max is 1
    for i in range(n):
        for j in range(i):
            a = &vectors[i, 0]
            b = &vectors[j, 0]
            # A highly nested arithmetic expression...
            normalized_dot = ((a[0]*b[0] + a[1]*b[1] + a[2]*b[2]) /
                (1.0 * (a[0]*a[0] + a[1]*a[1] + a[2]*a[2]) * (b[0]*b[0] + b[1]*b[1] + b[2]*b[2])))
            if normalized_dot < min_dot:
                min_dot = normalized_dot
                min_pair = i, j
    return vectors[i], vectors[j]
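
The behaviour that ``overflowcheck`` guards against can be illustrated without
Cython: fixed-width C integers silently wrap where Python ints grow.  A
pure-Python sketch using ``ctypes`` (the threshold follows from
``2**31 - 1 == 2147483647``: ``12!`` fits in a signed 32-bit int, while
``13! == 6227020800`` does not)::

    import ctypes

    fact = 1
    for k in range(2, 14):
        fact *= k
        # ctypes.c_int32 truncates like an overflowing C int would.
        as_int32 = ctypes.c_int32(fact).value
        marker = "" if as_int32 == fact else "  <-- wrapped"
        print("%2d! = %13d  as int32: %11d%s" % (k, fact, as_int32, marker))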

Cython-0.29.28/Demos/overflow_perf_run.py

from __future__ import absolute_import, print_function

from overflow_perf import *

import sys
import timeit

try:
    import numpy as np
except ImportError:
    np = None


def run_tests(N):
    global f
    for func in most_orthogonal, fib, collatz, factorial:
        print(func.__name__)
        for type in ['int', 'unsigned int', 'long long', 'unsigned long long', 'object']:
            if func == most_orthogonal:
                if type == 'object' or np is None:
                    continue
                type_map = {'int': 'int32', 'unsigned int': 'uint32', 'long long': 'int64', 'unsigned long long': 'uint64'}
                shape = N, 3
                arg = np.ndarray(shape, dtype=type_map[type])
                arg[:] = 1000 * np.random.random(shape)
            else:
                arg = N
            try:
                print("%s[%s](%s)" % (func.__name__, type, N))
                with_overflow = my_timeit(globals()[func.__name__ + "_overflow"][type], arg)
                no_overflow = my_timeit(func[type], arg)
                print("\t%0.04e\t%0.04e\t%0.04f" % (no_overflow, with_overflow, with_overflow / no_overflow))
                if func.__name__ + "_overflow_fold" in globals():
                    with_overflow = my_timeit(globals()[func.__name__ + "_overflow_fold"][type], arg)
                    print("\t%0.04e\t%0.04e\t%0.04f (folded)" % (
                        no_overflow, with_overflow, with_overflow / no_overflow))
            except OverflowError:
                print("  ", "Overflow")


def my_timeit(func, N):
    global f, arg
    f = func
    arg = N
    for exponent in range(10, 30):
        times = 2 ** exponent
        res = min(timeit.repeat("f(arg)", setup="from __main__ import f, arg", repeat=5, number=times))
        if res > .25:
            break
    return res / times


params = sys.argv[1:]
if not params:
    params = [129, 9, 97]
for arg in params:
    print()
    print("N", arg)
    run_tests(int(arg))

Cython-0.29.28/Demos/primes.pyx

# cython: language_level=3

print("starting")

def primes(int kmax):
    # cdef int n, k, i
    cdef int p[1000]
    result = []
    if kmax > 1000:
        kmax = 1000
    k = 0
    n = 2
    while k < kmax:
        i = 0
        while i < k and n % p[i] != 0:
            i += 1
        if i == k:
            p[k] = n
            k += 1
            result.append(n)
        n += 1
    return result

Cython-0.29.28/Demos/pyprimes.py

def primes(kmax):
    p = []
    k = 0
    n = 2
    while k < kmax:
        i = 0
        while i < k and n % p[i] != 0:
            i = i + 1
        if i == k:
            p.append(n)
            k = k + 1
        n = n + 1
    return p
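
Both implementations find the first ``kmax`` primes by trial division against
the primes collected so far.  A quick check with the pure-Python version::

    from pyprimes import primes

    assert primes(10) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]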

Cython-0.29.28/Demos/run_numeric_demo.py

import numpy
import numpy_demo
a = numpy.array([1.0, 3.5, 8.4, 2.3, 6.6, 4.1], "d")
numpy_demo.sum_of_squares(a)
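
The expected result can be checked by hand:
``1.0**2 + 3.5**2 + 8.4**2 + 2.3**2 + 6.6**2 + 4.1**2 == 149.47``.  An
equivalent pure-numpy check (a sketch for illustration, not part of the
demo)::

    import numpy

    a = numpy.array([1.0, 3.5, 8.4, 2.3, 6.6, 4.1], "d")
    # Same value as numpy_demo.sum_of_squares(a), up to rounding.
    assert abs(numpy.sum(a * a) - 149.47) < 1e-9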

Cython-0.29.28/Demos/run_primes.py

from __future__ import absolute_import, print_function

import sys
from primes import primes

if len(sys.argv) >= 2:
    n = int(sys.argv[1])
else:
    n = 1000

print(primes(n))

Cython-0.29.28/Demos/run_spam.py

from __future__ import absolute_import, print_function

from spam import Spam
s = Spam()
print("Created:", s)
s.set_amount(42)
print("Amount =", s.get_amount())
s.describe()
s = None

Cython-0.29.28/Demos/setup.py

# Run as:
#    python setup.py build_ext --inplace

import sys
sys.path.insert(0, "..")

from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize

ext_modules = cythonize("**/*.pyx", exclude="numpy_*.pyx")

# Only compile the following if numpy is installed.
try:
    from numpy.distutils.misc_util import get_numpy_include_dirs
    numpy_demo = [Extension("*",
                            ["numpy_*.pyx"],
                            include_dirs=get_numpy_include_dirs())]
    ext_modules.extend(cythonize(numpy_demo))
except ImportError:
    pass

setup(
    name = 'Demos',
    ext_modules = ext_modules,
)

Cython-0.29.28/Demos/spam.pyx

# cython: language_level=3

#
# Example of an extension type.
#

cdef class Spam:
    cdef public int amount

    def __cinit__(self):
        self.amount = 0

    def __dealloc__(self):
        print(self.amount, "tons of spam is history.")

    def get_amount(self):
        return self.amount

    def set_amount(self, new_amount):
        self.amount = new_amount

    def describe(self):
        print(self.amount, "tons of spam!")

Cython-0.29.28/INSTALL.txt

Cython - Installation Instructions
==================================

You have two installation options:

(1) Run the setup.py script in this directory
    as follows:

        python setup.py install

    This will install the Cython package
    into your Python system.

OR

(2) If you prefer not to modify your Python
    installation, arrange for the directory
    containing this file (INSTALL.txt) to be in
    your PYTHONPATH. On unix, also put the bin
    directory on your PATH.

See README.txt for pointers to other documentation.

Cython-0.29.28/LICENSE.txt

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

Cython-0.29.28/MANIFEST.in

include MANIFEST.in README.rst INSTALL.txt ToDo.txt USAGE.txt CHANGES.rst
include COPYING.txt LICENSE.txt Makefile
include .gitrev
include pylintrc
include setup.py
include setupegg.py
include bin/*
include cython.py cythonize.py cygdb.py
recursive-include Cython *.pyx *.pxd
include Cython/Parser/Grammar Cython/Parser/__init__.py
include Doc/*
include Demos/*.pyx
include Demos/*.py
include Demos/callback/*
include Demos/benchmarks/*
include Demos/embed/*
include Demos/freeze/*
include Demos/libraries/*
include Demos/Makefile*
recursive-include Cython/Debugger/Tests *.pyx *.pxd *.c *.h
recursive-include Cython/Utility *.pyx *.pxd *.c *.h *.cpp
recursive-include Tools *
recursive-include tests *.pyx *.pxd *.pxi *.py *.h *.hpp *.BROKEN bugs.txt
recursive-include tests *_lib.cpp *.srctree
recursive-include docs *
include runtests.py
include Cython/Debugger/Tests/cfuncs.c
include Cython/Debugger/Tests/codefile
recursive-include pyximport *.py
include pyximport/PKG-INFO pyximport/README

Cython-0.29.28/Makefile

PACKAGENAME=Cython
PYTHON?=python
TESTOPTS?=
REPO = git://github.com/cython/cython.git
VERSION?=$(shell sed -ne 's|^__version__\s*=\s*"\([^"]*\)".*|\1|p' Cython/Shadow.py)

MANYLINUX_CFLAGS=-O3 -g0 -mtune=generic -pipe -fPIC
MANYLINUX_LDFLAGS=
MANYLINUX_IMAGES= \
	manylinux1_x86_64 \
	manylinux1_i686 \
	musllinux_1_1_x86_64 \
	manylinux_2_24_x86_64 \
	manylinux_2_24_i686 \
	manylinux_2_24_aarch64 \
#	manylinux_2_24_ppc64le \
#	manylinux_2_24_s390x

all: local

local:
	${PYTHON} setup.py build_ext --inplace

sdist: dist/$(PACKAGENAME)-$(VERSION).tar.gz

dist/$(PACKAGENAME)-$(VERSION).tar.gz:
	$(PYTHON) setup.py sdist

pywheel: dist/$(PACKAGENAME)-$(VERSION)-py2.py3-none-any.whl

dist/$(PACKAGENAME)-$(VERSION)-py2.py3-none-any.whl:
	${PYTHON} setup.py bdist_wheel --no-cython-compile --universal
	[ -f "$@" ]  # check that we generated the expected universal wheel

TMPDIR = .repo_tmp
.git: .gitrev
	rm -rf $(TMPDIR)
	git clone -n $(REPO) $(TMPDIR)
	cd $(TMPDIR) && git reset -q "$(shell cat .gitrev)"
	mv $(TMPDIR)/.git .
	rm -rf $(TMPDIR)
	git ls-files -d | xargs git checkout --

# Create a git repo from an unpacked source directory.
repo: .git

clean:
	@echo Cleaning Source
	@rm -fr build
	@rm -f *.py[co] */*.py[co] */*/*.py[co] */*/*/*.py[co]
	@rm -f *.so */*.so */*/*.so
	@rm -f *.pyd */*.pyd */*/*.pyd
	@rm -f *~ */*~ */*/*~
	@rm -f core */core
	@rm -f Cython/Compiler/*.c
	@rm -f Cython/Plex/*.c
	@rm -f Cython/Tempita/*.c
	@rm -f Cython/Runtime/refnanny.c
	@(cd Demos; $(MAKE) clean)

testclean:
	rm -fr BUILD TEST_TMP

test: testclean
	${PYTHON} runtests.py -vv ${TESTOPTS}

s5:
	$(MAKE) -C Doc/s5 slides

qemu-user-static:
	docker run --rm --privileged hypriot/qemu-register

wheel_manylinux: sdist $(addprefix wheel_,$(MANYLINUX_IMAGES))
$(addprefix wheel_,$(filter-out %_x86_64, $(filter-out %_i686, $(MANYLINUX_IMAGES)))): qemu-user-static

wheel_%: dist/$(PACKAGENAME)-$(VERSION).tar.gz
	echo "Building wheels for $(PACKAGENAME) $(VERSION)"
	mkdir -p wheelhouse_$(subst wheel_,,$@)
	time docker run --rm -t \
		-v $(shell pwd):/io \
		-e CFLAGS="$(MANYLINUX_CFLAGS)" \
		-e LDFLAGS="$(MANYLINUX_LDFLAGS) -fPIC" \
		-e WHEELHOUSE=wheelhouse$(subst wheel_musllinux,,$(subst wheel_manylinux,,$@)) \
		quay.io/pypa/$(subst wheel_,,$@) \
		bash -c 'for PYBIN in /opt/python/cp*/bin; do \
			$$PYBIN/python -V; \
			{ $$PYBIN/pip wheel -w /io/$$WHEELHOUSE /io/$< & } ; \
			done; wait; \
			for whl in /io/$$WHEELHOUSE/$(PACKAGENAME)-$(VERSION)-*-linux_*.whl; do auditwheel repair $$whl -w /io/$$WHEELHOUSE; done'

Cython-0.29.28/PKG-INFO

Metadata-Version: 2.1
Name: Cython
Version: 0.29.28
Summary: The Cython compiler for writing C extensions for the Python language.
Home-page: http://cython.org/
Author: Robert Bradshaw, Stefan Behnel, Dag Seljebotn, Greg Ewing, et al.
Author-email: cython-devel@python.org
License: Apache
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Programming Language :: C
Classifier: Programming Language :: Cython
Classifier: Topic :: Software Development :: Code Generators
Classifier: Topic :: Software Development :: Compilers
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*
License-File: LICENSE.txt
License-File: COPYING.txt
The Cython language makes writing C extensions for the Python language as
easy as Python itself. Cython is a source code translator based on Pyrex_,
but supports more cutting edge functionality and optimizations.
The Cython language is a superset of the Python language (almost all Python
code is also valid Cython code), but Cython additionally supports optional
static typing to natively call C functions, operate with C++ classes and
declare fast C types on variables and class attributes. This allows the
compiler to generate very efficient C code from Cython code.
This makes Cython the ideal language for writing glue code for external
C/C++ libraries, and for fast C modules that speed up the execution of
Python code.
Note that for one-time builds, e.g. for CI/testing, on platforms that are not
covered by one of the wheel packages provided on PyPI *and* the pure Python wheel
that we provide is not used, it is substantially faster than a full source build
to install an uncompiled (slower) version of Cython with::

    pip install Cython --install-option="--no-cython-compile"
.. _Pyrex: http://www.cosc.canterbury.ac.nz/greg.ewing/python/Pyrex/

Cython-0.29.28/README.rst

Welcome to Cython!
==================
Cython is a language that makes writing C extensions for
Python as easy as Python itself. Cython is based on
Pyrex, but supports more cutting edge functionality and
optimizations.
The Cython language is very close to the Python language, but Cython
additionally supports calling C functions and declaring C types on variables
and class attributes. This allows the compiler to generate very efficient C
code from Cython code.
This makes Cython the ideal language for wrapping external C libraries, and
for fast C modules that speed up the execution of Python code.
* Official website: http://cython.org/
* Documentation: http://docs.cython.org/en/latest/
* Github repository: https://github.com/cython/cython
* Wiki: https://github.com/cython/cython/wiki
Installation:
-------------
If you already have a C compiler, just do::

    pip install Cython
otherwise, see `the installation page `_.
License:
--------
The original Pyrex program was licensed "free of restrictions" (see below).
Cython itself is licensed under the permissive **Apache License**.
See `LICENSE.txt `_.
Contributing:
-------------
Want to contribute to the Cython project?
Here is some `help to get you started `_.
Get the full source history:
----------------------------
Note that Cython used to ship the full version control repository in its source
distribution, but no longer does so due to space constraints. To get the
full source history from a downloaded source archive, make sure you have git
installed, then step into the base directory of the Cython source distribution
and type::

    make repo
The following is from Pyrex:
------------------------------------------------------
This is a development version of Pyrex, a language
for writing Python extension modules.
For more info, see:
* Doc/About.html for a description of the language
* INSTALL.txt for installation instructions
* USAGE.txt for usage instructions
* Demos for usage examples
Comments, suggestions, bug reports, etc. are
welcome!
Copyright stuff: Pyrex is free of restrictions. You
may use, redistribute, modify and distribute modified
versions.
The latest version of Pyrex can be found `here `_.
| Greg Ewing, Computer Science Dept
| University of Canterbury
| Christchurch, New Zealand
A citizen of NewZealandCorp, a wholly-owned subsidiary of USA Inc.

Cython-0.29.28/ToDo.txt

See http://trac.cython.org/cython_trac and http://wiki.cython.org/enhancements
-- The Original Pyrex Todo List --
DONE - Pointer-to-function types.
DONE - Nested declarators.
DONE - Varargs C func defs and calls.
DONE - * and ** args in Python func defs.
DONE - Default argument values.
DONE - Tracebacks.
DONE - Disallow creating char * from Python temporary anywhere
(not just on assignment).
DONE - Module and function and class doc strings.
DONE - Predeclare C functions.
DONE - Constant expressions.
DONE - Forward C struct declarations.
DONE - Prefix & operator.
DONE - Get rid of auto string->char coercion and
add a c'X' syntax for char literals.
DONE - Cascaded assignments (a = b = c).
DONE - 'include' statement for including other Pyrex files.
DONE - Add command line option for specifying name of generated C file.
DONE - Add 'else' clause to try-except.
DONE - Allow extension types to be declared "public" so they
can be accessed from another Pyrex module or a C file.
DONE - Don't try to generate objstruct definition for external
extension type declared without suite (treat as though
declared with empty suite).
DONE - Implement two-argument form of 'assert' statement.
Const types.
Tuple/list construction: Evaluate & store items one at a time?
Varargs argument traversal.
Use PyDict_SetItemString to build keyword arg dicts?
(Or wait until names are interned.)
Intern names.
print >>file
abs() and anything similar.
Semicolon-separated statement lists.
Optional semicolons after C declarations.
Multiple C declarations on one line?
Optimise return without value outside of try-finally.
exec statement.
from ... import statement.
Use iterator protocol for unpacking.
Save & restore exception being handled on function entry/exit.
In-place operators (+=, etc).
Constant declarations. Syntax?
DONE - Some way for C functions to signal Python errors?
Check for lack of return with value in non-void C functions?
Allow 'pass' in struct/union/enum definition.
Make C structs callable as constructors.
DONE - Provide way of specifying C names.
DONE - Public cdefs.
When calling user __dealloc__ func, save & restore exception.
DONE - Forward declaration of extension types.
Complex number parsetuple format?
DONE - long long type
DONE - long double type?
Windows __fooblarg function declaration things.
Generate type, var and func declarations in the same order that
they appear in the source file.
Provide a way of declaring a C function as returning a
borrowed Python reference.
Provide a way of specifying whether a Python object obtained
by casting a pointer should be treated as a new reference
or not.
Optimize integer for-loops.
Make sizeof() take types as well as variables.
Allow "unsigned" to be used alone as a type name.
Allow duplicate declarations, at least in extern-from.
Do something about installing proper version of pyrexc
script according to platform in setup.py.
DONE - Add "-o filename" command line option to unix/dos versions.
Recognise #line directives?
Catch floating point exceptions?
Check that forward-declared non-external extension types
are defined.
Generate type test when casting from one Python type
to another.
Generate a Pyrex include file for public declarations
as well as a C one.
Syntax for defining indefinite-sized int & float types.
Allow ranges of exception values.
Support "complex double" and "complex float"?
Allow module-level Python variables to be declared extern.
Consider:
>cdef extern from "foo.h":
> int dosomething() except -1 raise MyException
Properties for Python types.
DONE - Properties for extension types.
Find a way to make classmethod and staticmethod work better.
DONE - Document workarounds for classmethod and staticmethod.
Statically initialised C arrays & structs.
Reduce generation of unused vars and unreachable code?
Support for acquiring and releasing GIL.
Make docstrings of extension type special methods work.
Treat result of getting C attribute of extension type as non-ephemeral.
Make None a reserved identifier.
Teach it about builtin functions that correspond to
Python/C API calls.
Teach it about common builtin types.
Option for generating a main() function?
DONE - Allow an extension type to inherit from another type.
Do something about external C functions declared as returning
const * types?
Use PyString_FromStringAndSize for string literals?
DONE - C functions as methods of extension types.
What to do about __name__ etc. attributes of a module (they are
currently assumed to be built-in names).
Use PyDict_GetItem etc. on module & builtins dicts for speed.
Intern all string literals used as Python strings?
[Koshy ]
Make extension types weak-referenceable.
[Matthias Baas ]
Make 'pass' work in the body of an extern-from struct
or union.
Disallow a filename which results in an illegal identifier when
used as a module name.
Use ctypedef names.
Provide an easy way of exposing a set of enum values as Python names.
[John J Lee ]
Prevent user from returning a value from special methods that
return an error indicator only.
Use PyObject_TypeCheck instead of PyObject_IsInstance?
Allow * in cimport? [John J Lee ]
FAQ: Q. Pyrex says my extension type object has no attribute 'rhubarb', but
I know it does.
A. Have you declared the type at the point where you're using it?
Eliminate lvalue casts! (Illegal in C++, also disallowed by some C compilers)
[Matthias Baas ]
Make Python class construction work more like it does in Python.
Give the right module name to Python classes.
Command line switch for full pathnames in backtraces?
Use PyString_FromStringAndSize on string literals containing
nulls.
Peephole optimisation? [Vladislav Bulatov ]
Avoid PyArg_ParseTuple call when a function takes no positional args.
Omit incref/decref of arguments that are not assigned to?
Can a faster way of instantiating extension types be found?
Disallow declaring a special method of an extension type with
'cdef' instead of 'def'.
Use PySequence_GetItem instead of PyObject_GetItem when index
is an integer.
If a __getitem__ method is declared with an int index, use the
sq_item slot instead of the mp_subscript slot.
Provide some way of controlling the argument list passed to
an extension type's base __new__ method?
[Alain Pointdexter ]
Rename __new__ in extension types to __alloc__.
Implement a true __new__ for extension types.
Way to provide constructors for extension types that are not
available to Python and can accept C types directly?
Support generators by turning them into extension types?
List comprehensions.
Variable declarations inside inner code blocks?
Initial values when declaring variables?
Do something about __stdcall.
Support class methods in extension types using METH_CLASS flag.
Disallow defaulting types to 'object' in C declarations?
C globals with static initialisers.
Find a way of providing C-only initialisers for extension types.
Metaclasses for extension types?
Make extension types use Py_TPFLAGS_HEAPTYPE so their __module__
will get set dynamically?
Cython-0.29.28/Tools/
Cython-0.29.28/Tools/BUILD.bazel
Cython-0.29.28/Tools/cevaltrace.py
#!/usr/bin/env python3
"""
Translate the byte code of a Python function into the corresponding
sequences of C code in CPython's "ceval.c".
"""
from __future__ import print_function, absolute_import
import re
import os.path
from dis import get_instructions # requires Python 3.4+
# collapse some really boring byte codes
_COLLAPSE = {'NOP', 'LOAD_CONST', 'POP_TOP', 'JUMP_FORWARD'}
#_COLLAPSE.clear()
_is_start = re.compile(r"\s* switch \s* \( opcode \)", re.VERBOSE).match
# Py3: TARGET(XX), Py2: case XX
_match_target = re.compile(r"\s* (?: TARGET \s* \( | case \s* ) \s* (\w+) \s* [:)]", re.VERBOSE).match
_ignored = re.compile(r"\s* PREDICTED[A-Z_]*\(", re.VERBOSE).match
_is_end = re.compile(r"\s* } \s* /\* \s* switch \s* \*/", re.VERBOSE).match
_find_pyversion = re.compile(r'\#define \s+ PY_VERSION \s+ "([^"]+)"', re.VERBOSE).findall
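# For reference, the kinds of ceval.c lines the patterns above are meant
# to match (illustrative):
#   Py3:  TARGET(LOAD_FAST) {
#   Py2:  case LOAD_FAST: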
class ParseError(Exception):
def __init__(self, message="Failed to parse ceval.c"):
super(ParseError, self).__init__(message)
def parse_ceval(file_path):
snippets = {}
with open(file_path) as f:
lines = iter(f)
for line in lines:
if _is_start(line):
break
else:
raise ParseError()
targets = []
code_lines = []
for line in lines:
target_match = _match_target(line)
if target_match:
if code_lines:
code = ''.join(code_lines).rstrip()
for target in targets:
snippets[target] = code
del code_lines[:], targets[:]
targets.append(target_match.group(1))
elif _ignored(line):
pass
elif _is_end(line):
break
else:
code_lines.append(line)
else:
if not snippets:
raise ParseError()
return snippets
def translate(func, ceval_snippets):
start_offset = 0
code_obj = getattr(func, '__code__', None)
if code_obj and os.path.exists(code_obj.co_filename):
start_offset = code_obj.co_firstlineno
with open(code_obj.co_filename) as f:
code_line_at = {
i: line.strip()
for i, line in enumerate(f, 1)
if line.strip()
}.get
else:
code_line_at = lambda _: None
for instr in get_instructions(func):
code_line = code_line_at(instr.starts_line)
line_no = (instr.starts_line or start_offset) - start_offset
yield line_no, code_line, instr, ceval_snippets.get(instr.opname)
def main():
import sys
import importlib.util
if len(sys.argv) < 3:
print("Usage: %s path/to/Python/ceval.c script.py ..." % sys.argv[0], file=sys.stderr)
return
ceval_source_file = sys.argv[1]
version_header = os.path.join(os.path.dirname(ceval_source_file), '..', 'Include', 'patchlevel.h')
if os.path.exists(version_header):
with open(version_header) as f:
py_version = _find_pyversion(f.read())
if py_version:
py_version = py_version[0]
if not sys.version.startswith(py_version + ' '):
print("Warning: disassembling with Python %s, but ceval.c has version %s" % (
sys.version.split(None, 1)[0],
py_version,
), file=sys.stderr)
snippets = parse_ceval(ceval_source_file)
for code in _COLLAPSE:
if code in snippets:
snippets[code] = ''
for file_path in sys.argv[2:]:
module_name = os.path.basename(file_path)
print("/*######## MODULE %s ########*/" % module_name)
print('')
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
for func_name, item in sorted(vars(module).items()):
if not callable(item):
continue
print("/* FUNCTION %s */" % func_name)
print("static void") # assuming that it highlights in editors
print("%s() {" % func_name)
last_line = None
for line_no, code_line, instr, snippet in translate(item, snippets):
if last_line != line_no:
if code_line:
print('')
print('/*# %3d %s */' % (line_no, code_line))
print('')
last_line = line_no
print(" %s:%s {%s" % (
instr.opname,
' /* %s */' % instr.argrepr if instr.arg is not None else '',
' /* ??? */' if snippet is None else ' /* ... */ }' if snippet == '' else '',
))
print(snippet or '')
print("} /* FUNCTION %s */" % func_name)
if __name__ == '__main__':
main()
Cython-0.29.28/Tools/ci-run.sh
#!/usr/bin/bash
GCC_VERSION=${GCC_VERSION:=8}
# Set up compilers
if [[ $TEST_CODE_STYLE == "1" ]]; then
echo "Skipping compiler setup"
elif [[ $OSTYPE == "linux-gnu"* ]]; then
echo "Setting up linux compiler"
echo "Installing requirements [apt]"
sudo apt-add-repository -y "ppa:ubuntu-toolchain-r/test"
sudo apt update -y -q
sudo apt install -y -q ccache gdb python-dbg python3-dbg gcc-$GCC_VERSION || exit 1
ALTERNATIVE_ARGS=""
if [[ $BACKEND == *"cpp"* ]]; then
sudo apt install -y -q g++-$GCC_VERSION || exit 1
ALTERNATIVE_ARGS="--slave /usr/bin/g++ g++ /usr/bin/g++-$GCC_VERSION"
fi
sudo /usr/sbin/update-ccache-symlinks
echo "/usr/lib/ccache" >> $GITHUB_PATH # export ccache to path
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-$GCC_VERSION 60 $ALTERNATIVE_ARGS
export CC="gcc"
if [[ $BACKEND == *"cpp"* ]]; then
sudo update-alternatives --set g++ /usr/bin/g++-$GCC_VERSION
export CXX="g++"
fi
elif [[ $OSTYPE == "darwin"* ]]; then
echo "Setting up macos compiler"
export CC="clang -Wno-deprecated-declarations"
export CXX="clang++ -stdlib=libc++ -Wno-deprecated-declarations"
else
echo "No setup specified for $OSTYPE"
fi
# Set up miniconda
if [[ $STACKLESS == "true" ]]; then
echo "Installing stackless python"
#conda install --quiet --yes nomkl --file=test-requirements.txt --file=test-requirements-cpython.txt
conda config --add channels stackless
conda install --quiet --yes stackless || exit 1
fi
PYTHON_SYS_VERSION=$(python -c 'import sys; print(sys.version)')
# Log versions in use
echo "===================="
echo "|VERSIONS INSTALLED|"
echo "===================="
echo "Python $PYTHON_SYS_VERSION"
if [[ $CC ]]; then
which ${CC%% *}
${CC%% *} --version
fi
if [[ $CXX ]]; then
which ${CXX%% *}
${CXX%% *} --version
fi
echo "===================="
# Install python requirements
echo "Installing requirements [python]"
if [[ $PYTHON_VERSION == "2.7"* ]]; then
pip install wheel || exit 1
pip install -r test-requirements-27.txt || exit 1
elif [[ $PYTHON_VERSION == "3."[45]* ]]; then
python -m pip install wheel || exit 1
python -m pip install -r test-requirements-34.txt || exit 1
else
python -m pip install -U pip setuptools wheel || exit 1
if [[ $PYTHON_VERSION != *"-dev" || $COVERAGE == "1" ]]; then
python -m pip install -r test-requirements.txt || exit 1
if [[ $PYTHON_VERSION != "pypy"* && $PYTHON_VERSION != "3."[1]* ]]; then
python -m pip install -r test-requirements-cpython.txt || exit 1
fi
fi
fi
if [[ $TEST_CODE_STYLE == "1" ]]; then
STYLE_ARGS="--no-unit --no-doctest --no-file --no-pyregr --no-examples"
python -m pip install -r doc-requirements.txt || exit 1
else
STYLE_ARGS="--no-code-style"
# Install more requirements
if [[ $PYTHON_VERSION != *"-dev" ]]; then
if [[ $BACKEND == *"cpp"* ]]; then
echo "WARNING: Currently not installing pythran due to compatibility issues"
# python -m pip install pythran==0.9.5 || exit 1
fi
if [[ $BACKEND != "cpp" && $PYTHON_VERSION != "pypy"* &&
$PYTHON_VERSION != "2"* && $PYTHON_VERSION != "3.4"* ]]; then
python -m pip install mypy || exit 1
fi
fi
fi
# Run tests
echo "==== Running tests ===="
ccache -s 2>/dev/null || true
export PATH="/usr/lib/ccache:$PATH"
# Most modern compilers allow the last conflicting option
# to override the previous ones, so '-O0 -O3' == '-O3'
# This is true for the latest msvc, gcc and clang
CFLAGS="-O0 -ggdb -Wall -Wextra"
if [[ $NO_CYTHON_COMPILE != "1" && $PYTHON_VERSION != "pypy"* ]]; then
BUILD_CFLAGS="$CFLAGS -O2"
if [[ $PYTHON_SYS_VERSION == "2"* ]]; then
BUILD_CFLAGS="$BUILD_CFLAGS -fno-strict-aliasing"
fi
SETUP_ARGS=""
if [[ $COVERAGE == "1" ]]; then
SETUP_ARGS="$SETUP_ARGS --cython-coverage"
fi
if [[ $CYTHON_COMPILE_ALL == "1" ]]; then
SETUP_ARGS="$SETUP_ARGS --cython-compile-all"
fi
#SETUP_ARGS="$SETUP_ARGS
# $(python -c 'import sys; print("-j5" if sys.version_info >= (3,5) else "")')"
CFLAGS=$BUILD_CFLAGS \
python setup.py build_ext -i $SETUP_ARGS || exit 1
# COVERAGE can be either "" (empty or not set) or "1" (when we set it)
# STACKLESS can be either "" (empty or not set) or "true" (when we set it)
# CYTHON_COMPILE_ALL can be either "" (empty or not set) or "1" (when we set it)
if [[ $COVERAGE != "1" && $STACKLESS != "true" && $BACKEND != *"cpp"* &&
$CYTHON_COMPILE_ALL != "1" && $LIMITED_API == "" && $EXTRA_CFLAGS == "" ]]; then
python setup.py bdist_wheel || exit 1
fi
fi
if [[ $TEST_CODE_STYLE == "1" ]]; then
make -C docs html || echo "FIXME: docs build failed!"
elif [[ $PYTHON_VERSION != "pypy"* ]]; then
# Run the debugger tests in python-dbg if available
# (but don't fail, because they currently do fail)
PYTHON_DBG=$(python -c 'import sys; print("%d.%d" % sys.version_info[:2])')
PYTHON_DBG="python$PYTHON_DBG-dbg"
if $PYTHON_DBG -V >&2; then
CFLAGS=$CFLAGS $PYTHON_DBG \
runtests.py -vv --no-code-style Debugger --backends=$BACKEND
fi
fi
RUNTESTS_ARGS=""
if [[ $COVERAGE == "1" ]]; then
RUNTESTS_ARGS="$RUNTESTS_ARGS --coverage --coverage-html --cython-only"
fi
if [[ $TEST_CODE_STYLE != "1" ]]; then
RUNTESTS_ARGS="$RUNTESTS_ARGS -j7"
fi
export CFLAGS="$CFLAGS $EXTRA_CFLAGS"
python runtests.py \
-vv $STYLE_ARGS \
-x Debugger \
--backends=$BACKEND \
$LIMITED_API \
$EXCLUDE \
$RUNTESTS_ARGS
EXIT_CODE=$?
ccache -s 2>/dev/null || true
exit $EXIT_CODE
Cython-0.29.28/Tools/cystdlib.py
"""
Highly experimental script that compiles the CPython standard library using Cython.
Execute the script either in the CPython 'Lib' directory or pass the
option '--current-python' to compile the standard library of the running
Python interpreter.
Pass '-j N' to get a parallel build with N processes.
Usage example::
$ python cystdlib.py --current-python build_ext -i
"""
import os
import sys
from distutils.core import setup
from Cython.Build import cythonize
from Cython.Compiler import Options
# improve Python compatibility by allowing some broken code
Options.error_on_unknown_names = False
Options.error_on_uninitialized = False
exclude_patterns = ['**/test/**/*.py', '**/tests/**/*.py', '**/__init__.py']
broken = [
'idlelib/MultiCall.py',
'email/utils.py',
'multiprocessing/reduction.py',
'multiprocessing/util.py',
'threading.py', # interrupt handling
'lib2to3/fixes/fix_sys_exc.py',
'traceback.py',
'types.py',
'enum.py',
'keyword.py',
'_collections_abc.py',
'importlib/_bootstrap',
]
default_directives = dict(
auto_cpdef=False, # enable when it's safe, see long list of failures below
binding=True,
set_initial_path='SOURCEFILE')
default_directives['optimize.inline_defnode_calls'] = True
special_directives = [
(['pkgutil.py',
'decimal.py',
'datetime.py',
'optparse.py',
'sndhdr.py',
'opcode.py',
'ntpath.py',
'urllib/request.py',
'plat-*/TYPES.py',
'plat-*/IN.py',
'tkinter/_fix.py',
'lib2to3/refactor.py',
'webbrowser.py',
'shutil.py',
'multiprocessing/forking.py',
'xml/sax/expatreader.py',
'xmlrpc/client.py',
'pydoc.py',
'xml/etree/ElementTree.py',
'posixpath.py',
'inspect.py',
'ctypes/util.py',
'urllib/parse.py',
'warnings.py',
'tempfile.py',
'trace.py',
'heapq.py',
'pickletools.py',
'multiprocessing/connection.py',
'hashlib.py',
'getopt.py',
'os.py',
'types.py',
], dict(auto_cpdef=False)),
]
del special_directives[:] # currently unused
def build_extensions(includes='**/*.py',
excludes=None,
special_directives=special_directives,
language_level=sys.version_info[0],
parallel=None):
if isinstance(includes, str):
includes = [includes]
excludes = list(excludes or exclude_patterns) + broken
all_groups = (special_directives or []) + [(includes, {})]
extensions = []
for modules, directives in all_groups:
exclude_now = excludes[:]
for other_modules, _ in special_directives:
if other_modules != modules:
exclude_now.extend(other_modules)
d = dict(default_directives)
d.update(directives)
extensions.extend(
cythonize(
modules,
exclude=exclude_now,
exclude_failures=True,
language_level=language_level,
compiler_directives=d,
nthreads=parallel,
))
return extensions
def build(extensions):
try:
setup(ext_modules=extensions)
result = True
except:
import traceback
print('error building extensions %s' % (
[ext.name for ext in extensions],))
traceback.print_exc()
result = False
return extensions, result
def _build(args):
sys_args, ext = args
sys.argv[1:] = sys_args
return build([ext])
def parse_args():
from optparse import OptionParser
parser = OptionParser('%prog [options] [LIB_DIR (default: ./Lib)]')
parser.add_option(
'--current-python', dest='current_python', action='store_true',
help='compile the stdlib of the running Python')
parser.add_option(
'-j', '--jobs', dest='parallel_jobs', metavar='N',
type=int, default=1,
help='run builds in N parallel jobs (default: 1)')
parser.add_option(
'-x', '--exclude', dest='excludes', metavar='PATTERN',
action="append", help='exclude modules/packages matching PATTERN')
options, args = parser.parse_args()
if not args:
args = ['./Lib']
elif len(args) > 1:
parser.error('only one argument expected, got %d' % len(args))
return options, args
if __name__ == '__main__':
options, args = parse_args()
if options.current_python:
# assume that the stdlib is where the "os" module lives
os.chdir(os.path.dirname(os.__file__))
else:
os.chdir(args[0])
pool = None
parallel_jobs = options.parallel_jobs
if options.parallel_jobs:
try:
import multiprocessing
pool = multiprocessing.Pool(parallel_jobs)
print("Building in %d parallel processes" % parallel_jobs)
except (ImportError, OSError):
print("Not building in parallel")
parallel_jobs = 0
extensions = build_extensions(
parallel=parallel_jobs,
excludes=options.excludes)
sys_args = ['build_ext', '-i']
if pool is not None:
results = pool.map(_build, [(sys_args, ext) for ext in extensions])
pool.close()
pool.join()
for ext, result in results:
if not result:
print("building extension %s failed" % (ext[0].name,))
else:
sys.argv[1:] = sys_args
build(extensions)
Cython-0.29.28/Tools/cython-epydoc.py
#! /usr/bin/env python
# --------------------------------------------------------------------
import re
from epydoc import docstringparser as dsp
CYTHON_SIGNATURE_RE = re.compile(
# Class name (for builtin methods)
r'^\s*((?P<class>\w+)\.)?' +
# The function name
r'(?P<func>\w+)' +
# The parameters
r'\(((?P<self>(?:self|cls|mcs)),?)?(?P<params>.*)\)' +
# The return value (optional)
r'(\s*(->)\s*(?P<return>\w+(?:\s*\w+)))?' +
# The end marker
r'\s*(?:\n|$)')
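# An example of a signature line this pattern is intended to match
# (illustrative only):
#   MyType.method(self, int n) -> int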
parse_signature = dsp.parse_function_signature
def parse_function_signature(func_doc, doc_source,
docformat, parse_errors):
PYTHON_SIGNATURE_RE = dsp._SIGNATURE_RE
assert PYTHON_SIGNATURE_RE is not CYTHON_SIGNATURE_RE
try:
dsp._SIGNATURE_RE = CYTHON_SIGNATURE_RE
found = parse_signature(func_doc, doc_source,
docformat, parse_errors)
dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE
if not found:
found = parse_signature(func_doc, doc_source,
docformat, parse_errors)
return found
finally:
dsp._SIGNATURE_RE = PYTHON_SIGNATURE_RE
dsp.parse_function_signature = parse_function_signature
# --------------------------------------------------------------------
from epydoc.cli import cli
cli()
# --------------------------------------------------------------------
Cython-0.29.28/Tools/cython-mode.el
;;; cython-mode.el --- Major mode for editing Cython files
;; License: Apache-2.0
;;; Commentary:
;; This should work with python-mode.el as well as either the new
;; python.el or the old.
;;; Code:
;; Load python-mode if available, otherwise use builtin emacs python package
(when (not (require 'python-mode nil t))
(require 'python))
(eval-when-compile (require 'rx))
;;;###autoload
(add-to-list 'auto-mode-alist '("\\.pyx\\'" . cython-mode))
;;;###autoload
(add-to-list 'auto-mode-alist '("\\.pxd\\'" . cython-mode))
;;;###autoload
(add-to-list 'auto-mode-alist '("\\.pxi\\'" . cython-mode))
(defvar cython-buffer nil
"Variable pointing to the cython buffer which was compiled.")
(defun cython-compile ()
"Compile the file via Cython."
(interactive)
(let ((cy-buffer (current-buffer)))
(with-current-buffer
(compile compile-command)
(set (make-local-variable 'cython-buffer) cy-buffer)
(add-to-list (make-local-variable 'compilation-finish-functions)
'cython-compilation-finish))))
(defun cython-compilation-finish (buffer how)
"Called when Cython compilation finishes."
;; XXX could annotate source here
)
(defvar cython-mode-map
(let ((map (make-sparse-keymap)))
;; Will inherit from `python-mode-map' thanks to define-derived-mode.
(define-key map "\C-c\C-c" 'cython-compile)
map)
"Keymap used in `cython-mode'.")
(defvar cython-font-lock-keywords
`(;; ctypedef statement: "ctypedef (...type... alias)?"
(,(rx
;; keyword itself
symbol-start (group "ctypedef")
;; type specifier: at least 1 non-identifier symbol + 1 identifier
;; symbol and anything but a comment-starter after that.
(opt (regexp "[^a-zA-Z0-9_\n]+[a-zA-Z0-9_][^#\n]*")
;; type alias: an identifier
symbol-start (group (regexp "[a-zA-Z_]+[a-zA-Z0-9_]*"))
;; space-or-comments till the end of the line
(* space) (opt "#" (* nonl)) line-end))
(1 font-lock-keyword-face)
(2 font-lock-type-face nil 'noerror))
;; new keywords in Cython language
(,(rx symbol-start
(or "by" "cdef" "cimport" "cpdef"
"extern" "gil" "include" "nogil" "property" "public"
"readonly" "DEF" "IF" "ELIF" "ELSE"
"new" "del" "cppclass" "namespace" "const"
"__stdcall" "__cdecl" "__fastcall" "inline" "api")
symbol-end)
. font-lock-keyword-face)
;; Question mark won't match at a symbol-end, so 'except?' must be
;; special-cased. It's simpler to handle it separately than weaving it
;; into the lengthy list of other keywords.
(,(rx symbol-start "except?") . font-lock-keyword-face)
;; C and Python types (highlight as builtins)
(,(rx symbol-start
(or
"object" "dict" "list"
;; basic c type names
"void" "char" "int" "float" "double" "bint"
;; longness/signed/constness
"signed" "unsigned" "long" "short"
;; special basic c types
"size_t" "Py_ssize_t" "Py_UNICODE" "Py_UCS4" "ssize_t" "ptrdiff_t")
symbol-end)
. font-lock-builtin-face)
(,(rx symbol-start "NULL" symbol-end)
. font-lock-constant-face)
;; cdef is used for more than functions, so simply highlighting the next
;; word is problematic. struct, enum and property work though.
(,(rx symbol-start
(group (or "struct" "enum" "union"
(seq "ctypedef" (+ space "fused"))))
(+ space) (group (regexp "[a-zA-Z_]+[a-zA-Z0-9_]*")))
(1 font-lock-keyword-face prepend) (2 font-lock-type-face))
("\\_ (current-indentation) block-indentation)
(or (cython-end-of-statement) t))
;; comment or empty line
(looking-at (rx (0+ space) (or eol "#"))))))
(forward-comment -1))
;; Count trailing space in defun (but not trailing comments).
(skip-syntax-forward " >")
(unless (eobp) ; e.g. missing final newline
(beginning-of-line)))
;; Catch pathological cases like this, where the beginning-of-defun
;; skips to a definition we're not in:
;; if ...:
;; ...
;; else:
;; ... # point here
;; ...
;; def ...
(if (< (point) orig)
(goto-char (point-max)))))
(defun cython-current-defun ()
"`add-log-current-defun-function' for Cython."
(save-excursion
;; Move up the tree of nested `class' and `def' blocks until we
;; get to zero indentation, accumulating the defined names.
(let ((start t)
accum)
(while (or start (> (current-indentation) 0))
(setq start nil)
(cython-beginning-of-block)
(end-of-line)
(beginning-of-defun)
(if (looking-at (rx (0+ space) (or "def" "cdef" "cpdef" "class") (1+ space)
(group (1+ (or word (syntax symbol))))))
(push (match-string 1) accum)))
(if accum (mapconcat 'identity accum ".")))))
;;;###autoload
(define-derived-mode cython-mode python-mode "Cython"
"Major mode for Cython development, derived from Python mode.
\\{cython-mode-map}"
(font-lock-add-keywords nil cython-font-lock-keywords)
(set (make-local-variable 'outline-regexp)
(rx (* space) (or "class" "def" "cdef" "cpdef" "elif" "else" "except" "finally"
"for" "if" "try" "while" "with")
symbol-end))
(set (make-local-variable 'beginning-of-defun-function)
#'cython-beginning-of-defun)
(set (make-local-variable 'end-of-defun-function)
#'cython-end-of-defun)
(set (make-local-variable 'compile-command)
(format cython-default-compile-format (shell-quote-argument (or buffer-file-name ""))))
(set (make-local-variable 'add-log-current-defun-function)
#'cython-current-defun)
(add-hook 'which-func-functions #'cython-current-defun nil t)
(add-to-list (make-local-variable 'compilation-finish-functions)
'cython-compilation-finish))
(provide 'cython-mode)
;;; cython-mode.el ends here
Cython-0.29.28/Tools/cython-numpy-mode-kate.xml
- as
- cimport
- import
- from
- as
- DEF
- IF
- ELIF
- ELSE
- include
- class
- cpdef
- def
- cdef
- ctypedef
- extern
- api
- del
- global
- property
- nogil
- gil
- inline
- readonly
- public
- and
- assert
- in
- is
- by
- not
- or
- sizeof
- print
- break
- continue
- elif
- else
- except
- finally
- for
- if
- pass
- raise
- return
- try
- while
- with
- __import__
- abs
- all
- any
- apply
- basestring
- buffer
- callable
- chr
- classmethod
- cmp
- coerce
- compile
- delattr
- dir
- divmod
- enumerate
- eval
- execfile
- filter
- getattr
- hasattr
- hash
- hex
- id
- input
- intern
- isinstance
- issubclass
- iter
- len
- map
- max
- min
- oct
- open
- ord
- pow
- range
- raw_input
- reduce
- reload
- repr
- reversed
- round
- setattr
- sorted
- staticmethod
- sum
- super
- type
- unichr
- unicode
- xrange
- zip
- unsigned
- void
- enum
- double
- long
- short
- char
- Py_ssize_t
- Py_intptr_t
- Py_buffer
- bint
- struct
- union
- enum
- int
- float
- object
- list
- tuple
- str
- dict
- set
- frozenset
- slice
- bool
- complex
- file
- np
- numpy
- numpy
- cython
- dtype
- flatiter
- broadcast
- ndarray
- int8_t
- int16_t
- int32_t
- int64_t
- uint8_t
- uint16_t
- uint32_t
- uint64_t
- float32_t
- float64_t
- complex64_t
- complex128_t
- int_t
- long_t
- uint_t
- ulong_t
- float_t
- double_t
- longdouble_t
- cfloat_t
- cdouble_t
- clongdouble_t
- complex_t
- npy_int8
- npy_int16
- npy_int32
- npy_int64
- npy_int96
- npy_int128
- npy_uint8
- npy_uint16
- npy_uint32
- npy_uint64
- npy_uint96
- npy_uint128
- npy_float32
- npy_float64
- npy_float80
- npy_float96
- npy_float128
- npy_complex64
- npy_complex128
- npy_complex120
- npy_complex192
- npy_complex256
- npy_cfloat
- npy_cdouble
- npy_clongdouble
- npy_bool
- npy_byte
- npy_short
- npy_int
- npy_long
- npy_longlong
- npy_ubyte
- npy_ushort
- npy_uint
- npy_ulong
- npy_ulonglong
- npy_float
- npy_double
- npy_longdouble
- npy_intp
- DataSource
- MachAr
- PackageLoader
- RankWarning
- Tester
- abs
- absolute
- add
- add_docstring
- add_newdoc
- alen
- all
- allclose
- alltrue
- alterdot
- amax
- amin
- angle
- any
- append
- apply_along_axis
- apply_over_axes
- arange
- arccos
- arccosh
- arcsin
- arcsinh
- arctan
- arctan2
- arctanh
- argmax
- argmin
- argsort
- argwhere
- around
- array
- array2string
- array_equal
- array_equiv
- array_repr
- array_split
- array_str
- asanyarray
- asarray
- asarray_chkfinite
- ascontiguousarray
- asfarray
- asfortranarray
- asmatrix
- asscalar
- atleast_1d
- atleast_2d
- atleast_3d
- average
- bartlett
- base_repr
- bench
- binary_repr
- bincount
- bitwise_and
- bitwise_not
- bitwise_or
- bitwise_xor
- blackman
- bmat
- bool
- bool8
- bool_
- broadcast
- broadcast_arrays
- byte
- byte_bounds
- can_cast
- cdouble
- ceil
- cfloat
- character
- chararray
- choose
- clip
- clongdouble
- clongfloat
- column_stack
- common_type
- compare_chararrays
- complex
- complex128
- complex192
- complex64
- complex_
- complexfloating
- compress
- concatenate
- conj
- conjugate
- convolve
- copy
- corrcoef
- correlate
- cos
- cosh
- cov
- cross
- csingle
- cumprod
- cumproduct
- cumsum
- deg2rad
- degrees
- delete
- deprecate
- deprecate_with_doc
- diag
- diagflat
- diagonal
- diff
- digitize
- disp
- divide
- dot
- double
- dsplit
- dstack
- dtype
- ediff1d
- empty
- empty_like
- equal
- errstate
- exp
- exp2
- expand_dims
- expm1
- extract
- eye
- fabs
- fastCopyAndTranspose
- find_common_type
- finfo
- fix
- flatiter
- flatnonzero
- flexible
- fliplr
- flipud
- float
- float32
- float64
- float96
- float_
- floating
- floor
- floor_divide
- fmax
- fmin
- fmod
- frexp
- frombuffer
- fromfile
- fromfunction
- fromiter
- frompyfunc
- fromregex
- fromstring
- fv
- generic
- genfromtxt
- get_array_wrap
- get_include
- get_numarray_include
- get_numpy_include
- get_printoptions
- getbuffer
- getbufsize
- geterr
- geterrcall
- geterrobj
- gradient
- greater
- greater_equal
- hamming
- hanning
- histogram
- histogram2d
- histogramdd
- hsplit
- hstack
- hypot
- i0
- identity
- imag
- indices
- inexact
- info
- inner
- insert
- int
- int0
- int16
- int32
- int64
- int8
- int_
- int_asbuffer
- intc
- integer
- interp
- intersect1d
- intersect1d_nu
- intp
- invert
- ipmt
- irr
- iscomplex
- iscomplexobj
- isfinite
- isfortran
- isinf
- isnan
- isneginf
- isposinf
- isreal
- isrealobj
- isscalar
- issctype
- issubclass_
- issubdtype
- issubsctype
- iterable
- ix_
- kaiser
- kron
- ldexp
- left_shift
- less
- less_equal
- lexsort
- linspace
- load
- loads
- loadtxt
- log
- log10
- log1p
- log2
- logaddexp
- logaddexp2
- logical_and
- logical_not
- logical_or
- logical_xor
- logspace
- long
- longcomplex
- longdouble
- longfloat
- longlong
- lookfor
- mafromtxt
- mat
- matrix
- max
- maximum
- maximum_sctype
- may_share_memory
- mean
- median
- memmap
- meshgrid
- min
- minimum
- mintypecode
- mirr
- mod
- modf
- msort
- multiply
- nan_to_num
- nanargmax
- nanargmin
- nanmax
- nanmin
- nansum
- ndarray
- ndenumerate
- ndfromtxt
- ndim
- ndindex
- negative
- newbuffer
- nonzero
- not_equal
- nper
- npv
- number
- obj2sctype
- object
- object0
- object_
- ones
- ones_like
- outer
- packbits
- piecewise
- pkgload
- place
- pmt
- poly
- poly1d
- polyadd
- polyder
- polydiv
- polyfit
- polyint
- polymul
- polysub
- polyval
- power
- ppmt
- prod
- product
- ptp
- put
- putmask
- pv
- rad2deg
- radians
- rank
- rate
- ravel
- real
- real_if_close
- recarray
- recfromcsv
- recfromtxt
- reciprocal
- record
- remainder
- repeat
- require
- reshape
- resize
- restoredot
- right_shift
- rint
- roll
- rollaxis
- roots
- rot90
- round
- round_
- row_stack
- safe_eval
- save
- savetxt
- savez
- sctype2char
- searchsorted
- select
- set_numeric_ops
- set_printoptions
- set_string_function
- setbufsize
- setdiff1d
- seterr
- seterrcall
- seterrobj
- setmember1d
- setxor1d
- shape
- short
- show_config
- sign
- signbit
- signedinteger
- sin
- sinc
- single
- singlecomplex
- sinh
- size
- sometrue
- sort
- sort_complex
- source
- split
- sqrt
- square
- squeeze
- std
- str
- str_
- string0
- string_
- subtract
- sum
- swapaxes
- take
- tan
- tanh
- tensordot
- test
- tile
- trace
- transpose
- trapz
- tri
- tril
- trim_zeros
- triu
- true_divide
- trunc
- typename
- ubyte
- ufunc
- uint
- uint0
- uint16
- uint32
- uint64
- uint8
- uintc
- uintp
- ulonglong
- unicode
- unicode0
- unicode_
- union1d
- unique
- unique1d
- unpackbits
- unravel_index
- unsignedinteger
- unwrap
- ushort
- vander
- var
- vdot
- vectorize
- void
- void0
- vsplit
- vstack
- where
- who
- zeros
- zeros_like
- __future__
- __import__
- __name__
- __cythonbufferdefaults__
- __weakref__
- None
- self
- True
- False
- NotImplemented
- Ellipsis
- NULL
- __new__
- __init__
- __cinit__
- __dealloc__
- __cmp__
- __richcmp__
- __str__
- __repr__
- __hash__
- __call__
- __iter__
- __getattr__
- __setattr__
- __delattr__
- __add__
- __sub__
- __mul__
- __div__
- __floordiv__
- __truediv__
- __mod__
- __divmod__
- __pow__
- __neg__
- __pos__
- __abs__
- __nonzero__
- __invert__
- __lshift__
- __rshift__
- __and__
- __or__
- __xor__
- __int__
- __long__
- __float__
- __oct__
- __hex__
- __index__
- __iadd__
- __isub__
- __imul__
- __idiv__
- __ifloordiv__
- __itruediv__
- __imod__
- __ipow__
- __ilshift__
- __irshift__
- __iand__
- __ior__
- __ixor__
- __len__
- __getitem__
- __setitem__
- __delitem__
- __getslice__
- __setslice__
- __delslice__
- __contains__
- __next__
- __getreadbuffer__
- __getwritebuffer__
- __getsegcount__
- __getcharbuffer__
- __get__
- __set__
- __delete__
- __getbuffer__
- __releasebuffer__
- ArithmeticError
- AssertionError
- AttributeError
- BaseException
- DeprecationWarning
- EnvironmentError
- EOFError
- Exception
- FloatingPointError
- FutureWarning
- GeneratorExit
- IOError
- ImportError
- ImportWarning
- IndexError
- KeyError
- KeyboardInterrupt
- LookupError
- MemoryError
- NameError
- NotImplementedError
- OSError
- OverflowError
- PendingDeprecationWarning
- ReferenceError
- RuntimeError
- RuntimeWarning
- StandardError
- StopIteration
- SyntaxError
- SyntaxWarning
- SystemError
- SystemExit
- TypeError
- UnboundLocalError
- UserWarning
- UnicodeError
- UnicodeWarning
- UnicodeEncodeError
- UnicodeDecodeError
- UnicodeTranslateError
- ValueError
- Warning
- WindowsError
- ZeroDivisionError
Cython-0.29.28/Tools/cython.st
/**
* Name: pyrex
* Description: Pyrex - a Language for Writing Python Extension Modules
* Author: Markku Rossi
*/
state pyrex extends python
{
/* Additional keywords.
(build-re '( NULL as cdef char ctypedef double enum extern float
include int long private public short signed sizeof struct union
unsigned void )) */
/\b(NULL|as|c(def|har|typedef)|double|e(num|xtern)|float|in(clude|t)\
|long|p(rivate|ublic)|s(hort|i(gned|zeof)|truct)|un(ion|signed)|void)\b/ {
keyword_face(true);
language_print($0);
keyword_face(false);
}
}
/*
Local variables:
mode: c
End:
*/
Cython-0.29.28/Tools/jedityper.py
"""
Inject Cython type declarations into a .py file using the Jedi static analysis tool.
"""
from __future__ import absolute_import
from io import open
from collections import defaultdict
from itertools import chain
import jedi
from jedi.parser.tree import Module, ImportName
from jedi.evaluate.representation import Function, Instance, Class
from jedi.evaluate.iterable import ArrayMixin, GeneratorComprehension
from Cython.Utils import open_source_file
default_type_map = {
'float': 'double',
'int': 'long',
}
def analyse(source_path=None, code=None):
"""
Analyse a Python source code file with Jedi.
Returns a mapping from (scope-name, (line, column)) pairs to a name-types mapping.
"""
if not source_path and code is None:
raise ValueError("Either 'source_path' or 'code' is required.")
scoped_names = {}
statement_iter = jedi.names(source=code, path=source_path, all_scopes=True)
for statement in statement_iter:
parent = statement.parent()
scope = parent._definition
evaluator = statement._evaluator
# skip function/generator definitions, class definitions, and module imports
if any(isinstance(statement._definition, t) for t in [Function, Class, ImportName]):
continue
key = (None if isinstance(scope, Module) else str(parent.name), scope.start_pos)
try:
names = scoped_names[key]
except KeyError:
names = scoped_names[key] = defaultdict(set)
position = statement.start_pos if statement.name in names else None
for name_type in evaluator.find_types(scope, statement.name, position=position, search_global=True):
if isinstance(name_type, Instance):
if isinstance(name_type.base, Class):
type_name = 'object'
else:
type_name = name_type.base.obj.__name__
elif isinstance(name_type, ArrayMixin):
type_name = name_type.type
elif isinstance(name_type, GeneratorComprehension):
type_name = None
else:
try:
type_name = type(name_type.obj).__name__
except AttributeError as error:
type_name = None
if type_name is not None:
names[str(statement.name)].add(type_name)
return scoped_names
def inject_types(source_path, types, type_map=default_type_map, mode='python'):
"""
Hack type declarations into source code file.
@param mode is currently 'python', which means that the generated type declarations use pure Python syntax.
"""
col_and_types_by_line = dict(
# {line: (column, scope_name or None, [(name, type)])}
(k[-1][0], (k[-1][1], k[0], [(n, next(iter(t))) for (n, t) in v.items() if len(t) == 1]))
for (k, v) in types.items())
lines = [u'import cython\n']
with open_source_file(source_path) as f:
for line_no, line in enumerate(f, 1):
if line_no in col_and_types_by_line:
col, scope, types = col_and_types_by_line[line_no]
if types:
types = ', '.join("%s='%s'" % (name, type_map.get(type_name, type_name))
for name, type_name in types)
if scope is None:
type_decl = u'{indent}cython.declare({types})\n'
else:
type_decl = u'{indent}@cython.locals({types})\n'
lines.append(type_decl.format(indent=' '*col, types=types))
lines.append(line)
return lines
def main(file_paths=None, overwrite=False):
"""
Main entry point to process a list of .py files and inject type inferred declarations.
"""
if file_paths is None:
import sys
file_paths = sys.argv[1:]
for source_path in file_paths:
types = analyse(source_path)
lines = inject_types(source_path, types)
target_path = source_path + ('' if overwrite else '_typed.py')
with open(target_path, 'w', encoding='utf8') as f:
for line in lines:
f.write(line)
if __name__ == '__main__':
main()
Cython-0.29.28/Tools/kate.diff
# HG changeset patch
# User Sturla Molden
# Date 1256723843 25200
# Node ID 0a6ce52272f641d58c874fa007187778d4c2c81c
# Parent db4133d43a7ee34d4f172aced054785acba65a57
Syntax highlighting for Cython and NumPy for KATE and KDevelop.
diff -r db4133d43a7e -r 0a6ce52272f6 Tools/cython-numpy-mode-kate.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/Tools/cython-numpy-mode-kate.xml Wed Oct 28 02:57:23 2009 -0700
@@ -0,0 +1,1133 @@
+ - as
+
+
+ - cimport
+ - import
+ - from
+ - as
+
+
+ - DEF
+ - IF
+ - ELIF
+ - ELSE
+ - include
+
+
+ - class
+ - cpdef
+ - def
+
+
+ - cdef
+ - ctypedef
+
+
+ - extern
+ - api
+ - del
+ - global
+ - property
+ - nogil
+ - gil
+ - inline
+
+
+ - readonly
+ - public
+
+
+ - and
+ - assert
+ - in
+ - is
+ - by
+ - not
+ - or
+ - sizeof
+
+
+
+ - print
+
+
+ - break
+ - continue
+ - elif
+ - else
+ - except
+ - finally
+ - for
+ - if
+ - pass
+ - raise
+ - return
+ - try
+ - while
+ - with
+
+
+
+ - __import__
+ - abs
+ - all
+ - any
+ - apply
+ - basestring
+ - buffer
+ - callable
+ - chr
+ - classmethod
+ - cmp
+ - coerce
+ - compile
+ - delattr
+ - dir
+ - divmod
+ - enumerate
+ - eval
+ - execfile
+ - filter
+ - getattr
+
+ - hasattr
+ - hash
+ - hex
+ - id
+ - input
+ - intern
+ - isinstance
+ - issubclass
+ - iter
+ - len
+
+ - map
+ - max
+ - min
+ - oct
+ - open
+ - ord
+ - pow
+
+ - range
+ - raw_input
+ - reduce
+ - reload
+ - repr
+ - reversed
+ - round
+ - setattr
+ - sorted
+ - staticmethod
+ - sum
+ - super
+ - type
+ - unichr
+ - unicode
+
+ - xrange
+ - zip
+
+
+
+ - unsigned
+ - void
+ - enum
+ - double
+ - long
+ - short
+ - char
+ - Py_ssize_t
+ - Py_intptr_t
+ - Py_buffer
+ - bint
+ - struct
+ - union
+ - enum
+
+
+
+ - int
+ - float
+ - object
+ - list
+ - tuple
+ - str
+ - dict
+ - set
+ - frozenset
+ - slice
+ - bool
+ - complex
+ - file
+
+
+
+ - np
+ - numpy
+
+
+ - numpy
+
+
+ - cython
+
+
+ - dtype
+ - flatiter
+ - broadcast
+ - ndarray
+ - int8_t
+ - int16_t
+ - int32_t
+ - int64_t
+ - uint8_t
+ - uint16_t
+ - uint32_t
+ - uint64_t
+ - float32_t
+ - float64_t
+ - complex64_t
+ - complex128_t
+ - int_t
+ - long_t
+ - uint_t
+ - ulong_t
+ - float_t
+ - double_t
+ - longdouble_t
+ - cfloat_t
+ - cdouble_t
+ - clongdouble_t
+ - complex_t
+ - npy_int8
+ - npy_int16
+ - npy_int32
+ - npy_int64
+ - npy_int96
+ - npy_int128
+ - npy_uint8
+ - npy_uint16
+ - npy_uint32
+ - npy_uint64
+ - npy_uint96
+ - npy_uint128
+ - npy_float32
+ - npy_float64
+ - npy_float80
+ - npy_float96
+ - npy_float128
+ - npy_complex64
+ - npy_complex128
+ - npy_complex120
+ - npy_complex192
+ - npy_complex256
+ - npy_cfloat
+ - npy_cdouble
+ - npy_clongdouble
+ - npy_bool
+ - npy_byte
+ - npy_short
+ - npy_int
+ - npy_long
+ - npy_longlong
+ - npy_ubyte
+ - npy_ushort
+ - npy_uint
+ - npy_ulong
+ - npy_ulonglong
+ - npy_float
+ - npy_double
+ - npy_longdouble
+ - npy_intp
+
+
+ - DataSource
+ - MachAr
+ - PackageLoader
+ - RankWarning
+ - Tester
+ - abs
+ - absolute
+ - add
+ - add_docstring
+ - add_newdoc
+ - alen
+ - all
+ - allclose
+ - alltrue
+ - alterdot
+ - amax
+ - amin
+ - angle
+ - any
+ - append
+ - apply_along_axis
+ - apply_over_axes
+ - arange
+ - arccos
+ - arccosh
+ - arcsin
+ - arcsinh
+ - arctan
+ - arctan2
+ - arctanh
+ - argmax
+ - argmin
+ - argsort
+ - argwhere
+ - around
+ - array
+ - array2string
+ - array_equal
+ - array_equiv
+ - array_repr
+ - array_split
+ - array_str
+ - asanyarray
+ - asarray
+ - asarray_chkfinite
+ - ascontiguousarray
+ - asfarray
+ - asfortranarray
+ - asmatrix
+ - asscalar
+ - atleast_1d
+ - atleast_2d
+ - atleast_3d
+ - average
+ - bartlett
+ - base_repr
+ - bench
+ - binary_repr
+ - bincount
+ - bitwise_and
+ - bitwise_not
+ - bitwise_or
+ - bitwise_xor
+ - blackman
+ - bmat
+ - bool
+ - bool8
+ - bool_
+ - broadcast
+ - broadcast_arrays
+ - byte
+ - byte_bounds
+ - can_cast
+ - cdouble
+ - ceil
+ - cfloat
+ - character
+ - chararray
+ - choose
+ - clip
+ - clongdouble
+ - clongfloat
+ - column_stack
+ - common_type
+ - compare_chararrays
+ - complex
+ - complex128
+ - complex192
+ - complex64
+ - complex_
+ - complexfloating
+ - compress
+ - concatenate
+ - conj
+ - conjugate
+ - convolve
+ - copy
+ - corrcoef
+ - correlate
+ - cos
+ - cosh
+ - cov
+ - cross
+ - csingle
+ - cumprod
+ - cumproduct
+ - cumsum
+ - deg2rad
+ - degrees
+ - delete
+ - deprecate
+ - deprecate_with_doc
+ - diag
+ - diagflat
+ - diagonal
+ - diff
+ - digitize
+ - disp
+ - divide
+ - dot
+ - double
+ - dsplit
+ - dstack
+ - dtype
+ - ediff1d
+ - empty
+ - empty_like
+ - equal
+ - errstate
+ - exp
+ - exp2
+ - expand_dims
+ - expm1
+ - extract
+ - eye
+ - fabs
+ - fastCopyAndTranspose
+ - find_common_type
+ - finfo
+ - fix
+ - flatiter
+ - flatnonzero
+ - flexible
+ - fliplr
+ - flipud
+ - float
+ - float32
+ - float64
+ - float96
+ - float_
+ - floating
+ - floor
+ - floor_divide
+ - fmax
+ - fmin
+ - fmod
+ - frexp
+ - frombuffer
+ - fromfile
+ - fromfunction
+ - fromiter
+ - frompyfunc
+ - fromregex
+ - fromstring
+ - fv
+ - generic
+ - genfromtxt
+ - get_array_wrap
+ - get_include
+ - get_numarray_include
+ - get_numpy_include
+ - get_printoptions
+ - getbuffer
+ - getbufsize
+ - geterr
+ - geterrcall
+ - geterrobj
+ - gradient
+ - greater
+ - greater_equal
+ - hamming
+ - hanning
+ - histogram
+ - histogram2d
+ - histogramdd
+ - hsplit
+ - hstack
+ - hypot
+ - i0
+ - identity
+ - imag
+ - indices
+ - inexact
+ - info
+ - inner
+ - insert
+ - int
+ - int0
+ - int16
+ - int32
+ - int64
+ - int8
+ - int_
+ - int_asbuffer
+ - intc
+ - integer
+ - interp
+ - intersect1d
+ - intersect1d_nu
+ - intp
+ - invert
+ - ipmt
+ - irr
+ - iscomplex
+ - iscomplexobj
+ - isfinite
+ - isfortran
+ - isinf
+ - isnan
+ - isneginf
+ - isposinf
+ - isreal
+ - isrealobj
+ - isscalar
+ - issctype
+ - issubclass_
+ - issubdtype
+ - issubsctype
+ - iterable
+ - ix_
+ - kaiser
+ - kron
+ - ldexp
+ - left_shift
+ - less
+ - less_equal
+ - lexsort
+ - linspace
+ - load
+ - loads
+ - loadtxt
+ - log
+ - log10
+ - log1p
+ - log2
+ - logaddexp
+ - logaddexp2
+ - logical_and
+ - logical_not
+ - logical_or
+ - logical_xor
+ - logspace
+ - long
+ - longcomplex
+ - longdouble
+ - longfloat
+ - longlong
+ - lookfor
+ - mafromtxt
+ - mat
+ - matrix
+ - max
+ - maximum
+ - maximum_sctype
+ - may_share_memory
+ - mean
+ - median
+ - memmap
+ - meshgrid
+ - min
+ - minimum
+ - mintypecode
+ - mirr
+ - mod
+ - modf
+ - msort
+ - multiply
+ - nan_to_num
+ - nanargmax
+ - nanargmin
+ - nanmax
+ - nanmin
+ - nansum
+ - ndarray
+ - ndenumerate
+ - ndfromtxt
+ - ndim
+ - ndindex
+ - negative
+ - newbuffer
+ - nonzero
+ - not_equal
+ - nper
+ - npv
+ - number
+ - obj2sctype
+ - object
+ - object0
+ - object_
+ - ones
+ - ones_like
+ - outer
+ - packbits
+ - piecewise
+ - pkgload
+ - place
+ - pmt
+ - poly
+ - poly1d
+ - polyadd
+ - polyder
+ - polydiv
+ - polyfit
+ - polyint
+ - polymul
+ - polysub
+ - polyval
+ - power
+ - ppmt
+ - prod
+ - product
+ - ptp
+ - put
+ - putmask
+ - pv
+ - rad2deg
+ - radians
+ - rank
+ - rate
+ - ravel
+ - real
+ - real_if_close
+ - recarray
+ - recfromcsv
+ - recfromtxt
+ - reciprocal
+ - record
+ - remainder
+ - repeat
+ - require
+ - reshape
+ - resize
+ - restoredot
+ - right_shift
+ - rint
+ - roll
+ - rollaxis
+ - roots
+ - rot90
+ - round
+ - round_
+ - row_stack
+ - safe_eval
+ - save
+ - savetxt
+ - savez
+ - sctype2char
+ - searchsorted
+ - select
+ - set_numeric_ops
+ - set_printoptions
+ - set_string_function
+ - setbufsize
+ - setdiff1d
+ - seterr
+ - seterrcall
+ - seterrobj
+ - setmember1d
+ - setxor1d
+ - shape
+ - short
+ - show_config
+ - sign
+ - signbit
+ - signedinteger
+ - sin
+ - sinc
+ - single
+ - singlecomplex
+ - sinh
+ - size
+ - sometrue
+ - sort
+ - sort_complex
+ - source
+ - split
+ - sqrt
+ - square
+ - squeeze
+ - std
+ - str
+ - str_
+ - string0
+ - string_
+ - subtract
+ - sum
+ - swapaxes
+ - take
+ - tan
+ - tanh
+ - tensordot
+ - test
+ - tile
+ - trace
+ - transpose
+ - trapz
+ - tri
+ - tril
+ - trim_zeros
+ - triu
+ - true_divide
+ - trunc
+ - typename
+ - ubyte
+ - ufunc
+ - uint
+ - uint0
+ - uint16
+ - uint32
+ - uint64
+ - uint8
+ - uintc
+ - uintp
+ - ulonglong
+ - unicode
+ - unicode0
+ - unicode_
+ - union1d
+ - unique
+ - unique1d
+ - unpackbits
+ - unravel_index
+ - unsignedinteger
+ - unwrap
+ - ushort
+ - vander
+ - var
+ - vdot
+ - vectorize
+ - void
+ - void0
+ - vsplit
+ - vstack
+ - where
+ - who
+ - zeros
+ - zeros_like
+
+
+ - __future__
+ - __import__
+ - __name__
+ - __cythonbufferdefaults__
+ - __weakref__
+ - None
+ - self
+ - True
+ - False
+ - NotImplemented
+ - Ellipsis
+ - NULL
+
+
+ - __new__
+ - __init__
+ - __cinit__
+ - __dealloc__
+ - __cmp__
+ - __richcmp__
+ - __str__
+ - __repr__
+ - __hash__
+ - __call__
+ - __iter__
+ - __getattr__
+ - __setattr__
+ - __delattr__
+ - __add__
+ - __sub__
+ - __mul__
+ - __div__
+ - __floordiv__
+ - __truediv__
+ - __mod__
+ - __divmod__
+ - __pow__
+ - __neg__
+ - __pos__
+ - __abs__
+ - __nonzero__
+ - __invert__
+ - __lshift__
+ - __rshift__
+ - __and__
+ - __or__
+ - __xor__
+ - __int__
+ - __long__
+ - __float__
+ - __oct__
+ - __hex__
+ - __index__
+ - __iadd__
+ - __isub__
+ - __imul__
+ - __idiv__
+ - __ifloordiv__
+ - __itruediv__
+ - __imod__
+ - __ipow__
+ - __ilshift__
+ - __irshift__
+ - __iand__
+ - __ior__
+ - __ixor__
+ - __len__
+ - __getitem__
+ - __setitem__
+ - __delitem__
+ - __getslice__
+ - __setslice__
+ - __delslice__
+ - __contains__
+ - __next__
+ - __getreadbuffer__
+ - __getwritebuffer__
+ - __getsegcount__
+ - __getcharbuffer__
+ - __get__
+ - __set__
+ - __delete__
+ - __getbuffer__
+ - __releasebuffer__
+
+
+ - ArithmeticError
+ - AssertionError
+ - AttributeError
+ - BaseException
+ - DeprecationWarning
+ - EnvironmentError
+ - EOFError
+ - Exception
+ - FloatingPointError
+ - FutureWarning
+ - GeneratorExit
+ - IOError
+ - ImportError
+ - ImportWarning
+ - IndexError
+ - KeyError
+ - KeyboardInterrupt
+ - LookupError
+ - MemoryError
+ - NameError
+ - NotImplementedError
+ - OSError
+ - OverflowError
+ - PendingDeprecationWarning
+ - ReferenceError
+ - RuntimeError
+ - RuntimeWarning
+ - StandardError
+ - StopIteration
+ - SyntaxError
+ - SyntaxWarning
+ - SystemError
+ - SystemExit
+ - TypeError
+ - UnboundLocalError
+ - UserWarning
+ - UnicodeError
+ - UnicodeWarning
+ - UnicodeEncodeError
+ - UnicodeDecodeError
+ - UnicodeTranslateError
+ - ValueError
+ - Warning
+ - WindowsError
+ - ZeroDivisionError
Cython-0.29.28/Tools/rules.bzl
"""
Defines a pyx_library() macros corresponding to py_library.
Uses Cython to compile .pyx files (and .py files with corresponding .pxd
files) to Python extension modules.
Example:
# Assuming Cython is mapped to "cython" in your workspace.
load("@cython//Tools:rules.bzl", "pyx_library")
pyx_library(name = 'mylib',
srcs = ['a.pyx', 'a.pxd', 'b.py', 'pkg/__init__.py', 'pkg/c.pyx'],
py_deps = ['//py_library/dep'],
data = ['//other/data'],
)
The __init__.py file must be in your srcs list so that Cython can resolve
cimports using the package layout.
"""
def pyx_library(
name,
deps=[],
srcs=[],
cython_directives=[],
cython_options=[],
**kwargs):
# First filter out files that should be run compiled vs. passed through.
py_srcs = []
pyx_srcs = []
pxd_srcs = []
for src in srcs:
if src.endswith('.pyx') or (src.endswith('.py')
and src[:-3] + '.pxd' in srcs):
pyx_srcs.append(src)
elif src.endswith('.py'):
py_srcs.append(src)
else:
pxd_srcs.append(src)
if src.endswith('__init__.py'):
# TODO(robertwb): Infer __init__.py files/package root?
pxd_srcs.append(src)
# Invoke cythonize to produce the shared object libraries.
outs = [src.split('.')[0] + '.so' for src in pyx_srcs]
extra_flags = " ".join(["-X '%s=%s'" % x for x in cython_directives] +
["-s '%s=%s'" % x for x in cython_options])
# TODO(robertwb): It might be better to only generate the C files,
# letting cc_library (or similar) handle the rest, but there isn't yet
# support compiling Python C extensions from bazel.
native.genrule(
name = name + "_cythonize",
srcs = pyx_srcs,
outs = outs,
cmd = "PYTHONHASHSEED=0 $(location @cython//:cythonize) -k -i %s $(SRCS)" % extra_flags
# Rename outputs to expected location.
# TODO(robertwb): Add an option to cythonize itself to do this.
+ """ && python -c 'import os, sys; n = len(sys.argv); [os.rename(src.split(".")[0] + ".so", dst) for src, dst in zip(sys.argv[1:], sys.argv[1+n//2:])]' $(SRCS) $(OUTS)""",
tools = ["@cython//:cythonize"] + pxd_srcs,
)
# Now create a py_library with these shared objects as data.
native.py_library(
name=name,
srcs=py_srcs,
deps=deps,
data=outs + pyx_srcs + pxd_srcs,
**kwargs
)
Cython-0.29.28/Tools/site_scons/
Cython-0.29.28/Tools/site_scons/site_tools/
Cython-0.29.28/Tools/site_scons/site_tools/cython.py
"""
Tool to run Cython files (.pyx) into .c and .cpp.
TODO:
- Add support for dynamically selecting in-process Cython
through CYTHONINPROCESS variable.
- Have a CYTHONCPP option which turns on C++ in flags and
changes output extension at the same time
VARIABLES:
- CYTHON - The path to the "cython" command line tool.
- CYTHONFLAGS - Flags to pass to the "cython" command line tool.
AUTHORS:
- David Cournapeau
- Dag Sverre Seljebotn
"""
import SCons
from SCons.Builder import Builder
from SCons.Action import Action
#def cython_action(target, source, env):
# print target, source, env
# from Cython.Compiler.Main import compile as cython_compile
# res = cython_compile(str(source[0]))
cythonAction = Action("$CYTHONCOM")
def create_builder(env):
try:
cython = env['BUILDERS']['Cython']
except KeyError:
cython = SCons.Builder.Builder(
action = cythonAction,
emitter = {},
suffix = cython_suffix_emitter,
single_source = 1)
env['BUILDERS']['Cython'] = cython
return cython
def cython_suffix_emitter(env, source):
return "$CYTHONCFILESUFFIX"
def generate(env):
env["CYTHON"] = "cython"
env["CYTHONCOM"] = "$CYTHON $CYTHONFLAGS -o $TARGET $SOURCE"
env["CYTHONCFILESUFFIX"] = ".c"
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
c_file.suffix['.pyx'] = cython_suffix_emitter
c_file.add_action('.pyx', cythonAction)
c_file.suffix['.py'] = cython_suffix_emitter
c_file.add_action('.py', cythonAction)
create_builder(env)
def exists(env):
try:
# import Cython
return True
except ImportError:
return False
Cython-0.29.28/Tools/site_scons/site_tools/pyext.py
"""SCons.Tool.pyext
Tool-specific initialization for python extensions builder.
AUTHORS:
- David Cournapeau
- Dag Sverre Seljebotn
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import sys
import SCons
from SCons.Tool import SourceFileScanner, ProgramScanner
# Create common python builders
def createPythonObjectBuilder(env):
"""This is a utility function that creates the PythonObject Builder in an
Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
pyobj = env['BUILDERS']['PythonObject']
except KeyError:
pyobj = SCons.Builder.Builder(action = {},
emitter = {},
prefix = '$PYEXTOBJPREFIX',
suffix = '$PYEXTOBJSUFFIX',
src_builder = ['CFile', 'CXXFile'],
source_scanner = SourceFileScanner,
single_source = 1)
env['BUILDERS']['PythonObject'] = pyobj
return pyobj
def createPythonExtensionBuilder(env):
"""This is a utility function that creates the PythonExtension Builder in
an Environment if it is not there already.
If it is already there, we return the existing one.
"""
try:
pyext = env['BUILDERS']['PythonExtension']
except KeyError:
import SCons.Action
import SCons.Defaults
action = SCons.Action.Action("$PYEXTLINKCOM", "$PYEXTLINKCOMSTR")
action_list = [ SCons.Defaults.SharedCheck,
action]
pyext = SCons.Builder.Builder(action = action_list,
emitter = "$SHLIBEMITTER",
prefix = '$PYEXTPREFIX',
suffix = '$PYEXTSUFFIX',
target_scanner = ProgramScanner,
src_suffix = '$PYEXTOBJSUFFIX',
src_builder = 'PythonObject')
env['BUILDERS']['PythonExtension'] = pyext
return pyext
def pyext_coms(platform):
"""Return PYEXTCCCOM, PYEXTCXXCOM and PYEXTLINKCOM for the given
platform."""
if platform == 'win32':
pyext_cccom = "$PYEXTCC /Fo$TARGET /c $PYEXTCCSHARED "\
"$PYEXTCFLAGS $PYEXTCCFLAGS $_CCCOMCOM "\
"$_PYEXTCPPINCFLAGS $SOURCES"
pyext_cxxcom = "$PYEXTCXX /Fo$TARGET /c $PYEXTCSHARED "\
"$PYEXTCXXFLAGS $PYEXTCCFLAGS $_CCCOMCOM "\
"$_PYEXTCPPINCFLAGS $SOURCES"
pyext_linkcom = '${TEMPFILE("$PYEXTLINK $PYEXTLINKFLAGS '\
'/OUT:$TARGET.windows $( $_LIBDIRFLAGS $) '\
'$_LIBFLAGS $_PYEXTRUNTIME $SOURCES.windows")}'
else:
pyext_cccom = "$PYEXTCC -o $TARGET -c $PYEXTCCSHARED "\
"$PYEXTCFLAGS $PYEXTCCFLAGS $_CCCOMCOM "\
"$_PYEXTCPPINCFLAGS $SOURCES"
pyext_cxxcom = "$PYEXTCXX -o $TARGET -c $PYEXTCSHARED "\
"$PYEXTCXXFLAGS $PYEXTCCFLAGS $_CCCOMCOM "\
"$_PYEXTCPPINCFLAGS $SOURCES"
pyext_linkcom = "$PYEXTLINK -o $TARGET $PYEXTLINKFLAGS "\
"$SOURCES $_LIBDIRFLAGS $_LIBFLAGS $_PYEXTRUNTIME"
if platform == 'darwin':
pyext_linkcom += ' $_FRAMEWORKPATH $_FRAMEWORKS $FRAMEWORKSFLAGS'
return pyext_cccom, pyext_cxxcom, pyext_linkcom
def set_basic_vars(env):
# Set construction variables which are independent on whether we are using
# distutils or not.
env['PYEXTCPPPATH'] = SCons.Util.CLVar('$PYEXTINCPATH')
env['_PYEXTCPPINCFLAGS'] = '$( ${_concat(INCPREFIX, PYEXTCPPPATH, '\
'INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
env['PYEXTOBJSUFFIX'] = '$SHOBJSUFFIX'
env['PYEXTOBJPREFIX'] = '$SHOBJPREFIX'
env['PYEXTRUNTIME'] = SCons.Util.CLVar("")
# XXX: this should be handled with different flags
env['_PYEXTRUNTIME'] = '$( ${_concat(LIBLINKPREFIX, PYEXTRUNTIME, '\
'LIBLINKSUFFIX, __env__)} $)'
# XXX: This won't work in all cases (using mingw, for example). To make
# this work, we need to know whether PYEXTCC accepts /c and /Fo or -c -o.
# This is difficult with the current way tools work in scons.
pycc, pycxx, pylink = pyext_coms(sys.platform)
env['PYEXTLINKFLAGSEND'] = SCons.Util.CLVar('$LINKFLAGSEND')
env['PYEXTCCCOM'] = pycc
env['PYEXTCXXCOM'] = pycxx
env['PYEXTLINKCOM'] = pylink
def _set_configuration_nodistutils(env):
# Set env variables to sensible values when not using distutils
def_cfg = {'PYEXTCC' : '$SHCC',
'PYEXTCFLAGS' : '$SHCFLAGS',
'PYEXTCCFLAGS' : '$SHCCFLAGS',
'PYEXTCXX' : '$SHCXX',
'PYEXTCXXFLAGS' : '$SHCXXFLAGS',
'PYEXTLINK' : '$LDMODULE',
'PYEXTSUFFIX' : '$LDMODULESUFFIX',
'PYEXTPREFIX' : ''}
if sys.platform == 'darwin':
def_cfg['PYEXTSUFFIX'] = '.so'
for k, v in def_cfg.items():
ifnotset(env, k, v)
ifnotset(env, 'PYEXT_ALLOW_UNDEFINED',
SCons.Util.CLVar('$ALLOW_UNDEFINED'))
ifnotset(env, 'PYEXTLINKFLAGS', SCons.Util.CLVar('$LDMODULEFLAGS'))
env.AppendUnique(PYEXTLINKFLAGS = env['PYEXT_ALLOW_UNDEFINED'])
def ifnotset(env, name, value):
if name not in env:
env[name] = value
def set_configuration(env, use_distutils):
    """Set construction variables which are platform dependent.

    If use_distutils == True, use the distutils configuration. Otherwise,
    use 'sensible' defaults.

    Any variable already defined is left untouched."""

    # We define the commands as strings so that we can either execute them
    # with eval (when scons and distutils run under the same Python) or run
    # them through the shell.
    dist_cfg = {'PYEXTCC': ("sysconfig.get_config_var('CC')", False),
                'PYEXTCFLAGS': ("sysconfig.get_config_var('CFLAGS')", True),
                'PYEXTCCSHARED': ("sysconfig.get_config_var('CCSHARED')", False),
                'PYEXTLINKFLAGS': ("sysconfig.get_config_var('LDFLAGS')", True),
                'PYEXTLINK': ("sysconfig.get_config_var('LDSHARED')", False),
                'PYEXTINCPATH': ("sysconfig.get_python_inc()", False),
                'PYEXTSUFFIX': ("sysconfig.get_config_var('SO')", False)}

    from distutils import sysconfig

    # We set the Python include path even when not using distutils, because
    # we rarely want to change it.
    ifnotset(env, 'PYEXTINCPATH', sysconfig.get_python_inc())

    if use_distutils:
        for k, (v, should_split) in dist_cfg.items():
            val = eval(v)
            if should_split:
                val = val.split()
            ifnotset(env, k, val)
    else:
        _set_configuration_nodistutils(env)
def generate(env):
    """Add Builders and construction variables for python extensions to an
    Environment."""
    if 'PYEXT_USE_DISTUTILS' not in env:
        env['PYEXT_USE_DISTUTILS'] = False

    # This sets all construction variables used for pyext builders.
    set_basic_vars(env)

    set_configuration(env, env['PYEXT_USE_DISTUTILS'])

    # Create the PythonObject builder
    pyobj = createPythonObjectBuilder(env)
    action = SCons.Action.Action("$PYEXTCCCOM", "$PYEXTCCCOMSTR")
    pyobj.add_emitter('.c', SCons.Defaults.SharedObjectEmitter)
    pyobj.add_action('.c', action)

    action = SCons.Action.Action("$PYEXTCXXCOM", "$PYEXTCXXCOMSTR")
    pyobj.add_emitter('$CXXFILESUFFIX', SCons.Defaults.SharedObjectEmitter)
    pyobj.add_action('$CXXFILESUFFIX', action)

    # Create the PythonExtension builder
    createPythonExtensionBuilder(env)
def exists(env):
    try:
        # This is not quite right: if someone defines all the variables
        # themselves, the tool would work without distutils.
        from distutils import sysconfig
        return True
    except ImportError:
        return False
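
# Usage sketch (not part of the tool itself): with this module installed as a
# "pyext" tool on the SCons toolpath, an SConstruct might look like the
# following. The toolpath entry 'site_tools' and the source name 'example.c'
# are illustrative assumptions, not names used by this file.
#
#   env = Environment(tools=['default', 'pyext'], toolpath=['site_tools'])
#   env['PYEXT_USE_DISTUTILS'] = True   # take CC/CFLAGS from distutils' sysconfig
#   env.PythonExtension('example', ['example.c'])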
Cython-0.29.28/USAGE.txt

Cython - Usage Instructions
===========================
Building Cython extensions using distutils
------------------------------------------

Cython comes with an experimental distutils extension for compiling
Cython modules, contributed by Graham Fawcett of the University of
Windsor (fawcett@uwindsor.ca).

The Demos directory contains a setup.py file demonstrating its use. To
compile the demos:

(1) cd Demos
(2) python setup.py build_ext --inplace

    or

    python setup.py build --build-lib=.
(You may get a screed of warnings from the C compiler, but you can
ignore these -- as long as there are no actual errors, things are
probably okay.)
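
For reference, a minimal setup.py along these lines looks like the
following sketch (the module name "primes" is just an example; see the
Demos directory for the real thing):

    from distutils.core import setup
    from distutils.extension import Extension
    from Cython.Distutils import build_ext

    setup(
        cmdclass={'build_ext': build_ext},
        ext_modules=[Extension("primes", ["primes.pyx"])],
    )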
Try out the extensions with:

    python run_primes.py
    python run_spam.py
    python run_numeric_demo.py
Building Cython extensions by hand
----------------------------------

You can also invoke the Cython compiler on its own to translate a .pyx
file to a .c file. On Unix,

    cython filename.pyx

On other platforms,

    python cython.py filename.pyx

It's then up to you to compile and link the .c file using whatever
procedure is appropriate for your platform. The file
Makefile.nodistutils in the Demos directory shows how to do this for
one particular Unix system.
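
Alternatively, a small Python script can drive the compile-and-link step
portably through distutils' compiler abstraction. This is only a sketch,
not part of Cython: the module name "primes" and the ".so" output name are
assumptions (the correct extension suffix varies by platform):

    from distutils import sysconfig
    from distutils.ccompiler import new_compiler

    compiler = new_compiler()
    objects = compiler.compile(['primes.c'],
                               include_dirs=[sysconfig.get_python_inc()])
    compiler.link_shared_object(objects, 'primes.so')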
Command line options
--------------------
The cython command supports the following options:
Short  Long               Argument     Description
-------------------------------------------------------------------------------
-v     --version                       Display version number of cython
                                       compiler
-l     --create-listing                Write error messages to a .lis file
-I     --include-dir      <directory>  Search for include files in named
                                       directory (may be repeated)
-o     --output-file      <filename>   Specify name of generated C file (only
                                       one source file allowed if this is used)
-p     --embed-positions               If specified, the positions in Cython
                                       files of each function definition are
                                       embedded in its docstring.
-z     --pre-import       <module>     If specified, assume undeclared names in
                                       this module. Emulates the behavior of
                                       putting "from <module> import *" at the
                                       top of the file.
Anything else is taken as the name of a Cython source file and compiled
to a C source file. Multiple Cython source files can be specified
(unless -o is used), in which case each source file is treated as the
source of a distinct extension module and compiled separately to
produce its own C file.
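
For example, to compile two source files in one run while adding an extra
include directory (the file and directory names here are illustrative):

    cython -I include primes.pyx spam.pyx

This produces primes.c and spam.c, one extension module per source file.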
Cython-0.29.28/bin/cygdb

#!/usr/bin/env python
import sys

from Cython.Debugger import Cygdb as cygdb

if __name__ == '__main__':
    cygdb.main()
Cython-0.29.28/bin/cython

#!/usr/bin/env python
#
# Cython -- Main Program, Unix
#
from Cython.Compiler.Main import main
main(command_line = 1)
Cython-0.29.28/bin/cython.bat

@REM Start cython from windows commandline as "cython", not "cython.py".
@REM This is especially useful for windows power shell, as no extra window
@REM is used.
@echo OFF
python -c "from Cython.Compiler.Main import main; main(command_line = 1)" %*
Cython-0.29.28/bin/cython_freeze

#!/usr/bin/env python
"""
Create a C file for embedding one or more Cython source files.
Requires Cython 0.11.2 (or perhaps newer).
See Demos/freeze/README.rst for more details.
"""
from __future__ import print_function

import optparse
from os.path import splitext, basename

usage = '%prog [-o outfile] [-p] module [module ...]'
description = 'Create a C file for embedding Cython modules.'

p = optparse.OptionParser(usage=usage, description=description)
p.add_option('-o', '--output', metavar='FILE',
             help='write output to FILE instead of standard output')
p.add_option('-p', '--pymain', action='store_true', default=False,
             help='do not automatically run the first module as __main__')

options, args = p.parse_args()

if len(args) < 1:
    p.print_help()
    p.exit(1)

if options.output:
    import sys
    old_stdout = sys.stdout
    sys.stdout = open(options.output, 'w')

modules = [basename(splitext(x)[0]).replace('.', '_') for x in args]
print("""\
#include
#include
#include
#include
#ifdef __FreeBSD__
#include
#endif
#if PY_MAJOR_VERSION < 3
# define MODINIT(name) init ## name
#else
# define MODINIT(name) PyInit_ ## name
#endif
""")
for name in modules:
    print("PyMODINIT_FUNC MODINIT(%s) (void);" % name)

print("""
static struct _inittab inittab[] = {""")

for name in modules:
    print('    {"%(name)s", MODINIT(%(name)s)},' % {'name': name})

print("""    {NULL, NULL}
};
""", end=' ')

if not options.pymain:
    print("\nextern int __pyx_module_is_main_%s;" % modules[0])
print("""
#if PY_MAJOR_VERSION < 3
int main(int argc, char** argv) {
#elif defined(WIN32) || defined(MS_WINDOWS)
int wmain(int argc, wchar_t **argv) {
#else
static int python_main(int argc, wchar_t **argv) {
#endif
""", end=' ')
if not options.pymain:
print("""\
PyObject *m = NULL;
int r = 0;
""", end=' ')
print("""\
/* 754 requires that FP exceptions run in "no stop" mode by default,
* and until C vendors implement C99's ways to control FP exceptions,
* Python requires non-stop mode. Alas, some platforms enable FP
* exceptions by default. Here we disable them.
*/
#ifdef __FreeBSD__
fp_except_t m;
m = fpgetmask();
fpsetmask(m & ~FP_X_OFL);
#endif
if (PyImport_ExtendInittab(inittab)) {
fprintf(stderr, "No memory\\n");
exit(1);
}
""", end=' ')
if options.pymain:
    print("""\
    return Py_Main(argc, argv);
}
""")
else:
    print("""\
    Py_SetProgramName(argv[0]);
    Py_Initialize();
    PySys_SetArgv(argc, argv);
    __pyx_module_is_main_%(main)s = 1;
    m = PyImport_ImportModule(inittab[0].name);
    if (!m) {
        r = 1;
        PyErr_Print(); /* This exits with the right code if SystemExit. */
#if PY_MAJOR_VERSION < 3
        if (Py_FlushLine())
            PyErr_Clear();
#endif
    }
    Py_XDECREF(m);
    Py_Finalize();
    return r;
}
""" % {'main': modules[0]}, end=' ')
print(r"""
#if PY_MAJOR_VERSION >= 3 && !defined(WIN32) && !defined(MS_WINDOWS)
static wchar_t*
char2wchar(char* arg)
{
wchar_t *res;
#ifdef HAVE_BROKEN_MBSTOWCS
/* Some platforms have a broken implementation of
* mbstowcs which does not count the characters that
* would result from conversion. Use an upper bound.
*/
size_t argsize = strlen(arg);
#else
size_t argsize = mbstowcs(NULL, arg, 0);
#endif
size_t count;
unsigned char *in;
wchar_t *out;
#ifdef HAVE_MBRTOWC
mbstate_t mbs;
#endif
if (argsize != (size_t)-1) {
res = (wchar_t *)malloc((argsize+1)*sizeof(wchar_t));
if (!res)
goto oom;
count = mbstowcs(res, arg, argsize+1);
if (count != (size_t)-1) {
wchar_t *tmp;
/* Only use the result if it contains no
surrogate characters. */
for (tmp = res; *tmp != 0 &&
(*tmp < 0xd800 || *tmp > 0xdfff); tmp++)
;
if (*tmp == 0)
return res;
}
free(res);
}
/* Conversion failed. Fall back to escaping with surrogateescape. */
#ifdef HAVE_MBRTOWC
/* Try conversion with mbrtwoc (C99), and escape non-decodable bytes. */
/* Overallocate; as multi-byte characters are in the argument, the
actual output could use less memory. */
argsize = strlen(arg) + 1;
res = malloc(argsize*sizeof(wchar_t));
if (!res) goto oom;
in = (unsigned char*)arg;
out = res;
memset(&mbs, 0, sizeof mbs);
while (argsize) {
size_t converted = mbrtowc(out, (char*)in, argsize, &mbs);
if (converted == 0)
/* Reached end of string; null char stored. */
break;
if (converted == (size_t)-2) {
/* Incomplete character. This should never happen,
since we provide everything that we have -
unless there is a bug in the C library, or I
misunderstood how mbrtowc works. */
fprintf(stderr, "unexpected mbrtowc result -2\n");
return NULL;
}
if (converted == (size_t)-1) {
/* Conversion error. Escape as UTF-8b, and start over
in the initial shift state. */
*out++ = 0xdc00 + *in++;
argsize--;
memset(&mbs, 0, sizeof mbs);
continue;
}
if (*out >= 0xd800 && *out <= 0xdfff) {
/* Surrogate character. Escape the original
byte sequence with surrogateescape. */
argsize -= converted;
while (converted--)
*out++ = 0xdc00 + *in++;
continue;
}
/* successfully converted some bytes */
in += converted;
argsize -= converted;
out++;
}
#else
/* Cannot use C locale for escaping; manually escape as if charset
is ASCII (i.e. escape all bytes > 128. This will still roundtrip
correctly in the locale's charset, which must be an ASCII superset. */
res = malloc((strlen(arg)+1)*sizeof(wchar_t));
if (!res) goto oom;
in = (unsigned char*)arg;
out = res;
while(*in)
if(*in < 128)
*out++ = *in++;
else
*out++ = 0xdc00 + *in++;
*out = 0;
#endif
return res;
oom:
fprintf(stderr, "out of memory\n");
return NULL;
}
int
main(int argc, char **argv)
{
wchar_t **argv_copy = (wchar_t **)malloc(sizeof(wchar_t*)*argc);
/* We need a second copies, as Python might modify the first one. */
wchar_t **argv_copy2 = (wchar_t **)malloc(sizeof(wchar_t*)*argc);
int i, res;
char *oldloc;
if (!argv_copy || !argv_copy2) {
fprintf(stderr, "out of memory\n");
return 1;
}
oldloc = strdup(setlocale(LC_ALL, NULL));
setlocale(LC_ALL, "");
for (i = 0; i < argc; i++) {
argv_copy2[i] = argv_copy[i] = char2wchar(argv[i]);
if (!argv_copy[i])
return 1;
}
setlocale(LC_ALL, oldloc);
free(oldloc);
res = python_main(argc, argv_copy);
for (i = 0; i < argc; i++) {
free(argv_copy2[i]);
}
free(argv_copy);
free(argv_copy2);
return res;
}
#endif""")
Cython-0.29.28/bin/cythonize

#!/usr/bin/env python
#
# command line frontend for cythonize()
#
from Cython.Build.Cythonize import main
main()
Cython-0.29.28/bin/cythonrun

#!/usr/bin/env python
"""
Compile a Python script into an executable that embeds CPython and run it.
Requires CPython to be built as a shared library ('libpythonX.Y').
Basic usage:

    python cythonrun somefile.py [ARGS]
"""
from Cython.Build.BuildExecutable import build, build_and_run

if __name__ == '__main__':
    import sys
    build_and_run(sys.argv[1:])
Cython-0.29.28/bin/move-declarators.sed

# Moves
#
# use: sed [-E | -r] -i -f move-declarators.sed [files]
# Arrays
# cdef int a[5] -> cdef int[5] a
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+) +([_0-9a-zA-Z]+)((\[[0-9]*\])+)$/\1cdef \2\4 \3/
# Pointers
# cdef int a, *b -> cdef int a \n cdef int *b
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
s/^([ \t]*)cdef +([_0-9a-zA-Z. ]+)( +[_0-9a-zA-Z]+ +(=[^()]+)?),( *[*]+ *)([^()]+)/\1cdef \2\3\
\1cdef \2\5\6/
Cython-0.29.28/bin/pcython

#!/usr/bin/env python
"""
Script to run Cython with a Python command line.
Executes Python code by compiling it in Cython and running the compiled code.
"""
import sys
import subprocess
def parse_args(args=None):
import optparse # tried argparse, but it doesn't stop at the first (interspersed) positional argument
parser = optparse.OptionParser(description='Run a Python command line with Cython')
parser.disable_interspersed_args()
parser.add_option('-c', metavar='CODE', dest='command',
help='program passed in as string')
parser.add_option('--python', metavar='PYTHON', dest='python', default=sys.executable,
help='Python interpreter to use')
parser.add_option('-X', metavar='NAME=VALUE', dest='directives', action='append',
help='Compiler directives to set during compilation')
parser.add_option('-V', '--version', action='store_true',
help='print the Python and Cython version numbers and exit')
return parser.parse_args(args)
def run_python(python, command):
subprocess.check_call([python, '-c', command])
def print_versions(python):
"""Print version numbers of Python and Cython.
"""
command = (
"import sys, Cython; "
"print('Python {}'.format('.'.join(map(str, sys.version_info[:3])))); "
"print('[Cython {}]'.format(Cython.__version__))"
)
run_python(python, command)
def run_cython_command(python, command, args=None):
"""Compile and run a Python command string.
"""
command = (
"import sys; "
"from Cython.Build.Inline import cython_inline; "
"sys.argv[1:] = {args!r}; "
"(lambda: cython_inline({code!r}, quiet=True))()"
).format(code=command, args=list(args) if args else [])
run_python(python, command)
def run_python_stdin(python, file_args=None, directives=None):
"""Compile and run a Python program from stdin.
"""
import shutil
import tempfile
with tempfile.NamedTemporaryFile(suffix='.py') as f:
shutil.copyfileobj(sys.stdin, f)
f.flush()
file_args = [f.name] + list(file_args or ())
run_python_file(python, file_args, directives)
def run_python_file(python, file_args, directives=None):
"""Compile and run a Python program from a file.
"""
args = []
if directives:
for directive in directives:
args.extend(('-X', directive))
args.extend(file_args)
command = (
"import Cython.Build.BuildExecutable as bex; "
"bex.DEBUG = False; "
"bex.build_and_run({args!r})"
).format(args=args)
run_python(python, command)
def main():
options, args = parse_args()
python = options.python
if options.version:
print_versions(python)
return
if options.command:
run_cython_command(python, options.command, args)
if args:
if args[0] == '-':
run_python_stdin(python, args[1:], options.directives)
else:
run_python_file(python, args, options.directives)
if __name__ == '__main__':
main()
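
# Illustrative invocations (the command strings below are hypothetical;
# the -c and -X options are the ones defined in parse_args above):
#
#   pcython -c "print(sum(i*i for i in range(10)))"   # compile and run a one-liner
#   pcython -X boundscheck=False script.py            # pass a compiler directive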
Cython-0.29.28/cygdb.py

#!/usr/bin/env python
import sys

from Cython.Debugger import Cygdb as cygdb

if __name__ == '__main__':
    cygdb.main()
Cython-0.29.28/cython.py

#!/usr/bin/env python
#
# Cython -- Main Program, generic
#
if __name__ == '__main__':
    import os
    import sys

    # Make sure we import the right Cython
    cythonpath, _ = os.path.split(os.path.realpath(__file__))
    sys.path.insert(0, cythonpath)

    from Cython.Compiler.Main import main
    main(command_line = 1)

else:
    # Void cython.* directives.
    from Cython.Shadow import *
    ## and bring in the __version__
    from Cython import __version__
    from Cython import load_ipython_extension
Cython-0.29.28/cythonize.py

#!/usr/bin/env python
#
# Cython -- enhanced main program
#
if __name__ == '__main__':
    from Cython.Build.Cythonize import main
    main()
Cython-0.29.28/docs/.hgignore

syntax: glob
*.pyc
*~
.*.swp
syntax: regexp
^build/
^_build/
Cython-0.29.28/docs/CONTRIBUTING.rst

Welcome, and thank you for your interest in contributing!
=========================================================
If you are looking for a good way to contribute to the Cython project, please
* have a look at the `Cython Hacker Guide `_,
especially the section on `getting started `_.
* look through the `issues that need help `_.
* look through the `issues that are a good entry point for beginners `_.
* ask on the `core developers mailing list `_ for guidance.
If you have code that you want to contribute, please make sure that it

* includes tests in the `tests/` directory (see the `Hacker Guide on Testing `_)
* comes in the form of a pull request
We use `travis `_ and `appveyor `_ for cross-platform testing, including pull requests.
Cython-0.29.28/docs/Makefile

# Makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
PAPER         = a4
BUILDDIR      = build

# Internal variables.
PAPEROPT_a4     = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

.PHONY: help clean pdf html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext

help:
	@echo "Please use \`make <target>' where <target> is one of"
	@echo "  html       to make standalone HTML files"
	@echo "  dirhtml    to make HTML files named index.html in directories"
	@echo "  singlehtml to make a single large HTML file"
	@echo "  pickle     to make pickle files"
	@echo "  json       to make JSON files"
	@echo "  htmlhelp   to make HTML files and a HTML help project"
	@echo "  qthelp     to make HTML files and a qthelp project"
	@echo "  devhelp    to make HTML files and a Devhelp project"
	@echo "  epub       to make an epub"
	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
	@echo "  text       to make text files"
	@echo "  man        to make manual pages"
	@echo "  texinfo    to make Texinfo files"
	@echo "  info       to make Texinfo files and run them through makeinfo"
	@echo "  gettext    to make PO message catalogs"
	@echo "  changes    to make an overview of all changed/added/deprecated items"
	@echo "  linkcheck  to check all external links for integrity"
	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"

clean:
	-rm -rf $(BUILDDIR)/*

pdf:
	$(SPHINXBUILD) -b pdf $(ALLSPHINXOPTS) $(BUILDDIR)/pdf
	@echo
	@echo "Build finished. The PDF is in $(BUILDDIR)/pdf."

html:
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

dirhtml:
	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."

singlehtml:
	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
	@echo
	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."

pickle:
	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
	@echo
	@echo "Build finished; now you can process the pickle files."

json:
	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
	@echo
	@echo "Build finished; now you can process the JSON files."

htmlhelp:
	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
	@echo
	@echo "Build finished; now you can run HTML Help Workshop with the" \
	      ".hhp project file in $(BUILDDIR)/htmlhelp."

qthelp:
	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
	@echo
	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Cython.qhcp"
	@echo "To view the help file:"
	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Cython.qhc"

devhelp:
	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
	@echo
	@echo "Build finished."
	@echo "To view the help file:"
	@echo "# mkdir -p $$HOME/.local/share/devhelp/Cython"
	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Cython"
	@echo "# devhelp"

epub:
	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
	@echo
	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."

latex:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo
	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
	@echo "Run \`make' in that directory to run these through (pdf)latex" \
	      "(use \`make latexpdf' here to do that automatically)."

latexpdf:
	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
	@echo "Running LaTeX files through pdflatex..."
	$(MAKE) -C $(BUILDDIR)/latex all-pdf
	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."

text:
	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
	@echo
	@echo "Build finished. The text files are in $(BUILDDIR)/text."

man:
	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
	@echo
	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."

texinfo:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo
	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
	@echo "Run \`make' in that directory to run these through makeinfo" \
	      "(use \`make info' here to do that automatically)."

info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	make -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."

gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."
Cython-0.29.28/docs/README

Welcome to Cython's documentation.
To build the documentation on Linux, you need Make and Sphinx installed on
your system. Then execute::

    make html

On Windows systems, you only need Sphinx. Open PowerShell and type::

    ./make.bat html

You can then view the documentation by opening
``cython/docs/build/html/index.html`` in a browser.
The current Cython documentation files are hosted at
https://cython.readthedocs.io/en/latest/
Notes
=======
1) Some css work should definitely be done.
2) Use local 'top-of-page' contents rather than the sidebar, imo.
3) Provide a link from each (sub)section to the TOC of the page.
4) Fix cython highlighter for cdef blocks
Cython-0.29.28/docs/TODO

Background
----------
[brain dump]
The "Old Cython Users Guide" is a derivative of the old Pyrex documentation.
It underwent substantial editing by Peter Alexandar
to become the Reference Guide, which is oriented around bullet points
and lists rather than prose. This transition was incomplete.
At nearly the same time, Robert, Dag, and Stefan wrote a tutorial as
part of the SciPy proceedings. It was felt that the content there was
cleaner and more up to date than anything else, and this became the
basis for the "Getting Started" and "Tutorials" sections. However,
it simply doesn't have as much content as the old documentation used to.
Eventually, it seems all of the old users manual could be whittled
down into independent tutorial topics. Much discussion of what we'd
like to see is at
http://www.mail-archive.com/cython-dev@codespeak.net/msg06945.html
There is currently a huge amount of redundancy, but no one section has
it all.
Also, we should go through the wiki enhancement proposal list
and make sure to transfer the (done) ones into the user manual.
Cython-0.29.28/docs/_static/cython-logo-light.png

[binary PNG image data (Cython logo) omitted]