pyFAI-0.11.0/test/debug_ocl_sort.py
import numpy, time
import pyFAI, pyFAI.opencl
from pyFAI.opencl import pyopencl, ocl
import pyopencl.array
N = 1024
ws = N // 8
ctx = ocl.create_context("GPU")
queue = pyopencl.CommandQueue(ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)
h_data = numpy.random.random(N).astype("float32")
d_data = pyopencl.array.to_device(queue, h_data)
local_mem = pyopencl.LocalMemory(ws * 32)  # 2 float4 per work-item = 2*4*4 = 32 bytes each
src = pyFAI.utils.read_cl_file("bsort.cl")
prg = pyopencl.Program(ctx, src).build()
t0 = time.time()
hs_data = numpy.sort(h_data)
t1 = time.time()
time_sort = 1e3 * (t1 - t0)
print(time_sort)
evt = prg.bsort_init(queue, (ws,), (ws,), d_data.data, local_mem)
evt.wait()
err = abs(hs_data - d_data.get()).max()
print("Numpy sort on %s element took %s ms" % (N, time_sort))
print("Reference sort time: %s ms, err=%s " % (1e-6 * (evt.profile.end - evt.profile.start), err))
pyFAI-0.11.0/test/chi_square_ds8.py
#!/usr/bin/python
#coding: utf-8
#tests if the distribution of Chi2 is centered around 1:
# Needs a large dataset (thousands of images)
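# For each frame the reduced chi^2 is computed against the average curve over all frames:
#     chi2 = mean_over_bins( ((I_bin - <I>_bin) / sigma_bin) ** 2 )
# so the histograms plotted below should be centered on 1 if the propagated errors are realistic.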
import os
import sys
import glob
import pylab
pylab.ion()
import numpy
from math import sqrt
import fabio
from utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from lxml import etree
ai = pyFAI.AzimuthalIntegrator(detector="Pilatus1M")
images = glob.glob("/data/bm29/inhouse/opd29/20140430/raw/water_008_*.edf")
images.sort()
img = images[0]
xml = etree.parse(os.path.splitext(img)[0] + ".xml")
wl = float(xml.xpath("//wavelength")[0].getchildren()[0].text)
centerX = float(xml.xpath("//beamCenter_1")[0].getchildren()[0].text)
centerY = float(xml.xpath("//beamCenter_2")[0].getchildren()[0].text)
directDist = float(xml.xpath("//detectorDistance")[0].getchildren()[0].text) * 1000.0
msk = xml.xpath("//maskFile")[0].getchildren()[0].getchildren()[0].text
msk = numpy.logical_or(fabio.open(msk).data, ai.detector.mask)
ai.setFit2D(directDist=directDist, centerX=centerX, centerY=centerY)
ai.wavelength = wl
I_splitBB = []
sigma_splitBB = []
I_splitFull = []
sigma_splitFull = []
I_nosplit = []
sigma_nosplit = []
for fn in images[:10]:
img = fabio.open(fn).data
xml = etree.parse(os.path.splitext(fn)[0] + ".xml")
monitor = float(xml.xpath("//beamStopDiode")[0].getchildren()[0].text)
print(fn, monitor);
variance = numpy.maximum(img, 1)
q, i, s = ai.integrate1d(img, 1040, unit="q_nm^-1", method="numpy", variance=variance, mask=msk, normalization_factor=monitor)
I_nosplit.append(i)
sigma_nosplit.append(s)
q, i, s = ai.integrate1d(img, 1040, unit="q_nm^-1", method="splitbbox", variance=variance, mask=msk, normalization_factor=monitor)
I_splitBB.append(i)
sigma_splitBB.append(s)
q, i, s = ai.integrate1d(img, 1040, unit="q_nm^-1", method="splitpixel", variance=variance, mask=msk, normalization_factor=monitor)
I_splitFull.append(i)
sigma_splitFull.append(s)
I_splitBB = numpy.vstack(I_splitBB)
I_splitFull = numpy.vstack(I_splitFull)
I_nosplit = numpy.vstack(I_nosplit)
sigma_nosplit = numpy.vstack(sigma_nosplit)
sigma_splitBB = numpy.vstack(sigma_splitBB)
sigma_splitFull = numpy.vstack(sigma_splitFull)
Chi2_splitBB = []
Chi2_splitFull = []
Chi2_nosplit = []
Iavg_splitFull = I_splitFull.mean(axis=0)
Iavg_splitBB = I_splitBB.mean(axis=0)
Iavg_nosplit = I_nosplit.mean(axis=0)
for i in range(I_splitBB.shape[0]):
Chi2_splitBB.append((((I_splitBB[i] - Iavg_splitBB) / sigma_splitBB[i]) ** 2).mean())
Chi2_splitFull.append((((I_splitFull[i] - Iavg_splitFull) / sigma_splitFull[i]) ** 2).mean())
Chi2_nosplit.append((((I_nosplit[i] - Iavg_nosplit) / sigma_nosplit[i]) ** 2).mean())
pylab.hist(Chi2_splitBB, 50, label="splitBB")
pylab.hist(Chi2_splitFull, 50, label="splitFull")
pylab.hist(Chi2_nosplit, 50, label="no_split")
pylab.xlabel(r"$\chi^2$")
pylab.ylabel("count")
pylab.legend()
pylab.show()
pyFAI-0.11.0/test/test_distortion.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"test suite for Distortion correction class"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import unittest
# import os
import numpy
# import logging, time
import sys
import fabio
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI import _distortion, detectors
# _distortion = sys.modules["pyFAI._distortion"]
# detectors = sys.modules["pyFAI.detectors"]
class test_halfccd(unittest.TestCase):
"""basic test"""
halfFrelon = "1464/LaB6_0020.edf"
splineFile = "1461/halfccd.spline"
fit2d_cor = "2454/halfccd.fit2d.edf"
def setUp(self):
"""Download files"""
self.fit2dFile = UtilsTest.getimage(self.__class__.fit2d_cor)
self.halfFrelon = UtilsTest.getimage(self.__class__.halfFrelon)
self.splineFile = UtilsTest.getimage(self.__class__.splineFile)
self.det = detectors.FReLoN(self.splineFile)
self.dis = _distortion.Distortion(self.det)
self.fit2d = fabio.open(self.fit2dFile).data
self.raw = fabio.open(self.halfFrelon).data
def test_vs_fit2d(self):
"""
Compare spline correction vs fit2d's code
precision at 1e-3 : 90% of pixels
"""
size = self.dis.calc_LUT_size()
mem = size.max() * self.raw.nbytes * 4 / 2.0 ** 20
logger.info("Memory expected for LUT: %.3f MBytes", mem)
try:
self.dis.calc_LUT()
except MemoryError as error:
logger.warning("test_halfccd.test_vs_fit2d failed because of MemoryError. This test tries to allocate %.3fMBytes and failed with %s", mem, error)
return
cor = self.dis.correct(self.raw)
delta = abs(cor - self.fit2d)
mask = numpy.where(self.fit2d == 0)
denom = self.fit2d.copy()
denom[mask] = 1
ratio = delta / denom
ratio[mask] = 0
good_points_ratio = 1.0 * (ratio < 1e-3).sum() / self.raw.size
logger.info("ratio of good points (less than 1/1000 relative error): %.4f" % good_points_ratio)
self.assert_(good_points_ratio > 0.99, "99% of all points have a relative error below 1/1000")
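# Nota: test_vs_fit2d corrects the raw half-Frelon image through the spline-based
# Distortion LUT and requires that more than 99% of the pixels agree with fit2d's own
# corrected image to better than 1e-3 relative error (pixels where fit2d is exactly 0
# are excluded from the ratio).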
def test_suite_all_distortion():
testSuite = unittest.TestSuite()
testSuite.addTest(test_halfccd("test_vs_fit2d"))
# testSuite.addTest(test_azim_halfFrelon("test_numpy_vs_fit2d"))
# testSuite.addTest(test_azim_halfFrelon("test_cythonSP_vs_fit2d"))
# testSuite.addTest(test_azim_halfFrelon("test_cython_vs_numpy"))
# testSuite.addTest(test_flatimage("test_splitPixel"))
# testSuite.addTest(test_flatimage("test_splitBBox"))
# This test is known to be broken ...
# testSuite.addTest(test_saxs("test_mask"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_distortion()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/profile_ocl_lut_pixelsplit2.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio
import pyopencl as cl
from pylab import *
from six.moves import input
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
#from pyFAI import splitBBoxCSR
#logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size/8,4,2)
ref = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
pos_size = pos.size
#size = data.size
size = pos_size/8
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4*workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
with open("../openCL/ocl_lut_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
compile_options = "-D BINS=%i -D NIMAGE=%i -D WORKGROUP_SIZE=%i -D EPS=%e" % \
(bins, size, workgroup_size, numpy.finfo(numpy.float32).eps)
print(compile_options)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size*workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult.data, d_minmax.data)
min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax=(min0,max0,min1,max1)
print(minmax)
print(d_minmax)
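# Nota (hedged): reduce1/reduce2 compute the global (min0, max0, min1, max1) of the pixel
# corner coordinates on the device; the host-side minmax tuple above is printed only to
# cross-check that d_minmax ended up with the same four values before binning starts.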
memset_size = (bins + workgroup_size - 1) & ~(workgroup_size - 1),  # 1-tuple: bins rounded up to a multiple of workgroup_size (power-of-two assumed)
#d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
#program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
global_size = (size + workgroup_size - 1) & ~(workgroup_size - 1),  # 1-tuple: pixel count rounded up to a multiple of workgroup_size
#program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
#outMax_1 = numpy.copy(d_outMax)
outMax = ref.outMax
idx_ptr = numpy.ndarray(bins+1, dtype=numpy.int32)
idx_ptr[0] = 0
idx_ptr[1:] = outMax.cumsum()
d_idx_ptr = cl.array.to_device(queue, idx_ptr)
#d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
#program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
#lutsize = numpy.ndarray(1, dtype=numpy.int32)
#cl.enqueue_copy(queue, lutsize, d_lutsize.data)
#print lutsize
lut_size = int(idx_ptr[-1])
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
#d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outCount = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outMerge = cl.array.empty(queue, (bins,), dtype=numpy.float32)
program.lut3(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data, d_idx_ptr.data, d_indices.data, d_data.data)
outMax_2 = numpy.copy(d_outMax)
#check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
#cl.enqueue_copy(queue, check_atomics, d_check_atomics)
program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outCount.data, d_outMerge.data)
d_image = cl.array.to_device(queue, data)
d_image_float = cl.array.empty(queue, (size,), dtype=numpy.float32)
#program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float.data) # halfccd
program.csr_integrate(queue, (bins*workgroup_size,),(workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
#outData = numpy.ndarray(bins, dtype=numpy.float32)
#outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
cl.enqueue_copy(queue,outMerge, d_outMerge.data)
#program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
#cl.enqueue_copy(queue,outMerge, d_outMerge)
ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="splitpixelfull")
#assert(numpy.allclose(ref,outMerge))
plot(ref[0],outMerge, label="ocl_lut_merge")
##plot(ref[0],outData, label="ocl_lut_data")
##plot(ref[0],outCount, label="ocl_lut_count")
plot(*ref, label="ref_merge")
##plot(ref[0], ref[2], label="ref_data")
##plot(ref[0], ref[3], label="ref_count")
###plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
show()
input()
pyFAI-0.11.0/test/profile_csr_padded_csr.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio, pyopencl
from pylab import *
from six.moves import input
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitBBox
from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
from pyFAI import ocl_azim_csr
logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/Pilatus1M.poni")
data = fabio.open("testimages/Pilatus1M.edf").data
ref = ai.xrpd_LUT(data, 1000)[1]
obt = ai.xrpd_LUT_OCL(data, 1000)[1]
logger.debug("check LUT basics: %s"%abs(obt[1] - ref[1]).max())
assert numpy.allclose(ref,obt)
workgroup_size = 128
print("Workgroup size = ", workgroup_size)
out_cyt_bb = pyFAI.splitBBox.histoBBox1d(data, ai._ttha, ai._dttha, bins=1000)[1]
t0 = time.time()
cyt_lut = pyFAI.splitBBoxLUT.HistoBBox1d(
ai._ttha,
ai._dttha,
bins=1000,
unit="2th_deg")
t1 = time.time()
print("Time to create cython lut: ", t1-t0)
t0 = time.time()
cyt_lut.generate_csr()
t1 = time.time()
print("Time to generate CSR from cython lut: ", t1-t0)
t0 = time.time()
cyt_lut.generate_csr_padded(workgroup_size)
t1 = time.time()
print("Time to generate CSR_Padded from cython lut: ", t1-t0)
t0 = time.time()
cyt_csr = pyFAI.splitBBoxCSR.HistoBBox1d(
ai._ttha,
ai._dttha,
bins=1000,
unit="2th_deg")
t1 = time.time()
print("Time to create cython CSR: ", t1-t0)
t0 = time.time()
cyt_csr_padded = pyFAI.splitBBoxCSR.HistoBBox1d(
ai._ttha,
ai._dttha,
bins=1000,
unit="2th_deg",
padding=workgroup_size)
t1 = time.time()
print("Time to create cython CSR_Padded: ", t1-t0)
out_cyt_lut = cyt_lut.integrate(data)[1]
ocl_lut = pyFAI.ocl_azim_lut.OCL_LUT_Integrator(cyt_lut.lut, data.size, "GPU",profile=True)
out_ocl_lut = ocl_lut.integrate(data)[0]
print("")
print("OpenCL LUT on: ", ocl_lut.device)
ocl_lut.log_profile()
print("")
print("================================================================================")
ocl_lut.__del__()
ocl_csr = ocl_azim_csr.OCL_CSR_Integrator(cyt_csr.lut, data.size, "GPU",profile=True, block_size=workgroup_size)
out_ocl_csr = ocl_csr.integrate(data)[0]
print("")
print("ÖpenCL CSR on: ", ocl_csr.device)
ocl_csr.log_profile()
print("")
print("================================================================================")
ocl_csr.__del__()
ocl_csr_padded = ocl_azim_csr.OCL_CSR_Integrator(cyt_csr_padded.lut, data.size, "GPU",profile=True, padded=True, block_size=workgroup_size)
out_ocl_csr_padded = ocl_csr_padded.integrate(data)[0]
print("")
print("ÖpenCL CSR padded: ", ocl_csr_padded.device)
ocl_csr_padded.log_profile()
print("")
print("================================================================================")
ocl_csr_padded.__del__()
#assert numpy.allclose(out_ocl_csr_padded,out_cyt_bb)
plot(out_cyt_bb, label="cyt_bb" )
plot(out_cyt_lut, label="cyt_lut" )
plot(out_ocl_lut, label="ocl_lut")
plot(out_ocl_csr, label="ocl_csr")
plot(out_ocl_csr_padded, label="ocl_csr_padded")
legend()
show()
input()
pyFAI-0.11.0/test/profile_pixelsplitFullLUT2.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio
import pyopencl as cl
from pylab import *
from six.moves import input
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
from pyFAI import splitPixelFull
import scipy
#logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size/8,4,2)
pos_size = pos.size
#size = data.size
size = pos_size/8
boo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg") #1101814
#matrix_32 = scipy.sparse.csr_matrix((boo.data,boo.indices,boo.indptr), shape=(bins,data.size))
#mat32d = matrix_32.todense()
#mat32d.shape = (mat32d.size,)
out = boo.integrate(data)
#ai.xrpd_LUT(data, 1000)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
#foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg", bad_pixel=None)
#ref = foo.integrate(data)
#matrix_64 = scipy.sparse.csr_matrix((foo.data,foo.indices,foo.indptr), shape=(bins,data.size))
#mat64d = matrix_64.todense()
#mat64d.shape = (mat64d.size,)
#foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
#bools_bad = (abs(mat32d - mat64d) > 0.000001)
#bools_good = (abs(mat32d - mat64d) <= 0.000001)
#del mat32d
#del mat64d
#del matrix_32
#del matrix_64
#tmp = numpy.where(bools_bad)[1].ravel()
#pixels_bad = numpy.copy(tmp)
#pixels_bad.sort()
#tmp = numpy.where(bools_good)[1]
#pixels_good = numpy.copy(tmp)
#pixels_good.sort()
#pff_ind: (matrix([[856]]), matrix([[1101814]]))
#ref = splitPixelFull.fullSplit1D(pos, data, bins)
#ref = foo.integrate(data)
#assert(numpy.allclose(ref[1],outMerge))
#plot(ref[0],outMerge, label="ocl_lut_merge")
#plot(ref[0],outData, label="ocl_lut_data")
#plot(ref[0],outCount, label="ocl_lut_count")
plot(out[0], out[1], label="ocl_lut_merge")
#plot(out[0], out[2], label="ocl_lut_data")
#plot(out[0], out[3], label="ocl_lut_count")
#plot(ref[0], ref[1], label="ref_merge")
#plot(ref[0], ref[2], label="ref_data")
#plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
show()
input()
#aaa = 0
#bbb = 0
#for i in range(bins):
#ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
#ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
#sort1 = numpy.argsort(ind_tmp1)
#sort2 = numpy.argsort(ind_tmp2)
#data_1 = data_tmp1[sort1]
#data_2 = data_tmp2[sort2]
#for j in range(data_1.size):
#aaa += 1
#if not numpy.allclose(data_1[j],data_2[j]):
#bbb += 1
#print data_1[j],data_2[j],numpy.allclose(data_1[j],data_2[j]), idx_ptr[i]+j
#print aaa,bbb
pyFAI-0.11.0/test/profile_ocl_lut.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys, numpy, time
from . import utilstest
import fabio, pyopencl
from pylab import *
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitBBox
#splitBBox = sys.modules["pyFAI.splitBBox"]
ai = pyFAI.load("testimages/Pilatus1M.poni")
data = fabio.open("testimages/Pilatus1M.edf").data
ref = ai.xrpd_LUT(data, 1000)
obt = ai.xrpd_LUT_OCL(data, 1000)
print(abs(obt[1] - ref[1]).max())
lut = ai._lut_integrator.lut
gpu = pyFAI.ocl_azim_lut.OCL_LUT_Integrator(lut, data.size, "GPU")
print(gpu.device)
img = numpy.zeros(data.shape, dtype="float32")
print("ref", (data == -2).sum(), (data == -1).sum())
pyopencl.enqueue_copy(gpu._queue, img, gpu._cl_mem["image"])#.wait()
print("obt", (img == -2).sum(), (img == -1).sum())
out_cyt = ai._lut_integrator.integrate(data)
out_ocl = gpu.integrate(data)[0]
print("NoCorr R=", utilstest.Rwp((out_cyt[0], out_ocl), out_cyt[:2], "no corrections"))
nodummy = out_cyt[1]
plot(nodummy + 1, label="no_corr")
out_cyt = ai._lut_integrator.integrate(data, dummy= -2, delta_dummy=1.5)
out_ocl = gpu.integrate(data, dummy= -2, delta_dummy=1.5)[0]
print("Dummy R=", utilstest.Rwp((out_cyt[0], out_ocl), out_cyt[:2], "Dummy"))
#print "nodummy/Dummy", utilstest.Rwp((out_cyt[0], out_cyt[1]), (out_cyt[0], nodummy), "nodummy/Dummy")
dark = numpy.random.random(data.shape)
out_cyt = ai._lut_integrator.integrate(data, dark=dark)
out_ocl = gpu.integrate(data, dark=dark)[0]
print("Dark R=", utilstest.Rwp((out_cyt[0], out_ocl), out_cyt[:2], "dark"))
flat = 2 * numpy.ones_like(data)
out_cyt = ai._lut_integrator.integrate(data, flat=flat)
out_ocl = gpu.integrate(data, flat=flat)[0]
print("Flat R=", utilstest.Rwp((out_cyt[0], out_ocl), out_cyt[:2], "flat"))
solidAngle = ai.solidAngleArray(data.shape)
out_cyt = ai._lut_integrator.integrate(data, solidAngle=solidAngle)
out_ocl = gpu.integrate(data, solidAngle=solidAngle)[0]
print("SolidAngle R=", utilstest.Rwp((out_cyt[0], out_ocl), out_cyt[:2], "SolidAngle"))
polarization = ai.polarization(data.shape, 0.95)
out_cyt = ai._lut_integrator.integrate(data, polarization=polarization)
out_ocl = gpu.integrate(data, polarization=polarization)[0]
print("PolarizationR=", utilstest.Rwp((out_cyt[0], out_ocl), out_cyt[:2], "Polarization"))
#pyopencl.enqueue_copy(gpu._queue, img, gpu._cl_mem["image"]).wait()
#print "SolidAngle", solidAngle
#print img
#xx = splitBBox.histoBBox1d(weights=data,
# pos0=ai._ttha,
# delta_pos0=ai._dttha,
# bins=1000,
# polarization=polarization)[1]
#plot(xx + 2, label="xrpd")
#print "Pol: lut/refR=", utilstest.Rwp((out_cyt[0], xx), out_cyt[:2], "Polarization")
#print "Pol: ocl/refR=", utilstest.Rwp((out_cyt[0], out_ocl), (out_cyt[0], xx), "Polarization")
#print "Pol: noc/refR=", utilstest.Rwp((out_cyt[0], nodummy), (out_cyt[0], xx), "Polarization")
#print out_ocl
plot(out_cyt[1], label="ref")
plot(out_ocl, label="obt")
#plot(out, label="out")
#outData = numpy.zeros(1000, "float32")
#outCount = numpy.zeros(1000, "float32")
#outMerge = numpy.zeros(1000, "float32")
#pyopencl.enqueue_copy(gpu._queue, outData, gpu._cl_mem["outData"])#.wait()
#pyopencl.enqueue_copy(gpu._queue, outCount, gpu._cl_mem["outCount"])#.wait()
#pyopencl.enqueue_copy(gpu._queue, outMerge, gpu._cl_mem["outMerge"])#.wait()
#plot(outData, label="outData")
#plot(outCount, label="outCount")
#plot(outMerge, label="outMerge")
legend()
t0 = time.time()
out = gpu.integrate(data, dummy= -2, delta_dummy=1.5)
print("Timings With dummy", 1000 * (time.time() - t0))
t0 = time.time()
out = gpu.integrate(data)
print("Timings Without dummy", 1000 * (time.time() - t0))
yscale("log")
show()
pyFAI-0.11.0/test/profile_OCLFullSplit.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio
import pyopencl as cl
from pylab import *
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import OCLFullSplit
#logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size/8,4,2)
pos_size = pos.size
#size = data.size
size = pos_size/8
foo = OCLFullSplit.OCLFullSplit1d(pos,bins)
print(foo.pos0Range)
print(foo.pos1Range)
pyFAI-0.11.0/test/profile_splitPixelFullLUT.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
import sys, numpy, time
import utilstest
import fabio
import pyopencl as cl
from pylab import *
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
import scipy
# logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = (100, 36)
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
pos_size = pos.size
# size = data.size
size = pos_size / 8
boo = splitPixelFullLUT.HistoLUT2dFullSplit(pos, bins, unit="2th_deg")
foo = boo.integrate(data)
# ref = ai.integrate2d(data,bins=bins,unit="2th_deg", correctSolidAngle=False, method="lut")
# assert(numpy.allclose(ref[1],outMerge))
plot(foo[0])
# plot(ref[0],outMerge, label="ocl_lut_merge")
# plot(ref[0],outData, label="ocl_lut_data")
# plot(ref[0],outCount, label="ocl_lut_count")
# plot(out[0], out[1], label="ocl_lut_merge")
# plot(out[0], out[2], label="ocl_lut_data")
# plot(out[0], out[3], label="ocl_lut_count")
# plot(ref[0], ref[1], label="ref_merge")
# plot(ref[0], ref[2], label="ref_data")
# plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
# legend()
show()
pyFAI-0.11.0/test/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# print(
"""
Test module pyFAI.
"""
# )
__authors__ = ["Jérôme Kieffer"]
__contact__ = "jerome.kieffer@esrf.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "2014-11-10"
import os
import sys
import unittest
# if __path__ not in dir():
# print("Create the package to allow relative imports")
# __name__ = "pyFAI.test"
# dirname = os.path.dirname(__file__)
from . import utilstest
from .test_all import test_suite_all
def run_tests():
"""Run test complete test_suite"""
mysuite = test_suite_all()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
print("Test suite failed")
return 1
else:
print("Test suite succeeded")
return 0
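# Illustrative use once pyFAI is installed (assuming the test data can be fetched by
# utilstest), e.g. from a shell:
#     python -c "import sys, pyFAI.test; sys.exit(pyFAI.test.run_tests())"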
pyFAI-0.11.0/test/bug-1280.py
#!/usr/bin/python
import sys, os
import numpy
import fabio
from utilstest import UtilsTest
pyFAI = UtilsTest.pyFAI
data = fabio.open(UtilsTest.getimage("1788/moke.tif")).data
ai = pyFAI.AzimuthalIntegrator.sload("moke.poni")
ai.xrpd(data, 1000)
tth = ai.twoThetaArray(data.shape)
dtth = ai.delta2Theta(data.shape)
o1 = ai.xrpd(data, 1000)
o2 = ai.xrpd(data, 1000, tthRange=[3.5, 12.5])
o3 = ai.xrpd(data, 1000, chiRange=[10, 80])
o4 = ai.xrpd2(data, 100, 36, tthRange=[3.5, 12.5], chiRange=[10, 80])
from pylab import *
plot(o1[0], o1[1], "b")
plot(o2[0], o2[1], "r")
plot(o3[0], o3[1], "g")
imshow(o4[0])
show()
pyFAI-0.11.0/test/profile_pixelsplitFull.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio
import pyopencl as cl
from pylab import *
from six.moves import input
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
from pyFAI import splitPixelFull
#logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size/8,4,2)
pos_size = pos.size
#size = data.size
size = pos_size/8
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4*workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
with open("../openCL/ocl_lut_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
compile_options = "-D BINS=%i -D NIMAGE=%i -D WORKGROUP_SIZE=%i -D EPS=%e" % \
(bins, size, workgroup_size, numpy.finfo(numpy.float32).eps)
print(compile_options)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size*workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult.data, d_minmax.data)
min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax=(min0,max0,min1,max1)
print(minmax)
print(d_minmax)
memset_size = (bins + workgroup_size - 1) & ~(workgroup_size - 1),
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
global_size = (size + workgroup_size - 1) & ~(workgroup_size - 1),
program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
outMax_1 = numpy.copy(d_outMax)
d_idx_ptr = cl.array.empty(queue, (bins+1,), dtype=numpy.int32)
d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
lutsize = numpy.ndarray(1, dtype=numpy.int32)
cl.enqueue_copy(queue, lutsize, d_lutsize.data)
print(lutsize)
lut_size = int(lutsize[0])
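# Hedged reading of the CSR construction above/below: lut1 counts the pixel fragments
# falling into each bin (outMax), lut2 turns those counts into the idx_ptr prefix sum
# plus the total LUT size, lut3 fills the indices/data arrays, and csr_integrate then
# performs the sparse matrix - image product that yields outData/outCount/outMerge.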
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
#d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outCount = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outMerge = cl.array.empty(queue, (bins,), dtype=numpy.float32)
program.lut3(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data, d_idx_ptr.data, d_indices.data, d_data.data)
outMax_2 = numpy.copy(d_outMax)
indices = ndarray(lut_size, dtype=numpy.int32)
data_lut = ndarray(lut_size, dtype=numpy.float32)
idx_ptr = ndarray(bins+1, dtype=numpy.int32)
cl.enqueue_copy(queue,indices, d_indices.data)
cl.enqueue_copy(queue,data_lut, d_data.data)
cl.enqueue_copy(queue,idx_ptr, d_idx_ptr.data)
#check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
#cl.enqueue_copy(queue, check_atomics, d_check_atomics)
program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outCount.data, d_outMerge.data)
d_image = cl.array.to_device(queue, data)
d_image_float = cl.array.empty(queue, (size,), dtype=numpy.float32)
#program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float.data) # halfccd
program.csr_integrate(queue, (bins*workgroup_size,),(workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
#outData = numpy.copy(d_outData)
#outCount = numpy.copy(d_outCount)
#outMerge = numpy.copy(d_outMerge)
outData = numpy.ndarray(bins, dtype=numpy.float32)
outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
cl.enqueue_copy(queue,outData, d_outData.data)
cl.enqueue_copy(queue,outCount, d_outCount.data)
cl.enqueue_copy(queue,outMerge, d_outMerge.data)
#program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
#cl.enqueue_copy(queue,outMerge, d_outMerge)
ai.xrpd_LUT(data, 1000)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
#foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
#ref = splitPixelFull.fullSplit1D(pos, data, bins)
ref = foo.integrate(data)
#assert(numpy.allclose(ref[1],outMerge))
#plot(ref[0],outMerge, label="ocl_lut_merge")
#plot(ref[0],outData, label="ocl_lut_data")
plot(ref[0],outCount, label="ocl_lut_count")
#plot(ref[0], ref[1], label="ref_merge")
#plot(ref[0], ref[2], label="ref_data")
plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
show()
input()
#aaa = 0
#bbb = 0
#for i in range(bins):
#ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
#ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
#sort1 = numpy.argsort(ind_tmp1)
#sort2 = numpy.argsort(ind_tmp2)
#data_1 = data_tmp1[sort1]
#data_2 = data_tmp2[sort2]
#for j in range(data_1.size):
#aaa += 1
#if not numpy.allclose(data_1[j],data_2[j]):
#bbb += 1
#print data_1[j],data_2[j],numpy.allclose(data_1[j],data_2[j]), idx_ptr[i]+j
#print aaa,bbb
pyFAI-0.11.0/test/test_dummy.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"Dummy test to run first to check for relative imports"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import unittest
import sys
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
class TestDummy(unittest.TestCase):
def test_dummy(self):
print(__name__)
print(pyFAI)
def test_suite_all_dummy():
testSuite = unittest.TestSuite()
testSuite.addTest(TestDummy("test_dummy"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_dummy()
runner = unittest.TextTestRunner()
runner.run(mysuite)
UtilsTest.clean_up()
pyFAI-0.11.0/test/check_calib.py
#!/usr/bin/python
from __future__ import absolute_import, print_function, with_statement, division
# this is a very simple tool that checks the calibration
import pyFAI, fabio, numpy, sys, os, optparse, time
import pylab
import threading
# The optional fftw3 python bindings are only needed when shiftFFT() is called with
# method="fftw"; without them the numpy FFT fallback below is used instead.
try:
    import fftw3
except ImportError:
    fftw3 = None
sem = threading.Semaphore()
def shift(input, shift):
"""
Shift an array like scipy.ndimage.interpolation.shift(input, shift, mode="wrap", order=0) but faster
@param input: 2d numpy array
@param shift: 2-tuple of integers
@return: shifted image
"""
re = numpy.zeros_like(input)
s0, s1 = input.shape
d0 = shift[0] % s0
d1 = shift[1] % s1
r0 = (-d0) % s0
r1 = (-d1) % s1
re[d0:, d1:] = input[:r0, :r1]
re[:d0, d1:] = input[r0:, :r1]
re[d0:, :d1] = input[:r0, r1:]
re[:d0, :d1] = input[r0:, r1:]
return re
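# Hand-checked example of the wrap-around convention implemented above:
#     shift(numpy.array([[1, 2], [3, 4]]), (1, 1)) -> [[4, 3], [2, 1]]
# i.e. output[i, j] == input[(i - shift[0]) % s0, (j - shift[1]) % s1].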
def shiftFFT(inp, shift, method="fftw"):
"""
Do shift using FFTs
Shift an array like scipy.ndimage.interpolation.shift(input, shift, mode="wrap", order="infinity") but faster
@param input: 2d numpy array
@param shift: 2-tuple of float
@return: shifted image
"""
d0, d1 = inp.shape
v0, v1 = shift
f0 = numpy.fft.ifftshift(numpy.arange(-d0 // 2, d0 // 2))
f1 = numpy.fft.ifftshift(numpy.arange(-d1 // 2, d1 // 2))
m1, m0 = numpy.meshgrid(f1, f0)
e0 = numpy.exp(-2j * numpy.pi * v0 * m0 / float(d0))
e1 = numpy.exp(-2j * numpy.pi * v1 * m1 / float(d1))
e = e0 * e1
if method.startswith("fftw") and (fftw3 is not None):
input = numpy.zeros((d0, d1), dtype=complex)
output = numpy.zeros((d0, d1), dtype=complex)
with sem:
fft = fftw3.Plan(input, output, direction='forward', flags=['estimate'])
ifft = fftw3.Plan(output, input, direction='backward', flags=['estimate'])
input[:, :] = inp.astype(complex)
fft()
output *= e
ifft()
out = input / input.size
else:
out = numpy.fft.ifft2(numpy.fft.fft2(inp) * e)
return abs(out)
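# shiftFFT() relies on the Fourier shift theorem: translating by (v0, v1) multiplies the
# 2D FFT by exp(-2j*pi*(v0*m0/d0 + v1*m1/d1)), which is exactly the e0*e1 phase ramp
# built above, so non-integer shifts are interpolated implicitly in Fourier space.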
def maximum_position(img):
"""
Same as scipy.ndimage.measurements.maximum_position:
Find the position of the maximum of the values of the array.
@param img: 2-D image
@return: 2-tuple of int with the position of the maximum
"""
maxarg = numpy.argmax(img)
s0, s1 = img.shape
return (maxarg // s1, maxarg % s1)
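# Hand-checked example: maximum_position(numpy.array([[0, 1], [9, 2]])) == (1, 0)
# because the flattened argmax is 2, and 2 // 2, 2 % 2 gives row 1, column 0.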
def center_of_mass(img):
"""
Calculate the center of mass of of the array.
Like scipy.ndimage.measurements.center_of_mass
@param img: 2-D array
@return: 2-tuple of float with the center of mass
"""
d0, d1 = img.shape
a0, a1 = numpy.ogrid[:d0, :d1]
img = img.astype("float64")
img /= img.sum()
return ((a0 * img).sum(), (a1 * img).sum())
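# Hand-checked example: center_of_mass(numpy.array([[0., 0.], [0., 4.]])) == (1.0, 1.0)
# since all the (normalised) weight sits on the pixel at row 1, column 1.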
def measure_offset(img1, img2, method="numpy", withLog=False, withCorr=False):
"""
Measure the actual offset between 2 images
@param img1: ndarray, first image
@param img2: ndarray, second image, same shape as img1
@param withLog: shall we return logs as well ? boolean
@return: tuple of floats with the offsets
"""
method = str(method)
################################################################################
# Start convolutions
################################################################################
shape = img1.shape
logs = []
assert img2.shape == shape
t0 = time.time()
i1f = numpy.fft.fft2(img1)
i2f = numpy.fft.fft2(img2)
res = numpy.fft.ifft2(i1f * i2f.conjugate()).real
t1 = time.time()
################################################################################
# END of convolutions
################################################################################
offset1 = maximum_position(res)
res = shift(res, (shape[0] // 2 , shape[1] // 2))
mean = res.mean(dtype="float64")
maxi = res.max()
std = res.std(dtype="float64")
SN = (maxi - mean) / std
new = numpy.maximum(numpy.zeros(shape), res - numpy.ones(shape) * (mean + std * SN * 0.9))
com2 = center_of_mass(new)
logs.append("MeasureOffset: fine result of the centered image: %s %s " % com2)
offset2 = ((com2[0] - shape[0] // 2) % shape[0] , (com2[1] - shape[1] // 2) % shape[1])
delta0 = (offset2[0] - offset1[0]) % shape[0]
delta1 = (offset2[1] - offset1[1]) % shape[1]
if delta0 > shape[0] // 2:
delta0 -= shape[0]
if delta1 > shape[1] // 2:
delta1 -= shape[1]
if (abs(delta0) > 2) or (abs(delta1) > 2):
logs.append("MeasureOffset: Raw offset is %s and refined is %s. Please investigate !" % (offset1, offset2))
listOffset = list(offset2)
if listOffset[0] > shape[0] // 2:
listOffset[0] -= shape[0]
if listOffset[1] > shape[1] // 2:
listOffset[1] -= shape[1]
offset = tuple(listOffset)
t2 = time.time()
logs.append("MeasureOffset: fine result: %s %s" % offset)
logs.append("MeasureOffset: execution time: %.3fs with %.3fs for FFTs" % (t2 - t0, t1 - t0))
if withLog:
if withCorr:
return offset, logs, new
else:
return offset, logs
else:
if withCorr:
return offset, new
else:
return offset
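# measure_offset() is a phase-correlation scheme: the inverse FFT of
# FFT(img1) * conj(FFT(img2)) peaks at the translation between the two images.
# maximum_position() provides the integer offset, then the correlation map is recentred,
# thresholded and refined with center_of_mass() to sub-pixel precision; a warning is
# logged when the coarse and refined estimates disagree by more than 2 pixels.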
class CheckCalib(object):
def __init__(self, poni, img):
self.ponifile = poni
self.ai = pyFAI.load(poni)
self.img = fabio.open(img)
self.r = None
self.I = None
self.resynth = None
self.delta = None
def __repr__(self, *args, **kwargs):
return self.ai.__repr__()
def integrate(self):
self.r, self.I = self.ai.integrate1d(self.img.data, 2048, unit="q_nm^-1")
def rebuild(self):
if self.r is None:
self.integrate()
self.resynth = self.ai.calcfrom1d(self.r, self.I, self.img.data.shape, mask=None,
dim1_unit="q_nm^-1", correctSolidAngle=True)
self.delta = self.resynth - self.img.data
self.offset, log = measure_offset(self.resynth, self.img.data, withLog=1)
print(os.linesep.join(log))
print(self.offset)
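# CheckCalib integrates the image into a 1D curve, re-projects that curve into a
# synthetic 2D image with calcfrom1d(), and measures the shift between the synthetic and
# the raw image: with a good calibration the offset should be close to (0, 0) and the
# delta image essentially structureless.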
if __name__ == "__main__":
cc = CheckCalib(sys.argv[1], sys.argv[2])
cc.integrate()
cc.rebuild()
pylab.ion()
pylab.imshow(cc.delta, aspect="auto", interpolation=None, origin="bottom")
# pylab.show()
raw_input("Delta image")
pylab.imshow(cc.img.data, aspect="auto", interpolation=None, origin="bottom")
raw_input("raw image")
pylab.imshow(cc.resynth, aspect="auto", interpolation=None, origin="bottom")
raw_input("rebuild image")
pylab.clf()
pylab.plot(cc.r, cc.I)
raw_input("powder pattern")
pyFAI-0.11.0/test/test_blob_detection.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
#
"test suite for blob detection cython accelerated code"
__author__ = "Jérôme Kieffer"
__contact__ = "Jérôme Kieffer"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import sys
import unittest
import numpy
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from utilstest import getLogger # UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI.detectors import detector_factory
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from pyFAI.blob_detection import BlobDetection, local_max
from pyFAI import _blob
def image_test_rings():
rings = 10
mod = 50
detector = detector_factory("Titan")
sigma = detector.pixel1 * 4
shape = detector.max_shape
ai = AzimuthalIntegrator(detector=detector)
ai.setFit2D(1000, 1000, 1000)
r = ai.rArray(shape)
r_max = r.max()
chi = ai.chiArray(shape)
img = numpy.zeros(shape)
modulation = (1 + numpy.sin(5 * r + chi * mod))
for radius in numpy.linspace(0, r_max, rings):
img += numpy.exp(-(r - radius) ** 2 / (2 * (sigma * sigma)))
return img * modulation
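# image_test_rings() synthesises a calibration-like image on a "Titan" detector: ten
# Gaussian rings of radial width sigma (four pixel pitches), multiplied by the azimuthal
# modulation 1 + sin(5*r + 50*chi) so that the blob detection has well-separated maxima.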
class TestBlobDetection(unittest.TestCase):
img = None
def setUp(self):
if self.img is None:
self.img = image_test_rings()
def test_local_max(self):
bd = BlobDetection(self.img)
bd._one_octave(shrink=False, refine=False, n_5=False)
self.assert_(numpy.alltrue(_blob.local_max(bd.dogs, bd.cur_mask, False) == \
local_max(bd.dogs, bd.cur_mask, False)), "max test, 3x3x3")
self.assert_(numpy.alltrue(_blob.local_max(bd.dogs, bd.cur_mask, True) == \
local_max(bd.dogs, bd.cur_mask, True)), "max test, 3x5x5")
def test_suite_all_blob_detection():
testSuite = unittest.TestSuite()
testSuite.addTest(TestBlobDetection("test_local_max"))
# testSuite.addTest(TestConvolution("test_vertical_convolution"))
# testSuite.addTest(TestConvolution("test_gaussian"))
# testSuite.addTest(TestConvolution("test_gaussian_filter"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_blob_detection()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/testimages/LaB6.poni
# Nota: C-Order, 1 refers to the Y axis, 2 to the X axis
PixelSize1: 4.842252e-05
PixelSize2: 4.684483e-05
Distance: 0.0994744403007
Poni1: 0.026453455358
Poni2: 0.0481217639198
Rot1: -0.000125830018938
Rot2: -0.0160719674782
Rot3: 1.57079531561
SplineFile: halfccd.spline
pyFAI-0.11.0/test/testimages/Pilatus6M.poni
# Nota: C-Order, 1 refers to the Y axis, 2 to the X axis
PixelSize1: 0.000172
PixelSize2: 0.000172
Distance: 0.3
Poni1: 0.225406
Poni2: 0.2115772
Rot1: 0.0
Rot2: 0.0
Rot3: 0
SplineFile: None
pyFAI-0.11.0/test/testimages/Pilatus1M.poni
# Nota: C-Order, 1 refers to the Y axis, 2 to the X axis
PixelSize1: 0.000172
PixelSize2: 0.000172
Distance: 1.58323111834
Poni1: 0.0334170169115
Poni2: 0.0412277798782
Rot1: 0.00648735642526
Rot2: 0.00755810191106
Rot3: 4.12987220385e-08
SplineFile: None
pyFAI-0.11.0/test/testimages/halfccd.poni
# Nota: C-Order, 1 refers to the Y axis, 2 to the X axis
PixelSize1: 4.842252e-05
PixelSize2: 4.684483e-05
Distance: 0.0994744403007
Poni1: 0.026453455358
Poni2: 0.0481217639198
Rot1: -0.000125830018938
Rot2: -0.0160719674782
Rot3: 1.57079531561
SplineFile: halfccd.spline
pyFAI-0.11.0/test/testimages/Frelon2k.poni
# Nota: C-Order, 1 refers to the Y axis, 2 to the X axis
PixelSize1: 4.683152e-05
PixelSize2: 4.722438e-05
Distance: 0.1057363
Poni1: 0.05301968
Poni2: 0.05660461
Rot1: 0.027767
Rot2: 0.016991
Rot3: -1.8e-05
SplineFile: frelon.spline
pyFAI-0.11.0/test/test_calibrant.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
"""
Test suites for calibrants
"""
import unittest
import numpy
import sys
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
try:
from .utilstest import UtilsTest, getLogger
except (ValueError, SystemError):
from utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI.calibrant import Calibrant, ALL_CALIBRANTS, Cell
from pyFAI.detectors import ALL_DETECTORS
class TestCalibrant(unittest.TestCase):
"""
Test calibrant installation and loading
"""
def test_factory(self):
# by default we provide 11 calibrants
l = len(ALL_CALIBRANTS)
self.assert_(l > 10, "at least 11 calibrants are available, got %s" % l)
self.assert_("LaB6" in ALL_CALIBRANTS, "LaB6 is a calibrant")
# ensure each calibrant instance is unique
cal1 = ALL_CALIBRANTS["LaB6"]
cal1.wavelength = 1e-10
cal2 = ALL_CALIBRANTS["LaB6"]
self.assert_(cal2.wavelength is None, "calibrant is delivered without wavelength")
# check that it is possible to instanciate all calibrant
for k, v in ALL_CALIBRANTS.items():
self.assertTrue(isinstance(v, Calibrant))
def test_2th(self):
lab6 = ALL_CALIBRANTS["LaB6"]
lab6.wavelength = 1.54e-10
tth = lab6.get_2th()
self.assert_(len(tth) == 25, "We expect 25 rings for LaB6")
lab6.setWavelength_change2th(1e-10)
tth = lab6.get_2th()
self.assert_(len(tth) == 25, "We still expect 25 rings for LaB6 (some are missing lost)")
lab6.setWavelength_change2th(2e-10)
tth = lab6.get_2th()
self.assert_(len(tth) == 15, "Only 15 remaining out of 25 rings for LaB6 (some additional got lost)")
def test_fake(self):
"""test for fake image generation"""
with_plot = False
if with_plot:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import rcParams
pp = PdfPages('fake.pdf')
rcParams['font.size'] = 6
plt.clf()
detectors = set(ALL_DETECTORS.values())
for idx, detector in enumerate(detectors):
det = detector()
# Skip generic detectors
if "MAX_SHAPE" not in dir(det):
continue
# skip the big detectors for now
if max(det.MAX_SHAPE) > 2000:
continue
ai = pyFAI.AzimuthalIntegrator(dist=0.01, poni1=0, poni2=0,
detector=det)
calibrant = ALL_CALIBRANTS["LaB6"]
calibrant.set_wavelength(1e-10)
img = calibrant.fake_calibration_image(ai)
if with_plot:
plt.clf
plt.subplot(3, 4, idx % 12)
plt.title(det.name)
plt.imshow(img, interpolation='nearest')
if idx != 0 and idx % 12 == 0:
pp.savefig()
plt.clf()
print(det.name, img.min(), img.max())
self.assert_(img.shape == det.shape, "Image (%s) has the right size" % (det.name,))
self.assert_(img.sum() > 0, "Image (%s) contains some data" % (det.name,))
sys.stderr.write(".")
if with_plot:
pp.savefig()
pp.close()
class TestCell(unittest.TestCase):
"""
Test generation of a calibrant from a cell
"""
def test_class(self):
c = Cell()
self.assertAlmostEqual(c.volume, 1.0, msg="Volume of triclinic 1,1,1,90,90,90 == 1.0, got %s" % c.volume)
c = Cell(1, 2, 3)
self.assertAlmostEqual(c.volume, 6.0, msg="Volume of triclinic 1,2,3,90,90,90 == 6.0, got %s" % c.volume)
c = Cell(1, 2, 3, 90, 30, 90)
self.assertAlmostEqual(c.volume, 3.0, msg="Volume of triclinic 1,2,3,90,30,90 == 3.0, got %s" % c.volume)
def test_classmethods(self):
c = Cell.cubic(1)
self.assertAlmostEqual(c.volume, 1.0, msg="Volume of cubic 1 == 1.0, got %s" % c.volume)
c = Cell.tetragonal(2, 3)
self.assertAlmostEqual(c.volume, 12.0, msg="Volume of tetragonal 2,3 == 12.0, got %s" % c.volume)
c = Cell.orthorhombic(1, 2, 3)
self.assertAlmostEqual(c.volume, 6.0, msg="Volume of orthorhombic 1,2,3 == 6.0, got %s" % c.volume)
def test_dspacing(self):
c = Cell.cubic(1)
cd = c.d_spacing(0.1)
cds = list(cd.keys())
cds.sort()
t = Cell()
td = t.d_spacing(0.1)
tds = list(td.keys())
tds.sort()
self.assertEquals(cds, tds, msg="d-spacings are the same")
for k in cds:
self.assertEquals(cd[k], td[k], msg="plans are the same for d=%s" % k)
def test_helium(self):
a = 4.242
href = "A.F. Schuch and R.L. Mills, Phys. Rev. Lett., 1961, 6, 596."
he = Cell.cubic(a)
self.assert_(len(he.d_spacing(1)) == 15, msg="got 15 lines for He")
he.save("He", "Helium", href, 1.0, UtilsTest.tempdir)
def test_hydrogen(self):
href = "DOI: 10.1126/science.239.4844.1131"
h = Cell.hexagonal(2.6590, 4.3340)
self.assertAlmostEqual(h.volume, 26.537, msg="Volume for H cell is correct")
self.assert_(len(h.d_spacing(1)) == 14, msg="got 14 lines for H")
h.save("H", "Hydrogen", href, 1.0, UtilsTest.tempdir)
def test_suite_all_calibrant():
testSuite = unittest.TestSuite()
testSuite.addTest(TestCalibrant("test_factory"))
testSuite.addTest(TestCalibrant("test_2th"))
testSuite.addTest(TestCalibrant("test_fake"))
testSuite.addTest(TestCell("test_class"))
testSuite.addTest(TestCell("test_classmethods"))
testSuite.addTest(TestCell("test_dspacing"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_calibrant()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/profile_ocl_hist_pixelsplit.py
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time, os
import fabio
import pyopencl as cl
from pylab import *
from six.moves import input
print("#"*50)
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import splitPixelFull
from pyFAI import ocl_hist_pixelsplit
# from pyFAI import splitBBoxLUT
# from pyFAI import splitBBoxCSR
os.chdir("testimages")
ai = pyFAI.load("halfccd.poni")
data = fabio.open("halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
pos_size = pos.size
size = data.size
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.Buffer(ctx, mf.READ_WRITE, 4 * 4 * workgroup_size)
d_minmax = cl.Buffer(ctx, mf.READ_WRITE, 4 * 4)
with open("../../openCL/ocl_hist_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
compile_options = "-D BINS=%i -D NIMAGE=%i -D WORKGROUP_SIZE=%i -D EPS=%f" % \
(bins, size, workgroup_size, numpy.finfo(numpy.float32).eps)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size * workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult, d_minmax)
result = numpy.ndarray(4, dtype=numpy.float32)
cl.enqueue_copy(queue, result, d_minmax)
min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax = (min0, max0, min1, max1)
print(minmax)
print(result)
d_outData = cl.Buffer(ctx, mf.READ_WRITE, 4 * bins)
d_outCount = cl.Buffer(ctx, mf.READ_WRITE, 4 * bins)
d_outMerge = cl.Buffer(ctx, mf.READ_WRITE, 4 * bins)
program.memset_out(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
outData = numpy.ndarray(bins, dtype=numpy.float32)
outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
cl.enqueue_copy(queue, outData, d_outData)
cl.enqueue_copy(queue, outCount, d_outCount)
cl.enqueue_copy(queue, outMerge, d_outMerge)
global_size = (data.size + workgroup_size - 1) & ~(workgroup_size - 1),
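# Round data.size up to the next multiple of workgroup_size with a bit mask; this
# trick is only valid because workgroup_size (256) is a power of two.
# e.g. for a hypothetical data.size of 1048575: (1048575 + 255) & ~255 = 1048576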
d_image = cl.array.to_device(queue, data)
d_image_float = cl.Buffer(ctx, mf.READ_WRITE, 4 * size)
# program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # halfccd
program.integrate1(queue, global_size, (workgroup_size,), d_pos.data, d_image_float, d_minmax, numpy.int32(data.size), d_outData, d_outCount)
cl.enqueue_copy(queue, outData, d_outData)
cl.enqueue_copy(queue, outCount, d_outCount)
cl.enqueue_copy(queue, outMerge, d_outMerge)
program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
cl.enqueue_copy(queue, outData, d_outData)
cl.enqueue_copy(queue, outCount, d_outCount)
cl.enqueue_copy(queue, outMerge, d_outMerge)
ref = ai.xrpd_LUT(data, bins, correctSolidAngle=False)
test = splitPixelFull.fullSplit1D(pos, data, bins)
# assert(numpy.allclose(ref,outMerge))
# plot(outMerge, label="ocl_hist")
plot(ref[0], test[1], label="splitPixelFull")
plot(ref[0], ref[1], label="ref")
# plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
show()
input()
pyFAI-0.11.0/test/test_mask.py 0000755 0001773 0001774 00000037550 12527541311 017165 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
"test suite for masked arrays"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "10/03/2015"
import unittest
import numpy
import logging
import sys
import fabio
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
class TestMask(unittest.TestCase):
dataFile = "1894/testMask.edf"
poniFile = "1893/Pilatus1M.poni"
def setUp(self):
"""Download files"""
self.dataFile = UtilsTest.getimage(self.__class__.dataFile)
self.poniFile = UtilsTest.getimage(self.__class__.poniFile)
self.ai = pyFAI.load(self.poniFile)
# self.ai.mask = None
self.data = fabio.open(self.dataFile).data
self.mask = self.data < 0
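# The test image flags the masked ring with negative intensities, so thresholding
# below zero reconstructs the mask that the test methods below pass to integrate1d.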
def test_mask_hist(self):
"""
The masked image has a masked ring around 1.5deg with value -10
without mask the pixels should be at -10 ; with mask they are at 0
"""
meth="cython"
x1 = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method=meth)
x2 = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method=meth)
x3 = self.ai.integrate1d(self.data, 1000, dummy=-20.0, delta_dummy=19.5, unit="2th_deg", method=meth)
res1 = numpy.interp(1.5, *x1)
res2 = numpy.interp(1.5, *x2)
res3 = numpy.interp(1.5, *x3)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.plot(*x1, label="no mask")
pylab.plot(*x2, label="with mask")
pylab.plot(*x3, label="with dummy")
pylab.title("test_mask_hist")
pylab.legend()
pylab.show()
raw_input()
self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1)
self.assertAlmostEqual(res2, 0, 1, msg="With mask the bad pixels are actually Nan (got %.4f)" % res2)
self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)
def test_mask_splitBBox(self):
"""
The masked image has a masked ring around 1.5deg with value -10
without mask the pixels should be at -10 ; with mask they are at 0
"""
meth="splitbbox"
x1 = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method=meth)
x2 = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method=meth)
x3 = self.ai.integrate1d(self.data, 1000, dummy=-20.0, delta_dummy=19.5, unit="2th_deg", method=meth)
res1 = numpy.interp(1.5, *x1)
res2 = numpy.interp(1.5, *x2)
res3 = numpy.interp(1.5, *x3)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.plot(*x1, label="no mask")
pylab.plot(*x2, label="with mask")
pylab.plot(*x3, label="with dummy")
pylab.title("test_mask_splitBBox")
pylab.legend()
pylab.show()
raw_input()
self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1)
self.assertAlmostEqual(res2, 0, 1, msg="With mask the bad pixels are actually Nan (got %.4f)" % res2)
self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)
def test_mask_splitfull(self):
"""
The masked image has a masked ring around 1.5deg with value -10
without mask the pixels should be at -10 ; with mask they are at 0
"""
meth="splitpixel"
x1 = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method=meth)
x2 = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method=meth)
x3 = self.ai.integrate1d(self.data, 1000, dummy=-20.0, delta_dummy=19.5, unit="2th_deg", method=meth)
res1 = numpy.interp(1.5, *x1)
res2 = numpy.interp(1.5, *x2)
res3 = numpy.interp(1.5, *x3)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.plot(*x1, label="no mask")
pylab.plot(*x2, label="with mask")
pylab.plot(*x3, label="with dummy")
pylab.title("test_mask_splitfull")
pylab.legend()
pylab.show()
raw_input()
self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1)
self.assertAlmostEqual(res2, 0, 1, msg="With mask the bad pixels are actually Nan (got %.4f)" % res2)
self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)
def test_mask_LUT(self):
"""
The masked image has a masked ring around 1.5deg with value -10
without mask the pixels should be at -10 ; with mask they are at 0
"""
meth="lut"
x1 = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
x2 = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
x3 = self.ai.integrate1d(self.data, 1000, mask=numpy.zeros(shape=self.mask.shape, dtype="uint8"), dummy=-20.0, delta_dummy=19.5, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
res1 = numpy.interp(1.5, *x1)
res2 = numpy.interp(1.5, *x2)
res3 = numpy.interp(1.5, *x3)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.plot(*x1, label="nomask")
pylab.plot(*x2, label="mask")
pylab.plot(*x3, label="dummy")
pylab.legend()
pylab.show()
raw_input()
self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1)
self.assertAlmostEqual(res2, 0, 1, msg="With mask the bad pixels are actually Nan (got %.4f)" % res2)
self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)
def test_mask_CSR(self):
"""
The masked image has a masked ring around 1.5deg with value -10
without mask the pixels should be at -10 ; with mask they are at 0
"""
meth="csr"
x1 = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
x2 = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
x3 = self.ai.integrate1d(self.data, 1000, mask=numpy.zeros(shape=self.mask.shape, dtype="uint8"), dummy=-20.0, delta_dummy=19.5, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
res1 = numpy.interp(1.5, *x1)
res2 = numpy.interp(1.5, *x2)
res3 = numpy.interp(1.5, *x3)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.plot(*x1, label="nomask")
pylab.plot(*x2, label="mask")
pylab.plot(*x3, label="dummy")
pylab.legend()
pylab.show()
raw_input()
self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1)
self.assertAlmostEqual(res2, 0, 1, msg="With mask the bad pixels are actually Nan (got %.4f)" % res2)
self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)
def test_mask_LUT_OCL(self):
"""
The masked image has a masked ring around 1.5deg with value -10
without mask the pixels should be at -10 ; with mask they are at 0
"""
meth = "lut_ocl"
x1 = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
x2 = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
x3 = self.ai.integrate1d(self.data, 1000, dummy=-20.0, delta_dummy=19.5, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
res1 = numpy.interp(1.5, *x1)
res2 = numpy.interp(1.5, *x2)
res3 = numpy.interp(1.5, *x3)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.plot(*x1, label="nomask")
pylab.plot(*x2, label="mask")
pylab.plot(*x3, label="dummy")
pylab.legend()
pylab.show()
raw_input()
self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1)
self.assertAlmostEqual(res2, 0, 1, msg="With mask the bad pixels are actually around 0 (got %.4f)" % res2)
self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)
def test_mask_CSR_OCL(self):
"""
The masked image has a masked ring around 1.5deg with value -10
without mask the pixels should be at -10 ; with mask they are at 0
"""
meth = "CSR_ocl"
x1 = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
x2 = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
x3 = self.ai.integrate1d(self.data, 1000, dummy=-20.0, delta_dummy=19.5, unit="2th_deg", method=meth)
# print self.ai._lut_integrator.lut_checksum
res1 = numpy.interp(1.5, *x1)
res2 = numpy.interp(1.5, *x2)
res3 = numpy.interp(1.5, *x3)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.plot(*x1, label="nomask")
pylab.plot(*x2, label="mask")
pylab.plot(*x3, label="dummy")
pylab.legend()
pylab.show()
raw_input()
self.assertAlmostEqual(res1, -10., 1, msg="Without mask the bad pixels are around -10 (got %.4f)" % res1)
self.assertAlmostEqual(res2, 0, 1, msg="With mask the bad pixels are actually around 0 (got %.4f)" % res2)
self.assertAlmostEqual(res3, -20., 4, msg="Without mask but dummy=-20 the dummy pixels are actually at -20 (got % .4f)" % res3)
class TestMaskBeamstop(unittest.TestCase):
"""
Test for https://github.com/kif/pyFAI/issues/76
"""
dataFile = "1788/moke.tif"
def setUp(self):
"""
Download files
Create a mask for tth<3.7 deg
"""
self.dataFile = UtilsTest.getimage(self.__class__.dataFile)
detector = pyFAI.detectors.Detector(pixel1=0.0001, pixel2=0.0001)
self.ai = pyFAI.AzimuthalIntegrator(dist=0.1, poni1=0.03, poni2=0.03, detector=detector)
self.data = fabio.open(self.dataFile).data
self.tth, self.I = self.ai.integrate1d(self.data, 1000, unit="2th_deg")
self.mask = self.ai.ttha < numpy.deg2rad(3.7)
def test_nomask(self):
"""
without mask, tth value should start at 0
"""
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.plot(self.tth, self.I, label="nomask")
pylab.legend()
pylab.show()
raw_input()
self.assertAlmostEqual(self.tth[0], 0.0, 1, "tth without mask starts at 0")
def test_mask_splitBBox(self):
"""
With a mask with and without limits
"""
tth, I = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method="splitBBox")
self.assertAlmostEqual(tth[0], 3.7, 1, msg="tth range starts at 3.7 (got %.4f)" % tth[0])
tth, I = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method="splitBBox", radial_range=[1, 10])
self.assertAlmostEqual(tth[0], 1.0, 1, msg="tth range should start at 1.0 (got %.4f)" % tth[0])
def test_mask_LUT(self):
"""
With a mask with and without limits
"""
tth, I = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method="LUT")
self.assertAlmostEqual(tth[0], 3.7, 1, msg="tth range starts at 3.7 (got %.4f)" % tth[0])
tth, I = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method="LUT", radial_range=[1, 10])
self.assertAlmostEqual(tth[0], 1.0, 1, msg="tth range should start at 1.0 (got %.4f)" % tth[0])
def test_mask_LUT_OCL(self):
"""
With a mask with and without limits
"""
tth, I = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method="lut_ocl")
self.assert_(tth[0] > 3.5, msg="tth range starts at 3.7 (got %.4f)" % tth[0])
tth, I = self.ai.integrate1d(self.data, 1000, mask=self.mask, unit="2th_deg", method="lut_ocl", radial_range=[1, 10])
self.assertAlmostEqual(tth[0], 1.0, 1, msg="tth range should start at 1.0 (got %.4f)" % tth[0])
def test_nomask_LUT(self):
"""
without mask, tth value should start at 0
"""
tth, I = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method="lut")
self.assertAlmostEqual(tth[0], 0.0, 1, msg="tth range without mask should start at 0 (got %.4f)" % tth[0])
tth, I = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method="lut", radial_range=[1, 10])
self.assertAlmostEqual(tth[0], 1.0, 1, msg="tth range should start at 1.0 (got %.4f)" % tth[0])
def test_nomask_LUT_OCL(self):
"""
without mask, tth value should start at 0
"""
tth, I = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method="lut_ocl")
self.assertAlmostEqual(tth[0], 0.0, 1, msg="tth range without mask should start at 0 (got %.4f)" % tth[0])
tth, I = self.ai.integrate1d(self.data, 1000, unit="2th_deg", method="lut_ocl", radial_range=[1, 10])
self.assertAlmostEqual(tth[0], 1.0, 1, msg="tth range should start at 1.0 (got %.4f)" % tth[0])
def test_suite_all_Mask():
testSuite = unittest.TestSuite()
testSuite.addTest(TestMask("test_mask_hist"))
testSuite.addTest(TestMask("test_mask_splitBBox"))
testSuite.addTest(TestMask("test_mask_splitfull"))
testSuite.addTest(TestMask("test_mask_LUT"))
testSuite.addTest(TestMask("test_mask_CSR"))
testSuite.addTest(TestMask("test_mask_LUT_OCL"))
testSuite.addTest(TestMask("test_mask_CSR_OCL"))
testSuite.addTest(TestMaskBeamstop("test_nomask"))
testSuite.addTest(TestMaskBeamstop("test_mask_splitBBox"))
testSuite.addTest(TestMaskBeamstop("test_mask_LUT"))
testSuite.addTest(TestMaskBeamstop("test_mask_LUT_OCL"))
testSuite.addTest(TestMaskBeamstop("test_nomask_LUT"))
testSuite.addTest(TestMaskBeamstop("test_nomask_LUT_OCL"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_Mask()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/profile_ocl_lut_pixelsplit3.py 0000644 0001773 0001774 00000011731 12527541311 022702 0 ustar kieffer kieffer 0000000 0000000 # -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio
import pyopencl as cl
from pylab import *
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
#from pyFAI import splitBBoxCSR
#logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size/8,4,2)
pos_size = pos.size
#size = data.size
size = pos_size/8
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4*workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
with open("../openCL/ocl_lut_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
compile_options = "-D BINS=%i -D NIMAGE=%i -D WORKGROUP_SIZE=%i -D EPS=%e" % \
(bins, size, workgroup_size, numpy.finfo(numpy.float32).eps)
print(compile_options)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size*workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult.data, d_minmax.data)
min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax = (min0, max0, min1, max1)
print(minmax)
print(d_minmax)
memset_size = (bins + workgroup_size - 1) & ~(workgroup_size - 1),
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
global_size = (size + workgroup_size - 1) & ~(workgroup_size - 1),
program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
outMax_1 = numpy.copy(d_outMax)
d_idx_ptr = cl.array.empty(queue, (bins+1,), dtype=numpy.int32)
d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
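# The LUT/CSR structure appears to be built in three passes: lut1 counted the number of
# pixel fragments falling into each bin (d_outMax), lut2 turned those counts into a CSR
# row-pointer array (d_idx_ptr) plus the total number of entries (d_lutsize), and lut3
# below refills the per-bin counters while writing the column indices and weights.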
lutsize = numpy.ndarray(1, dtype=numpy.int32)
cl.enqueue_copy(queue, lutsize, d_lutsize.data)
print(lutsize)
lut_size = int(lutsize[0])
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
#d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outCount = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outMerge = cl.array.empty(queue, (bins,), dtype=numpy.float32)
program.lut3(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data, d_idx_ptr.data, d_indices.data, d_data.data)
outMax_2 = numpy.copy(d_outMax)
#check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
#cl.enqueue_copy(queue, check_atomics, d_check_atomics)
program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outCount.data, d_outMerge.data)
d_image = cl.array.to_device(queue, data)
d_image_float = cl.array.empty(queue, (size,), dtype=numpy.float32)
#program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float.data) # halfccd
program.csr_integrate(queue, (bins*workgroup_size,),(workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
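# csr_integrate is presumably launched with one workgroup per bin: each workgroup
# performs the sparse dot product between its CSR row and the flattened image,
# accumulating the weighted signal (outData), the weights (outCount) and their
# ratio (outMerge).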
#outData = numpy.ndarray(bins, dtype=numpy.float32)
#outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
cl.enqueue_copy(queue,outMerge, d_outMerge.data)
#program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
#cl.enqueue_copy(queue,outMerge, d_outMerge)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="splitpixelfull")
ref = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
#assert(numpy.allclose(ref,outMerge))
##plot(ref[0],outMerge, label="ocl_lut_merge")
###plot(ref[0],outData, label="ocl_lut_data")
###plot(ref[0],outCount, label="ocl_lut_count")
#plot(ref[1], label="ref_merge")
###plot(ref[0], ref[2], label="ref_data")
###plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
#legend()
#show()
#raw_input()
pyFAI-0.11.0/test/test_sparse.py 0000644 0001773 0001774 00000006274 12527541311 017523 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
"""
Test suites for sparse matrix multiplication modules
"""
import unittest, numpy, os, sys, time
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitBBox
from pyFAI import splitBBoxCSR
from pyFAI import splitBBoxLUT
import fabio
class TestSparseBBox(unittest.TestCase):
"""Test azimuthal integration based on sparse matrix multiplication methods
Bounding box pixel splitting
"""
N = 1000
unit = "2th_deg"
ai = pyFAI.load(UtilsTest.getimage("1893/Pilatus1M.poni"))
data = fabio.open(UtilsTest.getimage("1883/Pilatus1M.edf")).data
ref = ai.integrate1d(data, N, correctSolidAngle=False, unit=unit, method="splitBBox")[1]
cython = splitBBox.histoBBox1d(data, ai._ttha, ai._dttha, bins=N)
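# Reference results are computed once at class level: 'ref' through the generic
# integrate1d()/splitBBox path and 'cython' through the low-level histoBBox1d binding;
# both the LUT and the CSR integrators are compared against them below.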
def test_LUT(self):
obt = self.ai.integrate1d(self.data, self.N, correctSolidAngle=False, unit=self.unit, method="LUT")[1]
logger.debug("delta on global result: %s" % (abs(obt - self.ref) / self.ref).max())
self.assert_(numpy.allclose(obt, self.ref))
cython = self.ai._lut_integrator.integrate(self.data)
for ref, obt in zip(self.cython, cython):
logger.debug("delta on cython result: %s" % (abs(obt - ref) / ref).max())
self.assert_(numpy.allclose(obt, ref))
def test_CSR(self):
obt = self.ai.integrate1d(self.data, self.N, correctSolidAngle=False, unit=self.unit, method="CSR")[1]
logger.debug("delta on global result: %s" % (abs(obt - self.ref) / self.ref).max())
self.assert_(numpy.allclose(obt, self.ref))
cython = self.ai._csr_integrator.integrate(self.data)
for ref, obt in zip(self.cython, cython):
logger.debug("delta on cython result: %s" % (abs(obt - ref) / ref).max())
self.assert_(numpy.allclose(obt, ref))
def test_suite_all_sparse():
testSuite = unittest.TestSuite()
testSuite.addTest(TestSparseBBox("test_LUT"))
testSuite.addTest(TestSparseBBox("test_CSR"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_sparse()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/profile_pixelsplitFullLUT.py 0000644 0001773 0001774 00000006133 12527541311 022306 0 ustar kieffer kieffer 0000000 0000000 # -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio
import pyopencl as cl
from pylab import *
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import splitPixelFullLUT_float32
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
from pyFAI import splitPixelFull
import scipy
#logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size/8,4,2)
pos_size = pos.size
#size = data.size
size = pos_size/8
boo = splitPixelFullLUT_float32.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
matrix_32 = scipy.sparse.csr_matrix((boo.data,boo.indices,boo.indptr), shape=(bins,data.size))
mat32d = matrix_32.todense()
#mat32d.shape = (mat32d.size,)
#out = boo.integrate(data)
#ai.xrpd_LUT(data, 1000)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
matrix_64 = scipy.sparse.csr_matrix((foo.data,foo.indices,foo.indptr), shape=(bins,data.size))
mat64d = matrix_64.todense()
#mat64d.shape = (mat64d.size,)
#foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
bools_bad = (abs(mat32d - mat64d) > 0.000001)
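# Flag LUT entries whose float32 and float64 weights differ by more than 1e-6; the
# column index of each such entry (collected into pixels_bad below) identifies the
# detector pixel whose splitting coefficients are most sensitive to single precision.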
#bools_good = (abs(mat32d - mat64d) <= 0.000001)
del mat32d
del mat64d
del matrix_32
del matrix_64
tmp = numpy.where(bools_bad)[1].ravel()
pixels_bad = numpy.copy(tmp)
pixels_bad.sort()
#tmp = numpy.where(bools_good)[1]
#pixels_good = numpy.copy(tmp)
#pixels_good.sort()
#ref = splitPixelFull.fullSplit1D(pos, data, bins)
#ref = foo.integrate(data)
#assert(numpy.allclose(ref[1],outMerge))
#plot(ref[0],outMerge, label="ocl_lut_merge")
#plot(ref[0],outData, label="ocl_lut_data")
#plot(ref[0],outCount, label="ocl_lut_count")
#plot(out[0], out[1], label="ocl_lut_merge")
#plot(out[0], out[2], label="ocl_lut_data")
#plot(out[0], out[3], label="ocl_lut_count")
#plot(ref[0], ref[1], label="ref_merge")
#plot(ref[0], ref[2], label="ref_data")
#plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
#legend()
#show()
#raw_input()
#aaa = 0
#bbb = 0
#for i in range(bins):
#ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
#ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
#sort1 = numpy.argsort(ind_tmp1)
#sort2 = numpy.argsort(ind_tmp2)
#data_1 = data_tmp1[sort1]
#data_2 = data_tmp2[sort2]
#for j in range(data_1.size):
#aaa += 1
#if not numpy.allclose(data_1[j],data_2[j]):
#bbb += 1
#print data_1[j],data_2[j],numpy.allclose(data_1[j],data_2[j]), idx_ptr[i]+j
#print aaa,bbb pyFAI-0.11.0/test/profile_csr_2d.py 0000644 0001773 0001774 00000001712 12527541311 020053 0 ustar kieffer kieffer 0000000 0000000 # -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio, pyopencl
from pylab import *
from six.moves import input
print("#"*50)
pyFAI = sys.modules["pyFAI"]
#from pyFAI import splitBBox
#from pyFAI import splitBBoxLUT
#from pyFAI import splitBBoxCSR
#logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/Pilatus1M.poni")
data = fabio.open("testimages/Pilatus1M.edf").data
#ref = ai.xrpd_LUT(data, 1000)[1]
#obt = ai.xrpd_LUT_OCL(data, 1000)[1]
ref = ai.integrate2d(data, 100, 360, method="lut", unit="2th_deg")[0]
obt = ai.integrate2d(data, 100, 360, method="ocl_csr", unit="2th_deg")[0]
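# Regroup the same image onto a 2D map with 100 radial and 360 azimuthal bins using
# the CPU LUT implementation and the OpenCL CSR implementation, then require the two
# intensity maps to agree element-wise (numpy.allclose below).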
##logger.debug("check LUT basics: %s"%abs(obt[1] - ref[1]).max())
assert numpy.allclose(ref,obt)
plot(ref.ravel(), label="ocl_lut")
plot(obt.ravel(), label="ocl_csr")
legend()
show()
input()
pyFAI-0.11.0/test/test_bilinear.py 0000755 0001773 0001774 00000012046 12527541311 020010 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
"test suite for bilinear interpolator class"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "08/05/2015"
import unittest
import os
import numpy
# import logging # , time
import sys
# import fabio
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import getLogger, UtilsTest
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI import bilinear
# from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
# if logger.getEffectiveLevel() <= logging.INFO:
# import pylab
# from pyFAI import bilinear
# bilinear = sys.modules["pyFAI.bilinear"]
class TestBilinear(unittest.TestCase):
"""basic maximum search test"""
N = 10000
def test_max_search_round(self):
"""test maximum search using random points: maximum is at the pixel center"""
a = numpy.arange(100) - 40.
b = numpy.arange(100) - 60.
ga = numpy.exp(-a * a / 4000)
gb = numpy.exp(-b * b / 6000)
gg = numpy.outer(ga, gb)
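# gg is a separable 2D Gaussian built as an outer product, with its maximum exactly on
# the pixel (40, 60); starting from any random pixel, local_maxi() is expected to
# refine to that position within 1e-4.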
b = bilinear.Bilinear(gg)
ok = 0
for s in range(self.N):
i, j = numpy.random.randint(100), numpy.random.randint(100)
k, l = b.local_maxi((i, j))
if abs(k - 40) > 1e-4 or abs(l - 60) > 1e-4:
logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)" % (i, j, k, l))
else:
logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)" % (i, j, k, l))
ok += 1
logger.info("Success rate: %.1f" % (100.*ok / self.N))
self.assertEqual(ok, self.N, "Maximum is always found")
def test_max_search_half(self):
"""test maximum search using random points: maximum is at a pixel edge"""
a = numpy.arange(100) - 40.5
b = numpy.arange(100) - 60.5
ga = numpy.exp(-a * a / 4000)
gb = numpy.exp(-b * b / 6000)
gg = numpy.outer(ga, gb)
b = bilinear.Bilinear(gg)
ok = 0
for s in range(self.N):
i, j = numpy.random.randint(100), numpy.random.randint(100)
k, l = b.local_maxi((i, j))
if abs(k - 40.5) > 0.5 or abs(l - 60.5) > 0.5:
logger.warning("Wrong guess maximum (%i,%i) -> (%.1f,%.1f)" % (i, j, k, l))
else:
logger.debug("Good guess maximum (%i,%i) -> (%.1f,%.1f)" % (i, j, k, l))
ok += 1
logger.info("Success rate: %.1f" % (100. * ok / self.N))
self.assertEqual(ok, self.N, "Maximum is always found")
class TestConversion(unittest.TestCase):
"""basic 2d -> 4d transformation and vice-versa"""
def test4d(self):
Nx = 1000
Ny = 1024
y, x = numpy.mgrid[:Ny + 1, :Nx + 1]
y = y.astype(float)
x = x.astype(float)
pos = bilinear.convert_corner_2D_to_4D(3, y, x)
y1, x1, z1 = bilinear.calc_cartesian_positions(y.ravel(), x.ravel(), pos)
self.assert_(numpy.allclose(y.ravel(), y1), "Maximum error on y is %s" % (abs(y.ravel() - y1).max()))
self.assert_(numpy.allclose(x.ravel(), x1), "Maximum error on x is %s" % (abs(x.ravel() - x1).max()))
self.assertEqual(z1, None, "flat detector")
x = x[:-1, :-1] + 0.5
y = y[:-1, :-1] + 0.5
y1, x1, z1 = bilinear.calc_cartesian_positions((y).ravel(), (x).ravel(), pos)
self.assert_(numpy.allclose(y.ravel(), y1), "Maximum error on y_center is %s" % (abs(y.ravel() - y1).max()))
self.assert_(numpy.allclose(x.ravel(), x1), "Maximum error on x_center is %s" % (abs(x.ravel() - x1).max()))
self.assertEqual(z1, None, "flat detector")
def test_suite_all_bilinear():
testSuite = unittest.TestSuite()
testSuite.addTest(TestBilinear("test_max_search_round"))
testSuite.addTest(TestBilinear("test_max_search_half"))
testSuite.addTest(TestConversion("test4d"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_bilinear()
runner = unittest.TextTestRunner()
runner.run(mysuite)
UtilsTest.clean_up()
pyFAI-0.11.0/test/test_openCL.py 0000755 0001773 0001774 00000032623 12527541311 017406 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
"test suite for OpenCL code"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "02/04/2015"
import unittest
import os
import time
import sys
import fabio
import gc
import tempfile
import numpy
import platform
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger, recursive_delete
logger = getLogger(__file__)
try:
import pyopencl
except ImportError as error:
logger.warning("OpenCL module (pyopencl) is not present, skip tests. %s." % error)
skip = True
else:
skip = False
pyFAI = sys.modules["pyFAI"]
from pyFAI.opencl import ocl
if ocl is None:
skip = True
else:
pyopencl = pyFAI.opencl.pyopencl
import pyopencl.array
class TestMask(unittest.TestCase):
tmp_dir = tempfile.mkdtemp(prefix="pyFAI_test_OpenCL_")
N = 1000
def setUp(self):
self.datasets = [{"img": UtilsTest.getimage("1883/Pilatus1M.edf"),
"poni": UtilsTest.getimage("1893/Pilatus1M.poni"),
"spline": None},
{"img": UtilsTest.getimage("1882/halfccd.edf"),
"poni": UtilsTest.getimage("1895/halfccd.poni"),
"spline": UtilsTest.getimage("1461/halfccd.spline")},
{"img": UtilsTest.getimage("1881/Frelon2k.edf"),
"poni": UtilsTest.getimage("1896/Frelon2k.poni"),
"spline": UtilsTest.getimage("1900/frelon.spline")},
{"img": UtilsTest.getimage("1884/Pilatus6M.cbf"),
"poni": UtilsTest.getimage("1897/Pilatus6M.poni"),
"spline": None},
]
if not os.path.isdir(self.tmp_dir):
os.makedirs(self.tmp_dir)
for ds in self.datasets:
if ds["spline"] is not None:
data = open(ds["poni"], "r").read()
# spline = os.path.basename(ds["spline"])
with open(ds["poni"]) as f:
data = []
for line in f:
if line.startswith("SplineFile:"):
data.append("SplineFile: " + ds["spline"])
else:
data.append(line.strip())
ds["poni"] = os.path.join(self.tmp_dir, os.path.basename(ds["poni"]))
with open(ds["poni"], "w") as f:
f.write(os.linesep.join(data))
def tearDown(self):
recursive_delete(self.tmp_dir)
def test_OpenCL(self):
logger.info("Testing histogram-based algorithm (forward-integration)")
for devtype in ("GPU", "CPU"):
ids = ocl.select_device(devtype, extensions=["cl_khr_int64_base_atomics"])
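# The forward histogram algorithm presumably needs 64-bit integer atomic operations,
# hence devices are filtered on the cl_khr_int64_base_atomics extension here.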
if ids is None:
logger.error("No suitable %s OpenCL device found" % devtype)
continue
else:
logger.info("I found a suitable device %s %s: %s %s " % (devtype, ids, ocl.platforms[ids[0]], ocl.platforms[ids[0]].devices[ids[1]]))
for ds in self.datasets:
ai = pyFAI.load(ds["poni"])
data = fabio.open(ds["img"]).data
res = ai.xrpd_OpenCL(data, self.N, devicetype="all", platformid=ids[0], deviceid=ids[1], useFp64=True)
ref = ai.integrate1d(data, self.N, method="splitBBox", unit="2th_deg")
r = Rwp(ref, res)
logger.info("OpenCL histogram vs histogram SplitBBox has R= %.3f for dataset %s" % (r, ds))
self.assertTrue(r < 6, "Rwp=%.3f for OpenCL histogram processing of %s" % (r, ds))
del ai, data
gc.collect()
def test_OpenCL_LUT(self):
logger.info("Testing LUT-based algorithm (backward-integration)")
for devtype in ("GPU", "CPU"):
ids = ocl.select_device(devtype, best=True)
if ids is None:
logger.error("No suitable %s OpenCL device found" % devtype)
continue
else:
logger.info("I found a suitable device %s %s: %s %s " % (devtype, ids, ocl.platforms[ids[0]], ocl.platforms[ids[0]].devices[ids[1]]))
for ds in self.datasets:
ai = pyFAI.load(ds["poni"])
data = fabio.open(ds["img"]).data
ref = ai.integrate1d(data, self.N, method="splitBBox", unit="2th_deg")
try:
res = ai.integrate1d(data, self.N, method="ocl_lut_%i,%i" % (ids[0], ids[1]), unit="2th_deg")
except (pyFAI.opencl.pyopencl.MemoryError, MemoryError, pyFAI.opencl.pyopencl.RuntimeError, RuntimeError) as error:
logger.warning("Memory error on %s dataset %s: %s%s. Converted into warning: device may not have enough memory." % (devtype, os.path.basename(ds["img"]), os.linesep, error))
break
else:
ref = ai.xrpd(data, self.N)
r = Rwp(ref, res)
logger.info("OpenCL CSR vs histogram SplitBBox has R= %.3f for dataset %s" % (r, ds))
self.assertTrue(r < 3, "Rwp=%.3f for OpenCL LUT processing of %s" % (r, ds))
del ai, data
gc.collect()
def test_OpenCL_CSR(self):
logger.info("Testing CSR-based algorithm (backward-integration)")
for devtype in ("GPU", "CPU"):
ids = ocl.select_device(devtype, best=True)
if ids is None:
logger.error("No suitable %s OpenCL device found" % devtype)
continue
else:
logger.info("I found a suitable device %s %s: %s %s " % (devtype, ids, ocl.platforms[ids[0]], ocl.platforms[ids[0]].devices[ids[1]]))
for ds in self.datasets:
ai = pyFAI.load(ds["poni"])
data = fabio.open(ds["img"]).data
ref = ai.integrate1d(data, self.N, method="splitBBox", unit="2th_deg")
try:
res = ai.integrate1d(data, self.N, method="ocl_csr_%i,%i" % (ids[0], ids[1]), unit="2th_deg")
except (pyFAI.opencl.pyopencl.MemoryError, MemoryError, pyFAI.opencl.pyopencl.RuntimeError, RuntimeError) as error:
logger.warning("Memory error on %s dataset %s: %s%s. Converted into Warning: device may not have enough memory." % (devtype, os.path.basename(ds["img"]), os.linesep, error))
break
else:
r = Rwp(ref, res)
logger.info("OpenCL CSR vs histogram SplitBBox has R= %.3f for dataset %s" % (r, ds))
self.assertTrue(r < 3, "Rwp=%.3f for OpenCL CSR processing of %s" % (r, ds))
del ai, data
gc.collect()
class TestSort(unittest.TestCase):
"""
Test the kernels for vector and image sorting
"""
N = 1024
ws = N // 8
def setUp(self):
self.h_data = numpy.random.random(self.N).astype("float32")
self.h2_data = numpy.random.random((self.N, self.N)).astype("float32").reshape((self.N, self.N))
self.ctx = ocl.create_context(devicetype="GPU")
device = self.ctx.devices[0]
try:
devtype = pyopencl.device_type.to_string(device.type).upper()
except ValueError:
# pocl does not describe itself as a CPU !
devtype = "CPU"
workgroup = device.max_work_group_size
if (devtype == "CPU") and (device.platform.vendor == "Apple"):
logger.info("For Apple's OpenCL on CPU: enforce max_work_group_size=1")
workgroup = 1
self.ws = min(workgroup, self.ws)
self.queue = pyopencl.CommandQueue(self.ctx, properties=pyopencl.command_queue_properties.PROFILING_ENABLE)
self.local_mem = pyopencl.LocalMemory(self.ws * 32) # 2float4 = 2*4*4 bytes per workgroup size
src = pyFAI.utils.read_cl_file("bitonic.cl")
self.prg = pyopencl.Program(self.ctx, src).build()
def tearDown(self):
self.h_data = None
self.queue = None
self.ctx = None
self.local_mem = None
self.h2_data = None
def test_reference_book(self):
d_data = pyopencl.array.to_device(self.queue, self.h_data)
t0 = time.time()
hs_data = numpy.sort(self.h_data)
t1 = time.time()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_book(self.queue, (self.ws,), (self.ws,), d_data.data, self.local_mem)
evt.wait()
err = abs(hs_data - d_data.get()).max()
logger.info("test_reference_book")
logger.info("Numpy sort on %s element took %s ms" % (self.N, time_sort))
logger.info("Reference sort time: %s ms, err=%s " % (1e-6 * (evt.profile.end - evt.profile.start), err))
# this test works under linux:
if platform.system() == "Linux":
self.assert_(err == 0.0)
else:
logger.warning("Measured error on %s is %s" % (platform.system(), err))
def test_reference_file(self):
d_data = pyopencl.array.to_device(self.queue, self.h_data)
t0 = time.time()
hs_data = numpy.sort(self.h_data)
t1 = time.time()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_file(self.queue, (self.ws,), (self.ws,), d_data.data, self.local_mem)
evt.wait()
err = abs(hs_data - d_data.get()).max()
logger.info("test_reference_file")
logger.info("Numpy sort on %s element took %s ms" % (self.N, time_sort))
logger.info("Reference sort time: %s ms, err=%s " % (1e-6 * (evt.profile.end - evt.profile.start), err))
# this test works anywhere !
self.assert_(err == 0.0)
def test_sort_all(self):
d_data = pyopencl.array.to_device(self.queue, self.h_data)
t0 = time.time()
hs_data = numpy.sort(self.h_data)
t1 = time.time()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_all(self.queue, (self.ws,), (self.ws,), d_data.data, self.local_mem)
evt.wait()
err = abs(hs_data - d_data.get()).max()
logger.info("test_sort_all")
logger.info("Numpy sort on %s element took %s ms" % (self.N, time_sort))
logger.info("modified function execution time: %s ms, err=%s " % (1e-6 * (evt.profile.end - evt.profile.start), err))
self.assert_(err == 0.0)
def test_sort_horizontal(self):
d2_data = pyopencl.array.to_device(self.queue, self.h2_data)
t0 = time.time()
h2s_data = numpy.sort(self.h2_data, axis=-1)
t1 = time.time()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_horizontal(self.queue, (self.N, self.ws), (1, self.ws), d2_data.data, self.local_mem)
evt.wait()
err = abs(h2s_data - d2_data.get()).max()
logger.info("Numpy horizontal sort on %sx%s elements took %s ms" % (self.N, self.N, time_sort))
logger.info("Horizontal execution time: %s ms, err=%s " % (1e-6 * (evt.profile.end - evt.profile.start), err))
self.assert_(err == 0.0)
def test_sort_vertical(self):
d2_data = pyopencl.array.to_device(self.queue, self.h2_data)
t0 = time.time()
h2s_data = numpy.sort(self.h2_data, axis=0)
t1 = time.time()
time_sort = 1e3 * (t1 - t0)
evt = self.prg.bsort_vertical(self.queue, (self.ws, self.N), (self.ws, 1), d2_data.data, self.local_mem)
evt.wait()
err = abs(h2s_data - d2_data.get()).max()
logger.info("Numpy vertical sort on %sx%s elements took %s ms" % (self.N, self.N, time_sort))
logger.info("Vertical execution time: %s ms, err=%s " % (1e-6 * (evt.profile.end - evt.profile.start), err))
self.assert_(err == 0.0)
def test_suite_all_OpenCL():
testSuite = unittest.TestSuite()
if skip:
logger.warning("OpenCL module (pyopencl) is not present or no device available: skip tests")
else:
testSuite.addTest(TestMask("test_OpenCL"))
testSuite.addTest(TestMask("test_OpenCL_LUT"))
testSuite.addTest(TestMask("test_OpenCL_CSR"))
testSuite.addTest(TestSort("test_reference_book"))
testSuite.addTest(TestSort("test_reference_file"))
testSuite.addTest(TestSort("test_sort_all"))
testSuite.addTest(TestSort("test_sort_horizontal"))
testSuite.addTest(TestSort("test_sort_vertical"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_OpenCL()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/test_saxs.py 0000755 0001773 0001774 00000014553 12527541311 017206 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://forge.epn-campus.eu/projects/azimuthal
#
# File: "$Id$"
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
#
"test suite for SAXS-oriented azimuthal integration"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import unittest
import os
import numpy
import logging, time
import sys
import fabio
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
class TestSaxs(unittest.TestCase):
img = UtilsTest.getimage("1883/Pilatus1M.edf")
data = fabio.open(img).data
ai = pyFAI.AzimuthalIntegrator(1.58323111834, 0.0334170169115, 0.0412277798782, 0.00648735642526, 0.00755810191106, 0.0, detector=pyFAI.detectors.Pilatus1M())
ai.wavelength = 1e-10
npt = 1000
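# Each test below integrates with a different pixel-splitting method and compares it
# to the default saxs() result through Rwp (defined in utilstest), assumed here to be
# a weighted-profile agreement factor in percent; values of 20 or more fail the test.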
def testMask(self):
ss = self.ai.mask.sum()
self.assertTrue(ss == 73533, "masked pixel = %s expected 73533" % ss)
def testNumpy(self):
qref, Iref, s = self.ai.saxs(self.data, self.npt)
q, I, s = self.ai.saxs(self.data, self.npt, error_model="poisson", method="numpy")
self.assertTrue(q[0] > 0, "q[0]>0 %s" % q[0])
self.assertTrue(q[-1] < 8, "q[-1] < 8, got %s" % q[-1])
self.assertTrue(s.min() >= 0, "s.min() >= 0 got %s" % (s.min()))
self.assertTrue(s.max() < 21, "s.max() < 21 got %s" % (s.max()))
self.assertTrue(I.max() < 52000, "I.max() < 52000 got %s" % (I.max()))
self.assertTrue(I.min() >= 0, "I.min() >= 0 got %s" % (I.min()))
R = Rwp((q, I), (qref, Iref))
if R > 20: logger.error("Numpy has R=%s" % R)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.errorbar(q, I, s, label="Numpy R=%.1f" % R)
pylab.yscale("log")
self.assertTrue(R < 20, "Numpy: Measure R=%s<20" % R)
def testCython(self):
qref, Iref, s = self.ai.saxs(self.data, self.npt)
q, I, s = self.ai.saxs(self.data, self.npt, error_model="poisson", method="cython")
self.assertTrue(q[0] > 0, "q[0]>0 %s" % q[0])
self.assertTrue(q[-1] < 8, "q[-1] < 8, got %s" % q[-1])
self.assertTrue(s.min() >= 0, "s.min() >= 0 got %s" % (s.min()))
self.assertTrue(s.max() < 21, "s.max() < 21 got %s" % (s.max()))
self.assertTrue(I.max() < 52000, "I.max() < 52000 got %s" % (I.max()))
self.assertTrue(I.min() >= 0, "I.min() >= 0 got %s" % (I.min()))
R = Rwp((q, I), (qref, Iref))
if R > 20: logger.error("Cython has R=%s" % R)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.errorbar(q, I, s, label="Cython R=%.1f" % R)
pylab.yscale("log")
self.assertTrue(R < 20, "Cython: Measure R=%s<20" % R)
def testSplitBBox(self):
qref, Iref, s = self.ai.saxs(self.data, self.npt)
q, I, s = self.ai.saxs(self.data, self.npt, error_model="poisson", method="splitbbox")
self.assertTrue(q[0] > 0, "q[0]>0 %s" % q[0])
self.assertTrue(q[-1] < 8, "q[-1] < 8, got %s" % q[-1])
self.assertTrue(s.min() >= 0, "s.min() >= 0 got %s" % (s.min()))
self.assertTrue(s.max() < 21, "s.max() < 21 got %s" % (s.max()))
self.assertTrue(I.max() < 52000, "I.max() < 52000 got %s" % (I.max()))
self.assertTrue(I.min() >= 0, "I.min() >= 0 got %s" % (I.min()))
R = Rwp((q, I), (qref, Iref))
if R > 20: logger.error("SplitBBox has R=%s" % R)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.errorbar(q, I, s, label="SplitBBox R=%.1f" % R)
pylab.yscale("log")
self.assertEqual(R < 20, True, "SplitBBox: Measure R=%s<20" % R)
def testSplitPixel(self):
qref, Iref, s = self.ai.saxs(self.data, self.npt)
q, I, s = self.ai.saxs(self.data, self.npt, error_model="poisson", method="splitpixel")
self.assertTrue(q[0] > 0, "q[0]>0 %s" % q[0])
self.assertTrue(q[-1] < 8, "q[-1] < 8, got %s" % q[-1])
self.assertTrue(s.min() >= 0, "s.min() >= 0 got %s" % (s.min()))
self.assertTrue(s.max() < 21, "s.max() < 21 got %s" % (s.max()))
self.assertTrue(I.max() < 52000, "I.max() < 52000 got %s" % (I.max()))
self.assertTrue(I.min() >= 0, "I.min() >= 0 got %s" % (I.min()))
R = Rwp((q, I), (qref, Iref))
if R > 20: logger.error("SplitPixel has R=%s" % R)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.errorbar(q, I, s, label="SplitPixel R=%.1f" % R)
pylab.yscale("log")
self.assertEqual(R < 20, True, "SplitPixel: Measure R=%s<20" % R)
def test_suite_all_Saxs():
testSuite = unittest.TestSuite()
testSuite.addTest(TestSaxs("testMask"))
testSuite.addTest(TestSaxs("testNumpy"))
# testSuite.addTest(TestSaxs("testCython"))
testSuite.addTest(TestSaxs("testSplitBBox"))
testSuite.addTest(TestSaxs("testSplitPixel"))
# testSuite.addTest(TestSaxs("test_mask_splitBBox"))
# testSuite.addTest(TestSaxs("test_mask_splitBBox"))
# testSuite.addTest(TestSaxs("test_mask_splitBBox"))
# testSuite.addTest(TestSaxs("test_mask_splitBBox"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_Saxs()
runner = unittest.TextTestRunner()
runner.run(mysuite)
if logger.getEffectiveLevel() == logging.DEBUG:
pylab.legend()
pylab.show()
raw_input()
pylab.clf()
pyFAI-0.11.0/test/test_flat.py 0000755 0001773 0001774 00000017200 12527541311 017146 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
"test suite for dark_current / flat_field correction"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import unittest
import os
import numpy
import logging
import time
import sys
import fabio
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI.opencl import ocl
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
class TestFlat1D(unittest.TestCase):
shape = 640, 480
flat = 1 + numpy.random.random(shape)
dark = numpy.random.random(shape)
raw = flat + dark
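# Since raw = flat + dark, dark subtraction followed by flat-field division gives a
# corrected image that is exactly 1 everywhere; test_correct therefore expects every
# integration method to return a flat curve of ones, while test_no_correct checks that
# the uncorrected signal does not.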
eps = 1e-6
ai = pyFAI.AzimuthalIntegrator()
ai.setFit2D(directDist=1, centerX=shape[1] // 2, centerY=shape[0] // 2, pixelX=1, pixelY=1)
bins = 500
def test_no_correct(self):
r, I = self.ai.integrate1d(self.raw, self.bins, unit="r_mm", correctSolidAngle=False)
logger.info("1D Without correction Imin=%s Imax=%s mean=%s std=%s" % (I.min(), I.max(), I.mean(), I.std()))
self.assertNotAlmostEqual(I.mean(), 1, 2, "Mean should not be 1")
self.assertFalse(I.max() - I.min() < self.eps, "deviation should be large")
def test_correct(self):
all_methods = ["numpy", "cython", "splitbbox", "splitpix", "lut", "csr"]
if ocl:
for device in ["cpu", "gpu", "acc"]:
if ocl.select_device(dtype=device):
all_methods.append("lut_ocl_%s" % device)
all_methods.append("csr_ocl_%s" % device)
for meth in all_methods:
r, I = self.ai.integrate1d(self.raw, self.bins, unit="r_mm", method=meth, correctSolidAngle=False, dark=self.dark, flat=self.flat)
logger.info("1D method:%s Imin=%s Imax=%s mean=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
self.assertAlmostEqual(I.mean(), 1, 2, "Mean should be 1 in %s" % meth)
self.assert_(I.max() - I.min() < self.eps, "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
for meth in ["xrpd_numpy", "xrpd_cython", "xrpd_splitBBox", "xrpd_splitPixel"]:  # , "xrpd_OpenCL"]: buggy with 32-bit GPUs when requesting 64-bit integration
r, I = self.ai.__getattribute__(meth)(self.raw, self.bins, correctSolidAngle=False, dark=self.dark, flat=self.flat)
logger.info("1D method:%s Imin=%s Imax=%s mean=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
self.assertAlmostEqual(I.mean(), 1, 2, "Mean should be 1 in %s" % meth)
self.assert_(I.max() - I.min() < self.eps, "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
if ocl and pyFAI.opencl.ocl.select_device("gpu", extensions=["cl_khr_fp64"]):
meth = "xrpd_OpenCL"
r, I = self.ai.__getattribute__(meth)(self.raw, self.bins, correctSolidAngle=False, dark=self.dark, flat=self.flat)
logger.info("1D method:%s Imin=%s Imax=%s mean=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
self.assertAlmostEqual(I.mean(), 1, 2, "Mean should be 1 in %s" % meth)
self.assert_(I.max() - I.min() < self.eps, "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
class TestFlat2D(unittest.TestCase):
shape = 640, 480
flat = 1 + numpy.random.random(shape)
dark = numpy.random.random(shape)
raw = flat + dark
eps = 1e-6
ai = pyFAI.AzimuthalIntegrator()
ai.setFit2D(directDist=1, centerX=shape[1] // 2, centerY=shape[0] // 2, pixelX=1, pixelY=1)
bins = 500
azim = 360
def test_no_correct(self):
I, _ , _ = self.ai.integrate2d(self.raw, self.bins, self.azim, unit="r_mm", correctSolidAngle=False)
I = I[numpy.where(I > 0)]
logger.info("2D Without correction Imin=%s Imax=%s mean=%s std=%s" % (I.min(), I.max(), I.mean(), I.std()))
self.assertNotAlmostEqual(I.mean(), 1, 2, "Mean should not be 1")
self.assertFalse(I.max() - I.min() < self.eps, "deviation should be large")
def test_correct(self):
test2d = {"numpy": self.eps,
"cython": self.eps,
"splitbbox": self.eps,
"splitpix": self.eps,
"lut": self.eps,
}
if ocl:
for device in ["cpu", "gpu", "acc"]:
if ocl.select_device(dtype=device):
test2d["lut_ocl_%s" % device] = self.eps
test2d["csr_ocl_%s" % device] = self.eps
test2d_direct = {"xrpd2_numpy": 0.3, # histograms are very noisy in 2D
"xrpd2_histogram": 0.3, # histograms are very noisy in 2D
"xrpd2_splitBBox": self.eps,
"xrpd2_splitPixel": self.eps}
for meth in test2d:
logger.info("About to test2d %s" % meth)
try:
I, _, _ = self.ai.integrate2d(self.raw, self.bins, self.azim, unit="r_mm", method=meth, correctSolidAngle=False, dark=self.dark, flat=self.flat)
except (MemoryError, pyFAI.opencl.pyopencl.MemoryError):
logger.warning("Got MemoryError from OpenCL device")
continue
I = I[numpy.where(I > 0)]
logger.info("2D method:%s Imin=%s Imax=%s mean=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
self.assertAlmostEqual(I.mean(), 1, 2, "Mean should be 1 in %s" % meth)
self.assert_(I.max() - I.min() < test2d[meth], "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
for meth in test2d_direct:
logger.info("About to test2d_direct %s" % meth)
I, _, _ = self.ai.__getattribute__(meth)(self.raw, self.bins, self.azim, correctSolidAngle=False, dark=self.dark, flat=self.flat)
I = I[numpy.where(I > 0)]
logger.info("2D method:%s Imin=%s Imax=%s mean=%s std=%s" % (meth, I.min(), I.max(), I.mean(), I.std()))
self.assert_(abs(I.mean() - 1) < test2d_direct[meth], "Mean should be 1 in %s" % meth)
self.assert_(I.max() - I.min() < test2d_direct[meth], "deviation should be small with meth %s, got %s" % (meth, I.max() - I.min()))
def test_suite_all_Flat():
testSuite = unittest.TestSuite()
testSuite.addTest(TestFlat1D("test_no_correct"))
testSuite.addTest(TestFlat1D("test_correct"))
testSuite.addTest(TestFlat2D("test_no_correct"))
testSuite.addTest(TestFlat2D("test_correct"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_Flat()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/test_bispev.py 0000755 0001773 0001774 00000010134 12527541311 017507 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
"test suite for masked arrays"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "04/03/2015"
import unittest
import numpy
import logging
import sys
import fabio
import time
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
from pyFAI import spline, _bispev
from pyFAI.third_party import six
try:
from scipy.interpolate import fitpack
except ImportError:
fitpack = None
class TestBispev(unittest.TestCase):
spinefile = "1461/halfccd.spline"
def setUp(self):
"""Download files"""
self.splineFile = UtilsTest.getimage(self.__class__.spinefile)
self.spline = spline.Spline(self.splineFile)
self.spline.spline2array(timing=True)
def test_bispev(self):
x_1d_array = numpy.arange(self.spline.xmin, self.spline.xmax + 1)
y_1d_array = numpy.arange(self.spline.ymin, self.spline.ymax + 1)
t0 = time.time()
dx_ref = fitpack.bisplev(
x_1d_array, y_1d_array, [self.spline.xSplineKnotsX,
self.spline.xSplineKnotsY,
self.spline.xSplineCoeff,
self.spline.splineOrder,
self.spline.splineOrder],
dx=0, dy=0)
t1 = time.time()
logger.debug(self.spline.xSplineKnotsX.dtype)
logger.debug(self.spline.xSplineKnotsY.dtype)
logger.debug(self.spline.xSplineCoeff.dtype)
dx_loc = _bispev.bisplev(
x_1d_array, y_1d_array, [self.spline.xSplineKnotsX,
self.spline.xSplineKnotsY,
self.spline.xSplineCoeff,
self.spline.splineOrder,
self.spline.splineOrder],
)
t2 = time.time()
logger.debug("Scipy timings: %.3fs\t cython timings: %.3fs" % (t1 - t0, t2 - t1))
logger.debug("%s, %s" % (dx_ref.shape, dx_loc.shape))
logger.debug(dx_ref)
logger.debug(dx_loc)
logger.debug("delta = %s" % abs(dx_loc - dx_ref).max())
if logger.getEffectiveLevel() == logging.DEBUG:
fig = pylab.figure()
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.imshow(dx_ref)
ax2.imshow(dx_loc)
fig.show()
six.moves.input()
self.assert_(abs(dx_loc - dx_ref).max() < 2e-5, "Results are similar")
def test_suite_all_bispev():
testSuite = unittest.TestSuite()
testSuite.addTest(TestBispev("test_bispev"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_bispev()
runner = unittest.TextTestRunner()
runner.run(mysuite)
UtilsTest.clean_up()
pyFAI-0.11.0/test/test_csr.py 0000644 0001773 0001774 00000013264 12527541311 017012 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""tests for Jon's geometry changes
FIXME : make some tests that the functions do what is expected
"""
import unittest
import numpy
import os
import sys
import time
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger, diff_img, diff_crv
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI import opencl
from pyFAI import splitBBox
from pyFAI import splitBBoxCSR
if opencl.ocl:
from pyFAI import ocl_azim_csr
import fabio
N = 1000
ai = pyFAI.load(UtilsTest.getimage("1893/Pilatus1M.poni"))
data = fabio.open(UtilsTest.getimage("1883/Pilatus1M.edf")).data
ai.xrpd_LUT(data, N)
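# Warm-up integration: builds the LUT and caches the 2theta center/delta arrays
# (ai._ttha, ai._dttha) that the CSR tests below reuse.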
class ParameterisedTestCase(unittest.TestCase):
""" TestCase classes that want to be parameterised should
inherit from this class.
From Eli Bendersky's website
http://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases/
"""
def __init__(self, methodName='runTest', param=None):
super(ParameterisedTestCase, self).__init__(methodName)
self.param = param
@staticmethod
def parameterise(testcase_klass, param=None):
""" Create a suite containing all tests taken from the given
subclass, passing them the parameter 'param'.
"""
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(testcase_klass)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(testcase_klass(name, param=param))
return suite
class TestOpenclCSR(ParameterisedTestCase):
def test_csr(self):
workgroup_size = self.param
out_ref = pyFAI.splitBBox.histoBBox1d(data, ai._ttha, ai._dttha, bins=N)
csr = pyFAI.splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=N, unit="2th_deg")
if not opencl.ocl:
skip = True
else:
try:
ocl_csr = ocl_azim_csr.OCL_CSR_Integrator(csr.lut, data.size, "ALL", profile=True, block_size=workgroup_size)
out_ocl_csr = ocl_csr.integrate(data)
except (opencl.pyopencl.MemoryError, MemoryError):
logger.warning("Skipping test due to memory error on device")
skip = True
else:
skip = False
out_cyt_csr = csr.integrate(data)
cmt = "Testing ocl_csr with workgroup_size= %s" % (workgroup_size)
logger.debug(cmt)
if skip:
for ref, cyth in zip(out_ref, out_cyt_csr):
self.assertTrue(numpy.allclose(ref, cyth), cmt + ": hist vs csr")
else:
for ref, ocl, cyth in zip(out_ref[1:], out_ocl_csr, out_cyt_csr[1:]):
self.assertTrue(numpy.allclose(ref, ocl), cmt + ": hist vs ocl_csr")
self.assertTrue(numpy.allclose(ref, cyth), cmt + ": hist vs csr")
self.assertTrue(numpy.allclose(cyth, ocl), cmt + ": csr vs ocl_csr")
csr = None
ocl_csr = None
out_ocl_csr = None
out_ref = None
TESTCASES = [8 * 2 ** i for i in range(6)] # [8, 16, 32, 64, 128, 256]
class Test_CSR(unittest.TestCase):
def test_2d_splitbbox(self):
ai.reset()
img, tth, chi = ai.integrate2d(data, N, unit="2th_deg", method="splitbbox_LUT")
img_csr, tth_csr, chi_csr = ai.integrate2d(data, N, unit="2th_deg", method="splitbbox_csr")
self.assertTrue(numpy.allclose(tth, tth_csr), " 2Th are the same")
self.assertTrue(numpy.allclose(chi, chi_csr), " Chi are the same")
# TODO: align on splitbbox rather than splitbbox_csr
diff_img(img, img_csr, "splitbbox")
self.assertTrue(numpy.allclose(img, img_csr), " img are the same")
def test_2d_nosplit(self):
ai.reset()
img, tth, chi = ai.integrate2d(data, N, unit="2th_deg", method="histogram")
img_csr, tth_csr, chi_csr = ai.integrate2d(data, N, unit="2th_deg", method="nosplit_csr")
# diff_crv(tth, tth_csr, "2th")
# self.assertTrue(numpy.allclose(tth, tth_csr), " 2Th are the same")
# self.assertTrue(numpy.allclose(chi, chi_csr), " Chi are the same")
diff_img(img, img_csr, "no split")
self.assertTrue(numpy.allclose(img, img_csr), " img are the same")
def test_suite_all_OpenCL_CSR():
testSuite = unittest.TestSuite()
if opencl.ocl:
for param in TESTCASES:
testSuite.addTest(ParameterisedTestCase.parameterise(
TestOpenclCSR, param))
# if no opencl: no test
# testSuite.addTest(Test_CSR("test_2d_splitbbox"))
# testSuite.addTest(Test_CSR("test_2d_nosplit"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_OpenCL_CSR()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/profile_csr_fullsplit.py 0000644 0001773 0001774 00000002166 12527541311 021570 0 ustar kieffer kieffer 0000000 0000000 # -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio, pyopencl
from pylab import *
from six.moves import input
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_azim_csr
#from pyFAI import splitBBoxLUT
#from pyFAI import splitBBoxCSR
#logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/Pilatus1M.poni")
data = fabio.open("testimages/Pilatus1M.edf").data
ref = ai.xrpd_LUT(data, 1000)[1]
#obt = ai.xrpd_LUT_OCL(data, 1000)[1]
#ref = ai.integrate1d(data, 1000, method="ocl_csr", unit="2th_deg")[0]
pos = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
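# Per-pixel corner coordinates (4 corners per pixel) in 2th/chi space, as expected by the full pixel-splitting LUT built below.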
foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos, 1000, unit="2th_deg")
boo = foo.integrate(data)[1]
foo2 = ocl_azim_csr.OCL_CSR_Integrator(foo.lut, data.size, "GPU", block_size=32)
boo2 = foo2.integrate(data)[0]
plot(ref, label="ocl_csr")
plot(boo, label="csr_fullsplit")
plot(boo2, label="ocl_csr_fullsplit")
legend()
show()
input()
pyFAI-0.11.0/test/test_multi_geometry.py 0000644 0001773 0001774 00000012363 12544200060 021257 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Test suites for multi_geometry modules
"""
import unittest, numpy, os, sys, time, logging
if sys.version_info[0] > 2:
raw_input = input
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from pyFAI.multi_geometry import MultiGeometry
from pyFAI.detectors import Detector
import fabio
class TestMultiGeometry(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.data = fabio.open(UtilsTest.getimage("1788/moke.tif")).data
self.lst_data = [self.data[:250, :300], self.data[250:, :300], self.data[:250, 300:], self.data[250:, 300:]]
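# The 500x600 frame is cut into four 250x300 quadrants; each quadrant gets its own
# sub-detector geometry (self.ais) and MultiGeometry must recombine them into the full pattern.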
self.det = Detector(1e-4, 1e-4)
self.det.max_shape = (500, 600)
self.sub_det = Detector(1e-4, 1e-4)
self.sub_det.max_shape = (250, 300)
self.ai = AzimuthalIntegrator(0.1, 0.03, 0.03, detector=self.det)
self.range = (0, 23)
self.ais = [AzimuthalIntegrator(0.1, 0.030, 0.03, detector=self.sub_det),
AzimuthalIntegrator(0.1, 0.005, 0.03, detector=self.sub_det),
AzimuthalIntegrator(0.1, 0.030, 0.00, detector=self.sub_det),
AzimuthalIntegrator(0.1, 0.005, 0.00, detector=self.sub_det),
]
self.mg = MultiGeometry(self.ais, radial_range=self.range, unit="2th_deg")
self.N = 390
def tearDown(self):
unittest.TestCase.tearDown(self)
self.data = None
self.lst_data = None
self.det = None
self.sub_det = None
self.ai = None
self.ais = None
self.mg = None
def test_integrate1d(self):
tth_ref, I_ref = self.ai.integrate1d(self.data, radial_range=self.range, npt=self.N, unit="2th_deg", method="splitpixel")
obt = self.mg.integrate1d(self.lst_data, self.N)
tth_obt, I_obt = obt
self.assertEqual(abs(tth_ref - tth_obt).max(), 0, "Bin position is the same")
# intensity need to be scaled by solid angle 1e-4*1e-4/0.1**2 = 1e-6
delta = (abs(I_obt * 1e6 - I_ref).max())
self.assert_(delta < 5e-5, "Intensity is the same delta=%s" % delta)
def test_integrate2d(self):
ref = self.ai.integrate2d(self.data, self.N, 360, radial_range=self.range, azimuth_range=(-180, 180), unit="2th_deg", method="splitpixel", all=True)
obt = self.mg.integrate2d(self.lst_data, self.N, 360, all=True)
self.assertEqual(abs(ref["radial"] - obt["radial"]).max(), 0, "Bin position is the same")
self.assertEqual(abs(ref["azimuthal"] - obt["azimuthal"]).max(), 0, "Bin position is the same")
# intensity need to be scaled by solid angle 1e-4*1e-4/0.1**2 = 1e-6
delta = abs(obt["I"] * 1e6 - ref["I"])[obt["count"] >= 1e-6] # restrict on valid pixel
delta_cnt = abs(obt["count"] - ref["count"])
delta_sum = abs(obt["sum"] * 1e6 - ref["sum"])
if delta.max() > 0:
logger.warning("TestMultiGeometry.test_integrate2d gave intensity difference of %s" % delta.max())
if logger.level <= logging.DEBUG:
from matplotlib import pyplot as plt
f = plt.figure()
a1 = f.add_subplot(2, 2, 1)
a1.imshow(ref["sum"])
a2 = f.add_subplot(2, 2, 2)
a2.imshow(obt["sum"])
a3 = f.add_subplot(2, 2, 3)
a3.imshow(delta_sum)
a4 = f.add_subplot(2, 2, 4)
a4.plot(delta_sum.sum(axis=0))
f.show()
raw_input()
self.assert_(delta_cnt.max() < 0.001, "pixel count is the same delta=%s" % delta_cnt.max())
self.assert_(delta_sum.max() < 0.03, "pixel sum is the same delta=%s" % delta_sum.max())
self.assert_(delta.max() < 0.004, "pixel intensity is the same (for populated pixels) delta=%s" % delta.max())
def test_suite_all_multi_geometry():
testSuite = unittest.TestSuite()
testSuite.addTest(TestMultiGeometry("test_integrate1d"))
testSuite.addTest(TestMultiGeometry("test_integrate2d"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_multi_geometry()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/profile_csr_all_platforms.py 0000644 0001773 0001774 00000003774 12527541311 022417 0 ustar kieffer kieffer 0000000 0000000 # -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio, pyopencl
from pylab import *
from six.moves import range
from six.moves import input
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitBBox
from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
from pyFAI import ocl_azim_csr
def prof_inte(csr, data, device, block_size, repeat=10, nbr=3, platformid=None,deviceid=None ):
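# Rebuild the OpenCL CSR integrator `nbr` times, run `repeat`+1 integrations each time,
# average the per-kernel "integrate" timings while discarding the first (warm-up) one,
# and return the best (minimum) average over the rebuilds.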
runtimes=[]
for foo in range(nbr):
t=[]
ocl_csr = ocl_azim_csr.OCL_CSR_Integrator(csr, data.size, device, profile=True, block_size=block_size, platformid=platformid, deviceid=deviceid )
for boo in range(repeat+1):
ocl_csr.integrate(data)
for e in ocl_csr.events:
if "integrate" in e[0]:
et = 1e-6 * (e[1].profile.end - e[1].profile.start)
# print("%50s:\t%.3fms" % (e[0], et))
t.append(et)
runtimes.append(numpy.average(t[1:]))
return numpy.min(runtimes)
if __name__ == "__main__":
ai = pyFAI.load("testimages/Pilatus1M.poni")
data = fabio.open("testimages/Pilatus1M.edf").data
ai.xrpd_LUT(data, 1000)[1]
t0 = time.time()
cyt_csr = pyFAI.splitBBoxCSR.HistoBBox1d(
ai._ttha,
ai._dttha,
bins=1000,
unit="2th_deg")
t1 = time.time()
timings = {}
print("Time to create cython CSR: ", t1 - t0)
block_sizes = [1, 2, 4, 8, 16, 32, 64, 128]
for device in [(0, 0), (1, 0), (2, 0), (2, 1)]:
timings[device] = []
for block_size in block_sizes:
t = prof_inte(cyt_csr.lut, data, "ALL", block_size, nbr=3, repeat=10, platformid=device[0], deviceid=device[1])
timings[device].append(t)
for i in timings:
plot(block_sizes, timings[i], label=str(i))
legend()
show()
input()
pyFAI-0.11.0/test/test_split_pixel.py 0000644 0001773 0001774 00000014706 12527541311 020561 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Test suites for pixel splitting scheme validation
see debug_split_pixel.py for visual validation
"""
import unittest, numpy, os, sys, time
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger, Rwp
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
class TestSplitPixel(unittest.TestCase):
"""
"""
N = 10000
import pyFAI, numpy
img = numpy.zeros((512, 512))
for i in range(1, 6):img[i * 100, i * 100] = 1
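# Five isolated single-pixel peaks along the diagonal: pixel-splitting schemes spread each peak
# over neighbouring radial bins while unsplit histograms keep it in a single bin,
# so the Rwp comparisons below separate the two families of algorithms.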
det = pyFAI.detectors.Detector(1e-4, 1e-4)
det.shape = (512, 512)
ai = pyFAI.AzimuthalIntegrator(1, detector=det)
results = {}
for i, meth in enumerate(["numpy", "cython", "splitbbox", "splitpixel", "csr_no", "csr_bbox", "csr_full"]):
results[meth] = ai.integrate1d(img, 10000, method=meth, unit="2th_deg")
ai.reset()
def test_no_split(self):
"""
Validate that all non splitting algo give the same result...
"""
thres = 7
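# Rwp (from utilstest) is a weighted-profile agreement factor between two integrated curves;
# values below the threshold are treated as equivalent, larger values as significantly different.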
self.assert_(Rwp(self.results["numpy"], self.results["cython"]) < thres, "Cython/Numpy")
self.assert_(Rwp(self.results["csr_no"], self.results["cython"]) < thres, "Cython/CSR")
self.assert_(Rwp(self.results["csr_no"], self.results["numpy"]) < thres, "CSR/numpy")
self.assert_(Rwp(self.results["splitbbox"], self.results["numpy"]) > thres, "splitbbox/Numpy")
self.assert_(Rwp(self.results["splitpixel"], self.results["numpy"]) > thres, "splitpixel/Numpy")
self.assert_(Rwp(self.results["csr_bbox"], self.results["numpy"]) > thres, "csr_bbox/Numpy")
self.assert_(Rwp(self.results["csr_full"], self.results["numpy"]) > thres, "csr_full/Numpy")
self.assert_(Rwp(self.results["splitbbox"], self.results["cython"]) > thres, "splitbbox/cython")
self.assert_(Rwp(self.results["splitpixel"], self.results["cython"]) > thres, "splitpixel/cython")
self.assert_(Rwp(self.results["csr_bbox"], self.results["cython"]) > thres, "csr_bbox/cython")
self.assert_(Rwp(self.results["csr_full"], self.results["cython"]) > thres, "csr_full/cython")
self.assert_(Rwp(self.results["splitbbox"], self.results["csr_no"]) > thres, "splitbbox/csr_no")
self.assert_(Rwp(self.results["splitpixel"], self.results["csr_no"]) > thres, "splitpixel/csr_no")
self.assert_(Rwp(self.results["csr_bbox"], self.results["csr_no"]) > thres, "csr_bbox/csr_no")
self.assert_(Rwp(self.results["csr_full"], self.results["csr_no"]) > thres, "csr_full/csr_no")
def test_split_bbox(self):
"""
Validate that all bbox splitting algo give all the same result...
"""
thres = 7
self.assert_(Rwp(self.results["csr_bbox"], self.results["splitbbox"]) < thres, "csr_bbox/splitbbox")
self.assert_(Rwp(self.results["numpy"], self.results["splitbbox"]) > thres, "numpy/splitbbox")
self.assert_(Rwp(self.results["cython"], self.results["splitbbox"]) > thres, "cython/splitbbox")
self.assert_(Rwp(self.results["splitpixel"], self.results["splitbbox"]) > thres, "splitpixel/splitbbox")
self.assert_(Rwp(self.results["csr_no"], self.results["splitbbox"]) > thres, "csr_no/splitbbox")
self.assert_(Rwp(self.results["csr_full"], self.results["splitbbox"]) > thres, "csr_full/splitbbox")
self.assert_(Rwp(self.results["numpy"], self.results["csr_bbox"]) > thres, "numpy/csr_bbox")
self.assert_(Rwp(self.results["cython"], self.results["csr_bbox"]) > thres, "cython/csr_bbox")
self.assert_(Rwp(self.results["splitpixel"], self.results["csr_bbox"]) > thres, "splitpixel/csr_bbox")
self.assert_(Rwp(self.results["csr_no"], self.results["csr_bbox"]) > thres, "csr_no/csr_bbox")
self.assert_(Rwp(self.results["csr_full"], self.results["csr_bbox"]) > thres, "csr_full/csr_bbox")
def test_split_full(self):
"""
Validate that all full splitting algo give all the same result...
"""
thres = 7
self.assert_(Rwp(self.results["csr_full"], self.results["splitpixel"]) < thres, "csr_full/splitpixel")
self.assert_(Rwp(self.results["numpy"], self.results["splitpixel"]) > thres, "numpy/splitpixel")
self.assert_(Rwp(self.results["cython"], self.results["splitpixel"]) > thres, "cython/splitpixel")
self.assert_(Rwp(self.results["splitbbox"], self.results["splitpixel"]) > thres, "splitpixel/splitpixel")
self.assert_(Rwp(self.results["csr_no"], self.results["splitpixel"]) > thres, "csr_no/splitpixel")
self.assert_(Rwp(self.results["csr_bbox"], self.results["splitpixel"]) > thres, "csr_full/splitpixel")
self.assert_(Rwp(self.results["numpy"], self.results["csr_full"]) > thres, "numpy/csr_full")
self.assert_(Rwp(self.results["cython"], self.results["csr_full"]) > thres, "cython/csr_full")
self.assert_(Rwp(self.results["splitbbox"], self.results["csr_full"]) > thres, "splitpixel/csr_full")
self.assert_(Rwp(self.results["csr_no"], self.results["csr_full"]) > thres, "csr_no/csr_full")
self.assert_(Rwp(self.results["csr_bbox"], self.results["csr_full"]) > thres, "csr_full/csr_full")
def test_suite_all_split():
testSuite = unittest.TestSuite()
testSuite.addTest(TestSplitPixel("test_no_split"))
testSuite.addTest(TestSplitPixel("test_split_bbox"))
testSuite.addTest(TestSplitPixel("test_split_full"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_split()
runner = unittest.TextTestRunner()
runner.run(mysuite)
UtilsTest.clean_up() pyFAI-0.11.0/test/test_histogram.py 0000755 0001773 0001774 00000041252 12527541311 020221 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"test suite for histogramming implementations"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "11/05/2015"
import unittest
import time
import numpy
import logging
import sys
import platform
from numpy import cos
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI.histogram import histogram, histogram2d
from pyFAI.splitBBoxCSR import HistoBBox1d, HistoBBox2d
if logger.getEffectiveLevel() == logging.DEBUG:
import pylab
EPS32 = (1.0 + numpy.finfo(numpy.float32).eps)
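# 1 + float32 machine epsilon: scaling the upper bound of the histogram range by this keeps
# the largest coordinate strictly inside the last bin for implementations using half-open bins.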
class TestHistogram1d(unittest.TestCase):
"""basic test"""
shape = (512, 512)
npt = 500
size = shape[0] * shape[1]
maxI = 1000
epsilon = 1.0e-4
y, x = numpy.ogrid[:shape[0], :shape[1]]
tth = numpy.sqrt(x * x + y * y).astype("float32")
mod = 0.5 + 0.5 * cos(tth / 12) + 0.25 * cos(tth / 6) + 0.1 * cos(tth / 4)
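# Smooth, radially oscillating modulation applied to the Poisson counts, producing a non-trivial powder-like pattern to histogram.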
data = (numpy.random.poisson(maxI, shape) * mod).astype("uint16")
data_sum = data.sum(dtype="float64")
t0 = time.time()
drange = (tth.min(), tth.max() * EPS32)
unweight_numpy, bin_edges = numpy.histogram(tth, npt, range=drange)
t1 = time.time()
weight_numpy, bin_edges = numpy.histogram(tth, npt, weights=data.astype("float64"), range=drange)
t2 = time.time()
logger.info("Timing for Numpy raw histogram: %.3f", t1 - t0)
logger.info("Timing for Numpy weighted histogram: %.3f", t2 - t1)
bins_numpy = 0.5 * (bin_edges[1:] + bin_edges[:-1])
I_numpy = weight_numpy / numpy.maximum(1.0, unweight_numpy)
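# Mean intensity per bin; clipping the pixel count to 1 leaves empty bins at 0 instead of producing NaN.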
t3 = time.time()
bins_cython, I_cython, weight_cython, unweight_cython = histogram(tth, data, npt, pixelSize_in_Pos=0)
t4 = time.time()
logger.info("Timing for Cython both histogram: %.3f", t4 - t3)
t3 = time.time()
integrator = HistoBBox1d(tth, delta_pos0=None, pos1=None, delta_pos1=None,
bins=npt, pos0Range=drange, allow_pos0_neg=False,
unit="undefined",)
t2 = time.time()
bins_csr, I_csr, weight_csr, unweight_csr = integrator.integrate(data)
t4 = time.time()
logger.info("Timing for CSR init: %.3fs, integrate: %0.3fs, both: %.3f", (t2 - t3), (t4 - t2), (t4 - t3))
def test_count_numpy(self):
"""
Test that the pixel count and the total intensity is conserved
in numpy implementation
"""
sump = self.unweight_numpy.sum(dtype="int64")
intensity_obt = self.weight_numpy.sum(dtype="float64")
delta = abs(sump - self.size)
logger.info("Numpy: Total number of points: %s (%s expected), delta = %s", sump, self.size, delta)
v = abs(intensity_obt - self.data_sum) / self.data_sum
logger.info("Numpy: Total Intensity: %s (%s expected), variation = %s", intensity_obt, self.data_sum, v)
self.assert_(delta == 0, msg="check all pixels were counted")
summed_weight_hist = self.weight_numpy.sum(dtype="float64")
self.assert_(summed_weight_hist == self.data_sum, msg="check all intensity is counted expected %s got %s" % (self.data_sum, summed_weight_hist))
self.assert_(v < self.epsilon, msg="checks delta is lower than %s, got %s" % (self.epsilon, v))
def test_count_cython(self):
"""
Test that the pixel count and the total intensity is conserved
in cython implementation
"""
sump = int(self.unweight_cython.sum(dtype="float64"))
intensity_obt = self.weight_cython.sum(dtype="float64")
delta = abs(sump - self.size)
logger.info("Cython: Total number of points: %s (%s expected), delta = %s", sump, self.size, delta)
v = abs(intensity_obt - self.data_sum) / self.data_sum
logger.info("Cython: Total Intensity: %s (%s expected), variation = %s", intensity_obt, self.data_sum, v)
self.assert_(delta == 0, msg="check all pixels were counted expected %s got %s" % (self.size, sump))
summed_weight_hist = self.weight_cython.sum(dtype="float64")
self.assert_(summed_weight_hist == self.data_sum, msg="check all intensity is counted expected %s got %s" % (self.data_sum, summed_weight_hist))
self.assertTrue(v < self.epsilon, msg="checks delta is lower than %s" % self.epsilon)
def test_count_csr(self):
"""
Test that the pixel count and the total intensity is conserved
in the CSR sparse-matrix multiplication implementation
"""
sump = int(self.unweight_csr.sum(dtype="float64"))
intensity_obt = self.weight_csr.sum(dtype="float64")
delta = abs(sump - self.size)
logger.info("CSR: Total number of points: %s (%s expected), delta = %s", sump, self.size, delta)
v = abs(intensity_obt - self.data_sum) / self.data_sum
logger.info("CSR: Total Intensity: %s (%s expected), variation = %s", intensity_obt, self.data_sum, v)
self.assert_(delta == 0, msg="check all pixels were counted expected %s got %s" % (self.size, sump))
summed_weight_hist = self.weight_csr.sum(dtype="float64")
self.assert_(summed_weight_hist == self.data_sum, msg="check all intensity is counted expected %s got %s" % (self.data_sum, summed_weight_hist))
self.assertTrue(v < self.epsilon, msg="checks delta is lower than %s" % self.epsilon)
def test_numpy_vs_cython_vs_csr_1d(self):
"""
Compare numpy histogram with the cython simple implementation and CSR
"""
max_delta = abs(self.bins_numpy - self.bins_cython).max()
logger.info("Bin-center position for cython/numpy, max delta=%s", max_delta)
self.assert_(max_delta < self.epsilon, "Bin-center position for cython/numpy, max delta=%s" % max_delta)
max_delta = abs(self.bins_numpy - self.bins_csr).max()
logger.info("Bin-center position for csr/numpy, max delta=%s", max_delta)
self.assert_(max_delta < self.epsilon, "Bin-center position for csr/numpy, max delta=%s" % max_delta)
rwp1 = Rwp((self.bins_cython, self.I_cython), (self.bins_numpy, self.I_numpy))
logger.info("Rwp Cython/Numpy = %.3f" % rwp1)
self.assert_(rwp1 < self.epsilon, "Rwp Cython/Numpy = %.3f" % rwp1)
rwp2 = Rwp((self.bins_csr, self.I_csr), (self.bins_numpy, self.I_numpy))
logger.info("Rwp CSR/Numpy = %.3f" % rwp2)
self.assert_(rwp2 < 3, "Rwp Cython/Numpy = %.3f" % rwp2)
if logger.getEffectiveLevel() == logging.DEBUG:
logger.info("Plotting results")
fig = pylab.figure()
fig.suptitle('Numpy /Cython R=%.3f, Numpy/CSR R=%.3f' % (rwp1, rwp2))
sp = fig.add_subplot(111)
sp.plot(self.bins_numpy, self.I_numpy, "-b", label='numpy')
sp.plot(self.bins_cython, self.I_cython, "-r", label="cython")
sp.plot(self.bins_csr, self.I_csr, "-g", label="CSR")
handles, labels = sp.get_legend_handles_labels()
fig.legend(handles, labels)
fig.show()
raw_input("Press enter to quit")
delta_max = abs(self.unweight_numpy - self.unweight_cython).max()
logger.info("pixel count difference numpy/cython : max delta=%s", delta_max)
self.assert_(delta_max < 1, "numpy_vs_cython_1d max delta unweight = %s" % delta_max)
delta_max = abs(self.I_cython - self.I_numpy).max()
logger.info("Intensity count difference numpy/cython : max delta=%s", delta_max)
self.assert_(delta_max < self.epsilon, "Intensity count difference numpy/cython : max delta=%s" % delta_max)
# TODO: fix this !!!
delta_max = abs(self.unweight_numpy - self.unweight_csr).max()
if delta_max > 0:
logger.warning("pixel count difference numpy/csr : max delta=%s", delta_max)
self.assert_(delta_max < 10, "numpy_vs_csr_1d max delta unweight = %s" % delta_max)
delta_max = abs(self.I_csr - self.I_numpy).max()
if delta_max > self.epsilon:
logger.warning("Intensity count difference numpy/csr : max delta=%s", delta_max)
self.assert_(delta_max < 0.65, "Intensity count difference numpy/csr : max delta=%s" % delta_max)
class TestHistogram2d(unittest.TestCase):
"""basic test for 2D histogram"""
shape = (512, 512)
size = shape[0] * shape[1]
maxI = 1000
epsilon = 1.1e-4
y, x = numpy.ogrid[:shape[0], :shape[1]]
tth = numpy.sqrt(x * x + y * y).astype("float32")
mod = 0.5 + 0.5 * cos(tth / 12) + 0.25 * cos(tth / 6) + 0.1 * cos(tth / 4)
data = (numpy.random.poisson(maxI, shape) * mod).astype("uint16")
data_sum = data.sum(dtype="float64")
npt = (400, 360)
chi = numpy.arctan2(y, x).astype("float32")
drange = [[tth.min(), tth.max() * EPS32], [chi.min(), chi.max() * EPS32]]
t0 = time.time()
unweight_numpy, tth_edges, chi_edges = numpy.histogram2d(tth.flatten(), chi.flatten(), npt, range=drange)
t1 = time.time()
weight_numpy, tth_edges, chi_edges = numpy.histogram2d(tth.flatten(), chi.flatten(), npt, weights=data.astype("float64").flatten(), range=drange)
t2 = time.time()
logger.info("Timing for Numpy raw histogram2d: %.3f", t1 - t0)
logger.info("Timing for Numpy weighted histogram2d: %.3f", t2 - t1)
tth_numpy = 0.5 * (tth_edges[1:] + tth_edges[:-1])
chi_numpy = 0.5 * (chi_edges[1:] + chi_edges[:-1])
I_numpy = weight_numpy / numpy.maximum(1.0, unweight_numpy)
t3 = time.time()
I_cython, tth_cython, chi_cython, weight_cython, unweight_cython = histogram2d(tth.flatten(), chi.flatten(), npt, data.flatten(), split=0)
t4 = time.time()
logger.info("Timing for Cython both histogram2d: %.3f", t4 - t3)
t3 = time.time()
integrator = HistoBBox2d(tth, None, chi, delta_pos1=None,
bins=npt, allow_pos0_neg=False, unit="undefined")
t2 = time.time()
I_csr, tth_csr, chi_csr, weight_csr, unweight_csr = integrator.integrate(data)
t4 = time.time()
logger.info("Timing for CSR init: %.3fs, integrate: %0.3fs, both: %.3f", (t2 - t3), (t4 - t2), (t4 - t3))
if platform.system() == "Linux":
err_max_cnt = 0
else:
# Under windows or MacOSX, up to 1 bin error has been reported...
err_max_cnt = 1
def test_count_numpy(self):
"""
Test that the pixel count and the total intensity is conserved
in numpy implementation
"""
sump = self.unweight_numpy.sum(dtype="int64")
intensity_obt = self.weight_numpy.sum(dtype="float64")
delta = abs(sump - self.size)
logger.info("Numpy: Total number of points: %s (%s expected), delta = %s", sump, self.size, delta)
v = abs(intensity_obt - self.data_sum) / self.data_sum
logger.info("Numpy: Total Intensity: %s (%s expected), variation = %s", intensity_obt, self.data_sum, v)
self.assert_(delta == 0, "Numpy: Total number of points: %s (%s expected), delta = %s" % (sump, self.size, delta))
self.assert_(v < self.epsilon, "Numpy: Total Intensity: %s (%s expected), variation = %s" % (intensity_obt, self.data_sum, v))
def test_count_cython(self):
"""
Test that the pixel count and the total intensity is conserved
in cython implementation
"""
sump = int(self.unweight_cython.sum(dtype="int64"))
intensity_obt = self.weight_cython.sum(dtype="float64")
delta = abs(sump - self.size)
logger.info("Cython: Total number of points: %s (%s expected), delta = %s", sump, self.size, delta)
v = abs(intensity_obt - self.data_sum) / self.data_sum
logger.info("Cython: Total Intensity: %s (%s expected), variation = %s", intensity_obt, self.data_sum, v)
self.assert_(delta == 0, msg="check all pixels were counted")
self.assert_(v < self.epsilon, msg="checks delta is lower than %s" % self.epsilon)
def test_count_csr(self):
"""
Test that the pixel count and the total intensity is conserved
in csr implementation
"""
sump = int(self.unweight_csr.sum(dtype="int64"))
intensity_obt = self.weight_csr.sum(dtype="float64")
delta = abs(sump - self.size)
logger.info("CSR: Total number of points: %s (%s expected), delta = %s", sump, self.size, delta)
v = abs(intensity_obt - self.data_sum) / self.data_sum
logger.info("CSR: Total Intensity: %s (%s expected), variation = %s", intensity_obt, self.data_sum, v)
self.assert_(delta == 0, msg="check all pixels were counted")
self.assert_(v < self.epsilon, msg="checks delta is lower than %s" % self.epsilon)
def test_numpy_vs_cython_vs_csr_2d(self):
"""
Compare numpy histogram with the cython simple implementation and CSR
"""
max_delta = abs(self.tth_numpy - self.tth_cython).max()
logger.info("Bin-center position for cython/numpy tth, max delta=%s", max_delta)
self.assert_(max_delta < self.epsilon, "Bin-center position for cython/numpy tth, max delta=%s" % max_delta)
max_delta = abs(self.chi_numpy - self.chi_cython).max()
logger.info("Bin-center position for cython/numpy chi, max delta=%s", max_delta)
self.assert_(max_delta < self.epsilon, "Bin-center position for cython/numpy chi, max delta=%s" % max_delta)
delta_max = abs(self.unweight_numpy - self.unweight_cython).max()
logger.info("pixel count difference numpy/cython : max delta=%s", delta_max)
if delta_max > 0:
logger.warning("pixel count difference numpy/cython : max delta=%s", delta_max)
self.assert_(delta_max <= self.err_max_cnt, "pixel count difference numpy/cython : max delta=%s" % delta_max)
delta_max = abs(self.I_cython - self.I_numpy).max()
logger.info("Intensity count difference numpy/cython : max delta=%s", delta_max)
self.assert_(delta_max < (self.err_max_cnt + self.epsilon) * self.maxI, "Intensity count difference numpy/cython : max delta=%s>%s" % (delta_max, (self.err_max_cnt + self.epsilon) * self.maxI))
max_delta = abs(self.tth_numpy - self.tth_csr).max()
logger.info("Bin-center position for csr/numpy tth, max delta=%s", max_delta)
self.assert_(max_delta < self.epsilon, "Bin-center position for csr/numpy tth, max delta=%s" % max_delta)
max_delta = abs(self.chi_numpy - self.chi_csr).max()
logger.info("Bin-center position for csr/numpy chi, max delta=%s", max_delta)
self.assert_(max_delta < self.epsilon, "Bin-center position for csr/numpy chi, max delta=%s" % max_delta)
delta_max = abs(self.unweight_numpy - self.unweight_csr.T).max()
if delta_max > 0:
logger.warning("pixel count difference numpy/csr : max delta=%s", delta_max)
self.assert_(delta_max <= self.err_max_cnt + 1, "pixel count difference numpy/csr : max delta=%s" % delta_max)
delta_max = abs(self.I_csr.T - self.I_numpy).max()
if delta_max > self.epsilon:
logger.warning("Intensity count difference numpy/csr : max delta=%s", delta_max)
self.assert_(delta_max < 31, "Intensity count difference numpy/csr : max delta=%s" % delta_max)
def test_suite_all_Histogram():
testSuite = unittest.TestSuite()
testSuite.addTest(TestHistogram1d("test_count_numpy"))
testSuite.addTest(TestHistogram1d("test_count_cython"))
testSuite.addTest(TestHistogram1d("test_count_csr"))
testSuite.addTest(TestHistogram1d("test_numpy_vs_cython_vs_csr_1d"))
testSuite.addTest(TestHistogram2d("test_count_numpy"))
testSuite.addTest(TestHistogram2d("test_count_cython"))
testSuite.addTest(TestHistogram2d("test_count_csr"))
testSuite.addTest(TestHistogram2d("test_numpy_vs_cython_vs_csr_2d"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_Histogram()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/test_utils.py 0000755 0001773 0001774 00000015420 12527541311 017362 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import division, print_function, absolute_import
"test suite for utilities library"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "07/05/2015"
import unittest
import numpy
import sys
import os
import fabio
import tempfile
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger, recursive_delete
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
import pyFAI.utils
# if logger.getEffectiveLevel() <= logging.INFO:
# from pyFAI.gui_utils import pylab
import scipy.ndimage
# TODO Test:
# gaussian_filter
# relabel
# boundingBox
# removeSaturatedPixel
# DONE:
# # binning
# # unbinning
# # shift
# # shiftFFT
# # measure_offset
# # averageDark
# # averageImages
class TestUtils(unittest.TestCase):
unbinned = numpy.random.random((64, 32))
dark = unbinned.astype("float32")
flat = 1 + numpy.random.random((64, 32))
raw = flat + dark
tmp_dir = UtilsTest.tempdir
tmp_file = os.path.join(tmp_dir, "testUtils_average.edf")
def test_binning(self):
"""
test the binning and unbinning functions
"""
binned = pyFAI.utils.binning(self.unbinned, (4, 2))
self.assertEqual(binned.shape, (64 // 4, 32 // 2), "binned size is OK")
unbinned = pyFAI.utils.unBinning(binned, (4, 2))
self.assertEqual(unbinned.shape, self.unbinned.shape, "unbinned size is OK")
self.assertAlmostEqual(unbinned.sum(), self.unbinned.sum(), 2, "content is the same")
def test_averageDark(self):
"""
Some testing for dark averaging
"""
one = pyFAI.utils.averageDark([self.dark])
self.assertEqual(abs(self.dark - one).max(), 0, "data are the same")
two = pyFAI.utils.averageDark([self.dark, self.dark])
self.assertEqual(abs(self.dark - two).max(), 0, "data are the same: mean test")
three = pyFAI.utils.averageDark([numpy.ones_like(self.dark), self.dark, numpy.zeros_like(self.dark)], "median")
self.assertEqual(abs(self.dark - three).max(), 0, "data are the same: median test")
four = pyFAI.utils.averageDark([numpy.ones_like(self.dark), self.dark, numpy.zeros_like(self.dark)], "min")
self.assertEqual(abs(numpy.zeros_like(self.dark) - four).max(), 0, "data are the same: min test")
five = pyFAI.utils.averageDark([numpy.ones_like(self.dark), self.dark, numpy.zeros_like(self.dark)], "max")
self.assertEqual(abs(numpy.ones_like(self.dark) - five).max(), 0, "data are the same: max test")
six = pyFAI.utils.averageDark([numpy.ones_like(self.dark), self.dark, numpy.zeros_like(self.dark), self.dark, self.dark], "median", .001)
self.assert_(abs(self.dark - six).max() < 1e-4, "data are the same: test threshold")
seven = pyFAI.utils.averageImages([self.raw], darks=[self.dark], flats=[self.flat], threshold=0, output=self.tmp_file)
self.assert_(abs(numpy.ones_like(self.dark) - fabio.open(seven).data).mean() < 1e-2, "averageImages")
def test_shift(self):
"""
Some testing for image shifting and offset measurement functions.
"""
ref = numpy.ones((11, 12))
ref[2, 3] = 5
res = numpy.ones((11, 12))
res[5, 7] = 5
delta = (5 - 2, 7 - 3)
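# Shifting ref by (3, 4) should move the spike from (2, 3) to (5, 7) and reproduce res exactly.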
self.assert_(abs(pyFAI.utils.shift(ref, delta) - res).max() < 1e-12, "shift with integers works")
self.assert_(abs(pyFAI.utils.shiftFFT(ref, delta) - res).max() < 1e-12, "shift with FFTs works")
self.assert_(pyFAI.utils.measure_offset(res, ref) == delta, "measure offset works")
def test_gaussian_filter(self):
"""
Check gaussian filters applied via FFT
"""
for sigma in [2, 9.0 / 8.0]:
for mode in ["wrap", "reflect", "constant", "nearest", "mirror"]:
blurred1 = scipy.ndimage.filters.gaussian_filter(self.flat, sigma, mode=mode)
blurred2 = pyFAI.utils.gaussian_filter(self.flat, sigma, mode=mode)
delta = abs((blurred1 - blurred2) / (blurred1)).max()
logger.info("Error for gaussian blur sigma: %s with mode %s is %s" % (sigma, mode, delta))
self.assert_(delta < 6e-5, "Gaussian blur sigma: %s in %s mode are the same, got %s" % (sigma, mode, delta))
def test_set(self):
s = pyFAI.utils.FixedParameters()
self.assertEqual(len(s), 0, "initial set is empty")
s.add_or_discard("a", True)
self.assertEqual(len(s), 1, "a is in set")
s.add_or_discard("a", None)
self.assertEqual(len(s), 1, "set is untouched")
s.add_or_discard("a", False)
self.assertEqual(len(s), 0, "set is empty again")
s.add_or_discard("a", None)
self.assertEqual(len(s), 0, "set is untouched")
s.add_or_discard("a", False)
self.assertEqual(len(s), 0, "set is still empty")
def test_expand2d(self):
vect = numpy.arange(10.)
size2 = 11
self.assert_((numpy.outer(vect, numpy.ones(size2)) == pyFAI.utils.expand2d(vect, size2, False)).all(), "horizontal vector expand")
self.assert_((numpy.outer(numpy.ones(size2), vect) == pyFAI.utils.expand2d(vect, size2, True)).all(), "vertical vector expand")
def test_suite_all_Utils():
testSuite = unittest.TestSuite()
testSuite.addTest(TestUtils("test_binning"))
testSuite.addTest(TestUtils("test_averageDark"))
testSuite.addTest(TestUtils("test_shift"))
testSuite.addTest(TestUtils("test_gaussian_filter"))
testSuite.addTest(TestUtils("test_set"))
testSuite.addTest(TestUtils("test_expand2d"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_Utils()
runner = unittest.TextTestRunner()
runner.run(mysuite)
UtilsTest.clean_up()
pyFAI-0.11.0/test/test_marchingsquares.py 0000644 0001773 0001774 00000005013 12527541311 021410 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"test suite for marching_squares / isocontour"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import unittest
import os
import numpy
import logging, time
import sys
import fabio
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI.marchingsquares import isocontour
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
class TestMarchingSquares(unittest.TestCase):
def test_isocontour(self):
ref = 50
y, x = numpy.ogrid[-100:100:0.1, -100:100:0.1]
r = numpy.sqrt(x * x + y * y)
c = isocontour(r, ref)
self.assertNotEqual(0, len(c), "contour plot contains no points")
i = numpy.round(c).astype(numpy.int32)
self.assert_(abs(r[(i[:, 0], i[:, 1])] - ref).max() < 0.05, "contour plot not working correctly")
if logger.getEffectiveLevel() <= logging.INFO:
pylab.imshow(r)
pylab.plot(c[:, 1], c[:, 0], ",")
pylab.show()
def test_suite_all_marchingsquares():
testSuite = unittest.TestSuite()
testSuite.addTest(TestMarchingSquares("test_isocontour"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_marchingsquares()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/splitpixel_vs_splitpixelfull.py 0000644 0001773 0001774 00000001152 12440262356 023225 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/python
import pyFAI
import numpy
from pyFAI import splitPixelFull, splitPixel
ai = pyFAI.AzimuthalIntegrator(detector="Fairchild")
shape = (2048, 2048)
data = numpy.zeros(shape)
data[100, 200] = 1
tth, I = ai.integrate1d(data, 10000, correctSolidAngle=False, method="splitpixel", unit="2th_deg")
res_splitPixelFull = splitPixelFull.fullSplit1D(ai._corner4Da, data, bins=10000)
res_splitPixel = splitPixel.fullSplit1D(ai._corner4Da, data, bins=10000)
for i, ary in enumerate(("tth", "I", "unweight", "weight")):
print("Error on %s: %s" % (i, abs(res_splitPixelFull[i] - res_splitPixel[i]).max()))
pyFAI-0.11.0/test/test_bug_regression.py 0000644 0001773 0001774 00000005330 12527541311 021233 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""test suite for non regression on some bugs.
Please refer to their respective bug number
https://github.com/kif/pyFAI/issues
"""
__author__ = "Jérôme Kieffer"
__contact__ = "Jérôme Kieffer"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/02/2015"
import sys
import os
import unittest
import numpy
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import getLogger, UtilsTest
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
class TestBug170(unittest.TestCase):
"""
Test a mar345 image with 2300 pixels size
"""
poni = """
Detector: Mar345
PixelSize1: 0.00015
PixelSize2: 0.00015
Distance: 0.446642915189
Poni1: 0.228413453499
Poni2: 0.272291324302
Rot1: 0.0233130647508
Rot2: 0.0011735285628
Rot3: -7.22446379865e-08
SplineFile: None
Wavelength: 7e-11
"""
def setUp(self):
self.ponifile = os.path.join(UtilsTest.tempdir, "bug170.poni")
with open(self.ponifile, "w") as poni:
poni.write(self.poni)
self.data = numpy.random.random((2300,2300))
def test_bug170(self):
ai = pyFAI.load(self.ponifile)
logger.debug(ai.mask.shape)
logger.debug(ai.detector.pixel1)
logger.debug(ai.detector.pixel2)
ai.integrate1d(self.data, 2000)
def tearDown(self):
if os.path.exists(self.ponifile):
os.unlink(self.ponifile)
self.data = None
def test_suite_bug_regression():
testSuite = unittest.TestSuite()
testSuite.addTest(TestBug170("test_bug170"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_bug_regression()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/all_testimages.json 0000644 0001773 0001774 00000001103 12527541311 020467 0 ustar kieffer kieffer 0000000 0000000 ["1883/Pilatus1M.edf", "2454/halfccd.fit2d.edf", "1880/Fairchild.edf", "1894/testMask.edf", "1460/fit2d.dat", "1490/bioSaxsMaskOnly.edf", "1492/bsa_013_01.edf", "1491/Pcon_01Apr_msk.edf", "2549/powder_200_2_0001.cbf", "1788/moke.tif", "1896/Frelon2k.poni", "1882/halfccd.edf", "1881/Frelon2k.edf", "1884/Pilatus6M.cbf", "1893/Pilatus1M.poni", "1897/Pilatus6M.poni", "1463/LaB6.poni", "1489/bioSaxs.poni", "1488/bioSaxsMaskDummy.edf", "1900/frelon.spline", "2548/powder_200_2_0001.chi", "1461/halfccd.spline", "1895/halfccd.poni", "2201/LaB6_260210.mar3450", "1464/LaB6_0020.edf"] pyFAI-0.11.0/test/test_integrate.py 0000755 0001773 0001774 00000017477 12527541311 020222 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"test suite for masked arrays"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import unittest
import os
import numpy
import logging, time
import sys
import fabio
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
class TestIntegrate1D(unittest.TestCase):
npt = 1000
img = UtilsTest.getimage("1883/Pilatus1M.edf")
data = fabio.open(img).data
ai = pyFAI.AzimuthalIntegrator(1.58323111834, 0.0334170169115, 0.0412277798782, 0.00648735642526, 0.00755810191106, 0.0, detector=pyFAI.detectors.Pilatus1M())
ai.wavelength = 1e-10
Rmax = 3
def testQ(self):
res = {}
for m in ("numpy", "cython", "BBox" , "splitpixel", "lut", "lut_ocl"):
res[m] = self.ai.integrate1d(self.data, self.npt, method=m, radial_range=(0.5, 5.8))
for a in res:
for b in res:
R = Rwp(res[a], res[b])
mesg = "testQ: %s vs %s measured R=%s<%s" % (a, b, R, self.Rmax)
if R > self.Rmax:
logger.error(mesg)
else:
logger.info(mesg)
self.assertTrue(R <= self.Rmax, mesg)
def testR(self):
res = {}
for m in ("numpy", "cython", "BBox" , "splitpixel", "lut", "lut_ocl"):
res[m] = self.ai.integrate1d(self.data, self.npt, method=m, unit="r_mm", radial_range=(20, 150))
for a in res:
for b in res:
R = Rwp(res[a], res[b])
mesg = "testR: %s vs %s measured R=%s<%s" % (a, b, R, self.Rmax)
if R > self.Rmax:
logger.error(mesg)
else:
logger.info(mesg)
self.assertTrue(R <= self.Rmax, mesg)
def test2th(self):
res = {}
for m in ("numpy", "cython", "BBox" , "splitpixel", "lut", "lut_ocl"):
res[m] = self.ai.integrate1d(self.data, self.npt, method=m, unit="2th_deg", radial_range=(0.5, 5.5))
for a in res:
for b in res:
R = Rwp(res[a], res[b])
mesg = "test2th: %s vs %s measured R=%s<%s" % (a, b, R, self.Rmax)
if R > self.Rmax:
logger.error(mesg)
else:
logger.info(mesg)
self.assertTrue(R <= self.Rmax, mesg)
class TestIntegrate2D(unittest.TestCase):
npt = 500
img = UtilsTest.getimage("1883/Pilatus1M.edf")
data = fabio.open(img).data
ai = pyFAI.AzimuthalIntegrator(1.58323111834, 0.0334170169115, 0.0412277798782, 0.00648735642526, 0.00755810191106, 0.0, detector=pyFAI.detectors.Pilatus1M())
ai.wavelength = 1e-10
Rmax = 20
delta_pos_azim_max = 0.28
def testQ(self):
res = {}
for m in ("numpy", "cython", "BBox" , "splitpixel"): # , "lut", "lut_ocl"):
res[m] = self.ai.integrate2d(self.data, self.npt, method=m)
mask = (res["numpy"][0] != 0)
self.assertTrue(mask.sum() > 36 * self.npt, "at least 10% of the bins are populated")
for a in res:
for b in res:
delta_pos_rad = abs(res[a][1] - res[b][1]).max()
delta_pos_azim = abs(res[a][2] - res[b][2]).max()
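# R: mean relative intensity deviation (in percent) over the populated bins of the 2D regrouping.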
R = abs((res[a][0][mask] - res[b][0][mask]) / numpy.maximum(1, res[a][0][mask])).mean() * 100
mesg = "testQ 2D: %s vs %s measured delta rad=%s azim=%s R=%s<%s" % (a, b, delta_pos_rad, delta_pos_azim, R, self.Rmax)
if R > self.Rmax:
logger.error(mesg)
else:
logger.info(mesg)
self.assertTrue(delta_pos_rad <= 0.01, mesg)
self.assertTrue(delta_pos_azim <= self.delta_pos_azim_max, mesg)
self.assertTrue(R <= self.Rmax, mesg)
def testR(self):
res = {}
for m in ("numpy", "cython", "BBox" , "splitpixel"): # , "lut", "lut_ocl"):
res[m] = self.ai.integrate2d(self.data, self.npt, method=m, unit="r_mm") # , radial_range=(20, 150))
mask = (res["numpy"][0] != 0)
self.assertTrue(mask.sum() > 36 * self.npt, "at least 10% of the bins are populated")
for a in res:
for b in res:
delta_pos_rad = abs(res[a][1] - res[b][1]).max()
delta_pos_azim = abs(res[a][2] - res[b][2]).max()
R = abs((res[a][0][mask] - res[b][0][mask]) / numpy.maximum(1, res[a][0][mask])).mean() * 100
mesg = "testR 2D: %s vs %s measured delta rad=%s azim=%s R=%s<%s" % (a, b, delta_pos_rad, delta_pos_azim, R, self.Rmax)
if R > self.Rmax:
logger.error(mesg)
else:
logger.info(mesg)
self.assertTrue(delta_pos_rad <= 0.28, mesg)
self.assertTrue(delta_pos_azim <= self.delta_pos_azim_max, mesg)
self.assertTrue(R <= self.Rmax, mesg)
def test2th(self):
res = {}
for m in ("numpy", "cython", "BBox" , "splitpixel"): # , "lut", "lut_ocl"):
res[m] = self.ai.integrate2d(self.data, self.npt, method=m, unit="2th_deg") # , radial_range=(0.5, 5.5))
mask = (res["numpy"][0] != 0)
self.assertTrue(mask.sum() > 36 * self.npt, "at least 10% of the bins are populated")
for a in res:
for b in res:
if a == b:
continue
delta_pos_rad = abs(res[a][1] - res[b][1]).max()
delta_pos_azim = abs(res[a][2] - res[b][2]).max()
R = abs((res[a][0][mask] - res[b][0][mask]) / numpy.maximum(1, res[a][0][mask])).mean() * 100
mesg = "test2th 2D: %s vs %s measured delta rad=%s azim=%s R=%s<%s" % (a, b, delta_pos_rad, delta_pos_azim, R, self.Rmax)
if R > self.Rmax:
logger.error(mesg)
else:
logger.info(mesg)
self.assertTrue(delta_pos_rad <= 0.01, mesg)
self.assertTrue(R <= self.Rmax, mesg)
def test_suite_all_Integrate1d():
testSuite = unittest.TestSuite()
testSuite.addTest(TestIntegrate1D("testQ"))
testSuite.addTest(TestIntegrate1D("testR"))
testSuite.addTest(TestIntegrate1D("test2th"))
testSuite.addTest(TestIntegrate2D("testQ"))
testSuite.addTest(TestIntegrate2D("testR"))
testSuite.addTest(TestIntegrate2D("test2th"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_Integrate1d()
runner = unittest.TextTestRunner()
runner.run(mysuite)
# if logger.getEffectiveLevel() == logging.DEBUG:
# pylab.legend()
# pylab.show()
# raw_input()
# pylab.clf()
pyFAI-0.11.0/test/test_export.py 0000755 0001773 0001774 00000011341 12527541311 017541 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"test suite for masked arrays"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import fabio
import logging, time
import numpy
import os
import sys
import unittest
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
def testExport(direct=100, centerX=900, centerY=1000, tilt=0, tpr=0, pixelX=50, pixelY=60):
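# Round-trip the same geometry through the pyFAI (a1 -> a2) and Fit2D (a2 -> a3) parameter
# conventions and return a report of any mismatched attribute; an empty string means the
# export/import round-trip is consistent.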
a1 = pyFAI.AzimuthalIntegrator()
a2 = pyFAI.AzimuthalIntegrator()
a3 = pyFAI.AzimuthalIntegrator()
a1.setFit2D(direct, centerX, centerY, tilt, tpr, pixelX, pixelY)
# print a1
a2.setPyFAI(**a1.getPyFAI())
a3.setFit2D(**a2.getFit2D())
res = ""
for e, o in [(a1, a2), (a1, a3), (a2, a3)]:
for key in ["dist", "poni1", "poni2", "rot1", "rot2", "rot3", "pixel1", "pixel2", "splineFile"]:
refv = e.__getattribute__(key)
obtv = o.__getattribute__(key)
try:
if round(abs(float(refv) - float(obtv))) != 0:
res += "%s: %s != %s" % (key, refv, obtv)
except TypeError as error:
if refv != obtv:
res += "%s: %s != %s" % (key, refv, obtv)
return res
class TestFIT2D(unittest.TestCase):
poniFile = "1893/Pilatus1M.poni"
def setUp(self):
"""Download files"""
self.poniFile = UtilsTest.getimage(self.__class__.poniFile)
def test_simple(self):
ref = pyFAI.load(self.poniFile)
obt = pyFAI.AzimuthalIntegrator()
obt.setFit2D(**ref.getFit2D())
for key in ["dist", "poni1", "poni2", "rot1", "rot2", "rot3", "pixel1", "pixel2", "splineFile"]:
refv = ref.__getattribute__(key)
obtv = obt.__getattribute__(key)
if refv is None:
self.assertEqual(refv, obtv , "%s: %s != %s" % (key, refv, obtv))
else:
self.assertAlmostEqual(refv, obtv , 4, "%s: %s != %s" % (key, refv, obtv))
def test_export(self):
res = testExport()
self.assertFalse(res, res)
res = testExport(tilt=20)
self.assertFalse(res, res)
res = testExport(tilt=20, tpr=80)
self.assertFalse(res, res)
res = testExport(tilt=20, tpr=580)
self.assertFalse(res, res)
class TestSPD(unittest.TestCase):
poniFile = "1893/Pilatus1M.poni"
def setUp(self):
"""Download files"""
self.poniFile = UtilsTest.getimage(self.__class__.poniFile)
def test_simple(self):
ref = pyFAI.load(self.poniFile)
# ref.rot1 = 0
# ref.rot2 = 0
# ref.rot3 = 0
obt = pyFAI.AzimuthalIntegrator()
# print ref.getFit2D()
# print ref.getSPD()
obt.setSPD(**ref.getSPD())
# print obt.getSPD()
for key in ["dist", "poni1", "poni2", "rot3", "pixel1", "pixel2", "splineFile"]:
refv = ref.__getattribute__(key)
obtv = obt.__getattribute__(key)
if refv is None:
self.assertEqual(refv, obtv , "%s: %s != %s" % (key, refv, obtv))
else:
self.assertAlmostEqual(refv, obtv , 4, "%s: %s != %s" % (key, refv, obtv))
def test_suite_all_Export():
testSuite = unittest.TestSuite()
testSuite.addTest(TestFIT2D("test_simple"))
testSuite.addTest(TestFIT2D("test_export"))
testSuite.addTest(TestSPD("test_simple"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_Export()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
pyFAI-0.11.0/test/test_all.py 0000755 0001773 0001774 00000011243 12544200060 016761 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
"""
Test suite for all pyFAI modules.
"""
__authors__ = ["Jérôme Kieffer"]
__contact__ = "jerome.kieffer@esrf.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "27/05/2015"
import sys
import os
import unittest
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger
logger = getLogger("test_all")
from .test_dummy import test_suite_all_dummy
from .test_geometry_refinement import test_suite_all_GeometryRefinement
from .test_azimuthal_integrator import test_suite_all_AzimuthalIntegration
from .test_histogram import test_suite_all_Histogram
from .test_peak_picking import test_suite_all_PeakPicking
from .test_geometry import test_suite_all_Geometry
from .test_mask import test_suite_all_Mask
from .test_openCL import test_suite_all_OpenCL
from .test_export import test_suite_all_Export
from .test_saxs import test_suite_all_Saxs
from .test_integrate import test_suite_all_Integrate1d
from .test_bilinear import test_suite_all_bilinear
from .test_distortion import test_suite_all_distortion
from .test_flat import test_suite_all_Flat
from .test_utils import test_suite_all_Utils
from .test_polarization import test_suite_all_polarization
from .test_detector import test_suite_all_detectors
from .test_convolution import test_suite_all_convolution
from .test_sparse import test_suite_all_sparse
from .test_csr import test_suite_all_OpenCL_CSR
from .test_blob_detection import test_suite_all_blob_detection
from .test_marchingsquares import test_suite_all_marchingsquares
from .test_io import test_suite_all_io
from .test_calibrant import test_suite_all_calibrant
from .test_split_pixel import test_suite_all_split
from .test_bispev import test_suite_all_bispev
from .test_bug_regression import test_suite_bug_regression
from .test_multi_geometry import test_suite_all_multi_geometry
from .test_watershed import test_suite_all_watershed
def test_suite_all():
testSuite = unittest.TestSuite()
testSuite.addTest(test_suite_all_dummy())
testSuite.addTest(test_suite_all_Histogram())
testSuite.addTest(test_suite_all_GeometryRefinement())
testSuite.addTest(test_suite_all_AzimuthalIntegration())
testSuite.addTest(test_suite_all_PeakPicking())
testSuite.addTest(test_suite_all_Geometry())
testSuite.addTest(test_suite_all_Mask())
testSuite.addTest(test_suite_all_OpenCL())
testSuite.addTest(test_suite_all_Export())
testSuite.addTest(test_suite_all_Saxs())
testSuite.addTest(test_suite_all_Integrate1d())
testSuite.addTest(test_suite_all_bilinear())
testSuite.addTest(test_suite_all_distortion())
testSuite.addTest(test_suite_all_Flat())
testSuite.addTest(test_suite_all_Utils())
testSuite.addTest(test_suite_all_detectors())
testSuite.addTest(test_suite_all_convolution())
testSuite.addTest(test_suite_all_sparse())
testSuite.addTest(test_suite_all_OpenCL_CSR())
testSuite.addTest(test_suite_all_blob_detection())
testSuite.addTest(test_suite_all_marchingsquares())
testSuite.addTest(test_suite_all_io())
testSuite.addTest(test_suite_all_calibrant())
testSuite.addTest(test_suite_all_polarization())
testSuite.addTest(test_suite_all_split())
testSuite.addTest(test_suite_all_bispev())
testSuite.addTest(test_suite_bug_regression())
testSuite.addTest(test_suite_all_watershed())
testSuite.addTest(test_suite_all_multi_geometry())
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all()
runner = unittest.TextTestRunner()
if runner.run(mysuite).wasSuccessful():
UtilsTest.clean_up()
else:
sys.exit(1)
pyFAI-0.11.0/test/mactrace.py 0000644 0001773 0001774 00000003263 12321446554 016746 0 ustar kieffer kieffer 0000000 0000000 # This is a simple module to help search for segmentation faults.
# It works on any operating system but I needed it on MacOS-X as I was not
# able to use GDB as on linux.
#
# Usage: python -m mactrace test.py
#
# It prints the line number of every executed statement.
#
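# A hypothetical session (script name and output file are placeholders; the line
# format matches TraceWriter.trace below):
#     $ python mactrace.py -o trace.log my_script.py
#     trace.log then contains one entry per executed statement, e.g.
#     call, my_script.py:1
#     line, my_script.py:1
#     line, my_script.py:2
#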
import sys, os
from optparse import OptionParser
class TraceWriter(object):
def __init__(self, myFile=sys.stdout):
self.file = myFile
def trace(self, frame, event, arg):
self.file.write("%s, %s:%d%s" % (event, frame.f_code.co_filename, frame.f_lineno, os.linesep))
self.file.flush()
return self.trace
def main():
usage = "mactrace.py [-o output_file_path] scriptfile [arg] ..."
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-o', '--outfile', dest="outfile",
help="Save trace to ", default=None)
if not sys.argv[1:]:
parser.print_usage()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if options.outfile:
twriter = TraceWriter(open(options.outfile, "w"))
else:
twriter = TraceWriter()
sys.settrace(twriter.trace)
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
eval(code, globs)
else:
parser.print_usage()
return parser
# When invoked as main program, invoke the profiler on a script
if __name__ == '__main__':
main()
pyFAI-0.11.0/test/blob.py 0000644 0001773 0001774 00000007537 12406056407 016113 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/python
# coding: utf-8
import sys, scipy
import matplotlib
matplotlib.use('Qt4Agg')
import pylab
from math import sqrt
import fabio,numpy
from utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
from pyFAI.blob_detection import BlobDetection
from pyFAI.detectors import detector_factory
def somme(im):
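# "somme" (French for "sum"): add the four direct neighbours to every inner pixel;
# the diagonal neighbours are left commented out in the line below.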
im[1:-1, 1:-1] += im[:-2, 1:-1] + im[2:, 1:-1] + im[1:-1, :-2] + im[1:-1, 2:] #+ im[:-2, :-2] + im[2:, 2:] + im[2:, :-2] + im[:-2, 2:]
return im
def image_test():
"Creating a test image containing several gaussian of several sizes"
img = numpy.zeros((128*4,128*4))
a = numpy.linspace(0.5, 8, 16)
xc = [64,64,64,64,192,192,192,192,320,320,320,320,448,448,448,448]
yc = [64,192,320,448,64,192,320,448,64,192,320,448,64,192,320,448]
cpt = 0
for sigma in a:
img = make_gaussian(img,sigma,xc[cpt],yc[cpt])
cpt = cpt + 1
img = add_noise(img, 0.1)
return img
def image_test_rings():
"Creating a test image containing gaussian spots on several rings"
rings = 10
mod = 50
detector = detector_factory("Titan")
sigma = detector.pixel1 * 4
shape = detector.max_shape
ai = AzimuthalIntegrator(detector=detector)
ai.setFit2D(1000, 1000, 1000)
r = ai.rArray(shape)
r_max = r.max()
chi = ai.chiArray(shape)
img = numpy.zeros(shape)
modulation = (1 + numpy.sin(5 * r + chi * mod))
for radius in numpy.linspace(0, r_max, rings):
img += numpy.exp(-(r - radius) ** 2 / (2 * (sigma * sigma)))
img *= modulation
img = add_noise(img, 0.0)
return img
def add_noise(img,rate):
noise = numpy.random.random(img.shape) * rate
return img+noise
def make_gaussian(im,sigma,xc,yc):
"Creating 2D gaussian to be put in a test image"
e = 1
angle = 0
sx = sigma * (1+e)
sy = sigma * (1-e)
size = int( 8*sigma +1 )
if size%2 == 0 :
size += 1
x = numpy.arange(0, size, 1, float)
y = x[:,numpy.newaxis]
# x = x * 2
x0 = y0 = size // 2
gausx = numpy.exp(-4*numpy.log(2) * (x-x0)**2 / sx**2)
gausy = numpy.exp(-4*numpy.log(2) * (y-y0)**2 / sy**2)
gaus = 0.01 + gausx * gausy
im[xc - size // 2:xc + size // 2 + 1, yc - size // 2:yc + size // 2 + 1] = scipy.ndimage.rotate(gaus, angle, reshape=False)
return im
if len(UtilsTest.options.args) > 0:
data = fabio.open(UtilsTest.options.args[0]).data
if len(UtilsTest.options.args) > 1:
msk = fabio.open(UtilsTest.options.args[1]).data
else:
msk = None
else:
data = image_test_rings()
msk = None
bd = BlobDetection(data, mask=msk)#, cur_sigma=0.25, init_sigma=numpy.sqrt(2)/2, dest_sigma=numpy.sqrt(2), scale_per_octave=2)
pylab.ion()
f=pylab.figure(1)
ax = f.add_subplot(111)
ax.imshow(numpy.log1p(data), interpolation = 'nearest')
for i in range(5):
print ('Octave #%i' %i)
bd._one_octave(shrink=True, refine = True, n_5 = False)
print("Octave #%i Total kp: %i" % (i, bd.keypoints.size))
# bd._one_octave(False, True ,False)
print ('Final size of keypoints : %i'% bd.keypoints.size)
i = 0
# for kp in bd.keypoints:
# ds = kp.sigma
# ax.annotate("", xy=(kp.x, kp.y), xytext=(kp.x+ds, kp.y+ds),
# arrowprops=dict(facecolor='blue', shrink=0.05),)
sigma = bd.keypoints.sigma
for i,c in enumerate("ygrcmykw"):
# j = 2 ** i
m = numpy.logical_and(sigma >= i, sigma < (i + 1))
ax.plot(bd.keypoints[m].x, bd.keypoints[m].y, "o" + c, label=str(i))
ax.legend()
if sigma.size > 0:
h = pylab.figure(2)
x, y, o = pylab.hist(sigma, bins=100)
h.show()
index = numpy.where(x == x.max())
kp = bd.keypoints[bd.keypoints.sigma > y[index]]
else : kp = bd.keypoints
# pylab.figure()
# pylab.imshow(numpy.log1p(data), interpolation = 'nearest')
# pylab.plot(kp.x,kp.y,'og')
f.show()
raw_input()
pyFAI-0.11.0/test/test_azimuthal_integrator.py 0000755 0001773 0001774 00000034075 12544200060 022455 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"test suite for Azimuthal integrator class"
from __future__ import absolute_import, print_function, division
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "22/06/2015"
import unittest
import os
import numpy
import logging
import time
import sys
import fabio
import tempfile
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger, recursive_delete
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI.azimuthalIntegrator import AzimuthalIntegrator
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
tmp_dir = UtilsTest.tempdir
try:
from pyFAI.utils import input
except ImportError:
pass
class TestAzimPilatus(unittest.TestCase):
img = UtilsTest.getimage("1884/Pilatus6M.cbf")
def setUp(self):
"""Download files"""
self.data = fabio.open(self.img).data
self.ai = AzimuthalIntegrator(detector="pilatus6m")
self.ai.setFit2D(300, 1326, 1303)
def test_separate(self):
bragg, amorphous = self.ai.separate(self.data)
self.assert_(amorphous.max() < bragg.max(), "bragg is more intense than amorphous")
self.assert_(amorphous.std() < bragg.std(), "bragg is more variatic than amorphous")
class TestAzimHalfFrelon(unittest.TestCase):
"""basic test"""
fit2dFile = '1460/fit2d.dat'
halfFrelon = "1464/LaB6_0020.edf"
splineFile = "1461/halfccd.spline"
poniFile = "1463/LaB6.poni"
ai = None
fit2d = None
tmpfiles = {"cython": os.path.join(tmp_dir, "cython.dat"),
"cythonSP": os.path.join(tmp_dir, "cythonSP.dat"),
"numpy": os.path.join(tmp_dir, "numpy.dat")}
def setUp(self):
"""Download files"""
self.fit2dFile = UtilsTest.getimage(self.__class__.fit2dFile)
self.halfFrelon = UtilsTest.getimage(self.__class__.halfFrelon)
self.splineFile = UtilsTest.getimage(self.__class__.splineFile)
poniFile = UtilsTest.getimage(self.__class__.poniFile)
with open(poniFile) as f:
data = []
for line in f:
if line.startswith("SplineFile:"):
data.append("SplineFile: " + self.splineFile)
else:
data.append(line.strip())
self.poniFile = os.path.join(tmp_dir, os.path.basename(poniFile))
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
with open(self.poniFile, "w") as f:
f.write(os.linesep.join(data))
self.fit2d = numpy.loadtxt(self.fit2dFile)
self.ai = AzimuthalIntegrator()
self.ai.load(self.poniFile)
self.data = fabio.open(self.halfFrelon).data
for tmpfile in self.tmpfiles.values():
if os.path.isfile(tmpfile):
os.unlink(tmpfile)
def tearDown(self):
"""Remove temporary files"""
for fn in self.tmpfiles.values():
if os.path.exists(fn):
os.unlink(fn)
def test_numpy_vs_fit2d(self):
"""
Compare numpy histogram with results of fit2d
"""
# logger.info(self.ai.__repr__())
tth, I = self.ai.xrpd_numpy(self.data,
len(self.fit2d), self.tmpfiles["numpy"], correctSolidAngle=False)
rwp = Rwp((tth, I), self.fit2d.T)
logger.info("Rwp numpy/fit2d = %.3f" % rwp)
if logger.getEffectiveLevel() == logging.DEBUG:
logger.info("Plotting results")
fig = pylab.figure()
fig.suptitle('Numpy Histogram vs Fit2D: Rwp=%.3f' % rwp)
sp = fig.add_subplot(111)
sp.plot(self.fit2d.T[0], self.fit2d.T[1], "-b", label='fit2d')
sp.plot(tth, I, "-r", label="numpy histogram")
handles, labels = sp.get_legend_handles_labels()
fig.legend(handles, labels)
fig.show()
input("Press enter to quit")
assert rwp < 11
def test_cython_vs_fit2d(self):
"""
Compare cython histogram with results of fit2d
"""
# logger.info(self.ai.__repr__())
tth, I = self.ai.xrpd_cython(self.data,
len(self.fit2d), self.tmpfiles["cython"], correctSolidAngle=False, pixelSize=None)
# logger.info(tth)
# logger.info(I)
rwp = Rwp((tth, I), self.fit2d.T)
logger.info("Rwp cython/fit2d = %.3f" % rwp)
if logger.getEffectiveLevel() == logging.DEBUG:
logger.info("Plotting results")
fig = pylab.figure()
fig.suptitle('Cython Histogram vs Fit2D: Rwp=%.3f' % rwp)
sp = fig.add_subplot(111)
sp.plot(self.fit2d.T[0], self.fit2d.T[1], "-b", label='fit2d')
sp.plot(tth, I, "-r", label="cython")
handles, labels = sp.get_legend_handles_labels()
fig.legend(handles, labels)
fig.show()
input("Press enter to quit")
assert rwp < 11
def test_cythonSP_vs_fit2d(self):
"""
Compare cython splitPixel with results of fit2d
"""
logger.info(self.ai.__repr__())
pos = self.ai.cornerArray(self.data.shape)
t0 = time.time()
logger.info("in test_cythonSP_vs_fit2d Before SP")
tth, I = self.ai.xrpd_splitPixel(self.data,
len(self.fit2d),
self.tmpfiles["cythonSP"],
correctSolidAngle=False)
logger.info("in test_cythonSP_vs_fit2d Before")
t1 = time.time() - t0
# logger.info(tth)
# logger.info(I)
rwp = Rwp((tth, I), self.fit2d.T)
logger.info("Rwp cythonSP(t=%.3fs)/fit2d = %.3f" % (t1, rwp))
if logger.getEffectiveLevel() == logging.DEBUG:
logger.info("Plotting results")
fig = pylab.figure()
fig.suptitle('CythonSP Histogram vs Fit2D: Rwp=%.3f' % rwp)
sp = fig.add_subplot(111)
sp.plot(self.fit2d.T[0], self.fit2d.T[1], "-b", label='fit2d')
sp.plot(tth, I, "-r", label="cython")
handles, labels = sp.get_legend_handles_labels()
fig.legend(handles, labels)
fig.show()
input("Press enter to quit")
assert rwp < 11
def test_cython_vs_numpy(self):
"""
Compare cython histogram with numpy histogram
"""
# logger.info(self.ai.__repr__())
data = self.data
tth_np, I_np = self.ai.xrpd_numpy(data,
len(self.fit2d),
correctSolidAngle=False)
tth_cy, I_cy = self.ai.xrpd_cython(data,
len(self.fit2d),
correctSolidAngle=False)
logger.info("before xrpd_splitPixel")
tth_sp, I_sp = self.ai.xrpd_splitPixel(data,
len(self.fit2d),
correctSolidAngle=False)
logger.info("After xrpd_splitPixel")
rwp = Rwp((tth_cy, I_cy), (tth_np, I_np))
logger.info("Rwp = %.3f" % rwp)
if logger.getEffectiveLevel() == logging.DEBUG:
logging.info("Plotting results")
fig = pylab.figure()
fig.suptitle('Numpy Histogram vs Cython: Rwp=%.3f' % rwp)
sp = fig.add_subplot(111)
sp.plot(self.fit2d.T[0], self.fit2d.T[1], "-y", label='fit2d')
sp.plot(tth_np, I_np, "-b", label='numpy')
sp.plot(tth_cy, I_cy , "-r", label="cython")
sp.plot(tth_sp, I_sp , "-g", label="SplitPixel")
handles, labels = sp.get_legend_handles_labels()
fig.legend(handles, labels)
fig.show()
input("Press enter to quit")
assert rwp < 3
def test_separate(self):
"test separate with a mask. issue #209 regression test"
msk = self.data < 100
bragg, amorphous = self.ai.separate(self.data, mask=msk)
self.assert_(amorphous.max() < bragg.max(), "bragg is more intense than amorphous")
self.assert_(amorphous.std() < bragg.std(), "bragg is more variatic than amorphous")
class TestFlatimage(unittest.TestCase):
"""test the caking of a flat image"""
epsilon = 1e-4
def test_splitPixel(self):
data = numpy.ones((2000, 2000), dtype="float64")
ai = AzimuthalIntegrator(0.1, 1e-2, 1e-2, pixel1=1e-5, pixel2=1e-5)
I = ai.xrpd2_splitPixel(data, 2048, 2048, correctSolidAngle=False, dummy=-1.0)[0]
# I = ai.xrpd2(data, 2048, 2048, correctSolidAngle=False, dummy= -1.0)
if logger.getEffectiveLevel() == logging.DEBUG:
logging.info("Plotting results")
fig = pylab.figure()
fig.suptitle('caking of a flat image: SplitPixel')
sp = fig.add_subplot(111)
sp.imshow(I, interpolation="nearest")
fig.show()
input("Press enter to quit")
I[I == -1.0] = 1.0
assert abs(I.min() - 1.0) < self.epsilon
assert abs(I.max() - 1.0) < self.epsilon
def test_splitBBox(self):
data = numpy.ones((2000, 2000), dtype="float64")
ai = AzimuthalIntegrator(0.1, 1e-2, 1e-2, pixel1=1e-5, pixel2=1e-5)
I = ai.xrpd2_splitBBox(data, 2048, 2048, correctSolidAngle=False, dummy=-1.0)[0]
# I = ai.xrpd2(data, 2048, 2048, correctSolidAngle=False, dummy= -1.0)
if logger.getEffectiveLevel() == logging.DEBUG:
logging.info("Plotting results")
fig = pylab.figure()
fig.suptitle('caking of a flat image: SplitBBox')
sp = fig.add_subplot(111)
sp.imshow(I, interpolation="nearest")
fig.show()
input("Press enter to quit")
I[I == -1.0] = 1.0
assert abs(I.min() - 1.0) < self.epsilon
assert abs(I.max() - 1.0) < self.epsilon
class test_saxs(unittest.TestCase):
saxsPilatus = "1492/bsa_013_01.edf"
maskFile = "1491/Pcon_01Apr_msk.edf"
maskRef = "1490/bioSaxsMaskOnly.edf"
ai = AzimuthalIntegrator(detector="Pilatus1M")
def setUp(self):
self.edfPilatus = UtilsTest.getimage(self.__class__.saxsPilatus)
self.maskFile = UtilsTest.getimage(self.__class__.maskFile)
self.maskRef = UtilsTest.getimage(self.__class__.maskRef)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
def test_mask(self):
"""test the generation of mask"""
data = fabio.open(self.edfPilatus).data
mask = fabio.open(self.maskFile).data
self.assert_(abs(self.ai.create_mask(data, mask=mask).astype(int) - fabio.open(self.maskRef).data).max() == 0, "test without dummy")
# self.assert_(abs(self.ai.create_mask(data, mask=mask, dummy=-48912, delta_dummy=40000).astype(int) - fabio.open(self.maskDummy).data).max() == 0, "test_dummy")
class TestSetter(unittest.TestCase):
def setUp(self):
self.ai = AzimuthalIntegrator()
shape = (10, 15)
self.rnd1 = numpy.random.random(shape).astype(numpy.float32)
self.rnd2 = numpy.random.random(shape).astype(numpy.float32)
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
fd, self.edf1 = tempfile.mkstemp(".edf", "testAI1", tmp_dir)
os.close(fd)
fd, self.edf2 = tempfile.mkstemp(".edf", "testAI2", tmp_dir)
os.close(fd)
fabio.edfimage.edfimage(data=self.rnd1).write(self.edf1)
fabio.edfimage.edfimage(data=self.rnd2).write(self.edf2)
def tearDown(self):
recursive_delete(tmp_dir)
def test_flat(self):
self.ai.set_flatfiles((self.edf1, self.edf2), method="mean")
self.assert_(self.ai.flatfiles == "%s(%s,%s)" % ("mean", self.edf1, self.edf2), "flatfiles string is OK")
self.assert_(abs(self.ai.flatfield - 0.5 * (self.rnd1 + self.rnd2)).max() == 0, "Flat array is OK")
def test_dark(self):
self.ai.set_darkfiles((self.edf1, self.edf2), method="mean")
self.assert_(self.ai.darkfiles == "%s(%s,%s)" % ("mean", self.edf1, self.edf2), "darkfiles string is OK")
self.assert_(abs(self.ai.darkcurrent - 0.5 * (self.rnd1 + self.rnd2)).max() == 0, "Dark array is OK")
def test_suite_all_AzimuthalIntegration():
testSuite = unittest.TestSuite()
testSuite.addTest(TestAzimHalfFrelon("test_cython_vs_fit2d"))
testSuite.addTest(TestAzimHalfFrelon("test_numpy_vs_fit2d"))
testSuite.addTest(TestAzimHalfFrelon("test_cythonSP_vs_fit2d"))
testSuite.addTest(TestAzimHalfFrelon("test_cython_vs_numpy"))
testSuite.addTest(TestAzimHalfFrelon("test_separate"))
testSuite.addTest(TestFlatimage("test_splitPixel"))
testSuite.addTest(TestFlatimage("test_splitBBox"))
testSuite.addTest(TestSetter("test_flat"))
testSuite.addTest(TestSetter("test_dark"))
testSuite.addTest(TestAzimPilatus("test_separate"))
# This test is known to be broken ...
testSuite.addTest(test_saxs("test_mask"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_AzimuthalIntegration()
runner = unittest.TextTestRunner()
runner.run(mysuite)
UtilsTest.clean_up()
pyFAI-0.11.0/test/test_watershed.py 0000644 0001773 0001774 00000004434 12527541311 020210 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import division, print_function, absolute_import
"test suite for inverse watershed space segmenting code."
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "07/04/2015"
import unittest
import numpy
import sys
import os
import fabio
import tempfile
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger, recursive_delete
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
import pyFAI.watershed
class TestWatershed(unittest.TestCase):
fname = "1883/Pilatus1M.edf"
def setUp(self):
self.data = fabio.open(UtilsTest.getimage(self.fname)).data
def test_init(self):
w = pyFAI.watershed.InverseWatershed(data=self.data)
w.init()
print(len(w.regions))
from sys import getsizeof
print(getsizeof(w))
w.__dealloc__()
print(getsizeof(w))
def test_suite_all_watershed():
testSuite = unittest.TestSuite()
testSuite.addTest(TestWatershed("test_init"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_watershed()
runner = unittest.TextTestRunner()
runner.run(mysuite)
UtilsTest.clean_up()
pyFAI-0.11.0/test/lima_cfg.json 0000644 0001773 0001774 00000000002 12553735122 017234 0 ustar kieffer kieffer 0000000 0000000 {} pyFAI-0.11.0/test/debug_split_pixel.py 0000644 0001773 0001774 00000001450 12422256263 020663 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/python
import pyFAI, numpy
img = numpy.zeros((512,512))
for i in range(1,6):img[i*100,i*100]=1
det = pyFAI.detectors.Detector(1e-4,1e-4)
det.shape=(512,512)
ai=pyFAI.AzimuthalIntegrator(1,detector=det)
import pylab
from utilstest import Rwp
results = {}
for i, meth in enumerate(["cython", "splitbbox", "splitpixel", "csr_no", "csr_bbox", "csr_full"]):
tth, I = ai.integrate1d(img, 10000, method=meth, unit="2th_deg")
pylab.plot(tth, I + i * 1e-3, label=meth)
ai.reset()
results[meth]=tth, I
print("no_split R=%.3f" % Rwp(results["csr_no"], results["cython"]))
print("split_bbox R=%.3f" % Rwp(results["csr_bbox"], results["splitbbox"]))
print("split_full R=%.3f" % Rwp(results["csr_full"], results["splitpixel"]))
pylab.legend()
pylab.ion()
pylab.show()
raw_input("enter_to_quit")
pyFAI-0.11.0/test/memleak.py 0000644 0001773 0001774 00000004772 12527541311 016603 0 ustar kieffer kieffer 0000000 0000000 from __future__ import absolute_import, print_function, division, with_statement
import sys, os
import distutils.util
platform = distutils.util.get_platform()
architecture = "lib.%s-%i.%i" % (platform, sys.version_info[0], sys.version_info[1])
sys.path.insert(0, os.path.join("build", architecture))
import numpy
import pyFAI
print(pyFAI)
import sys
import gc
def get_mem():
"""
Return the memory occupied by the current process, in MB, for memory-leak hunting
"""
pid = os.getpid()
if os.path.exists("/proc/%i/status" % pid):
for l in open("/proc/%i/status" % pid):
if l.startswith("VmRSS"):
mem = int(l.split(":", 1)[1].split()[0]) / 1024.
else:
mem = 0
return mem
pos0 = numpy.arange(2048 * 2048).reshape(2048, 2048)
dpos0 = numpy.ones_like(pos0)
print("Instancition 1")
lut = pyFAI.splitBBoxLUT.HistoBBox1d(pos0, dpos0, bins=800)
print("Size of LUT: %s" % lut.lut.nbytes)
print("ref count of lut.lut: %s %s" % (sys.getrefcount(lut), sys.getrefcount(lut.lut)))
print(sys.getrefcount(lut.cpos0), sys.getrefcount(lut.dpos0), sys.getrefcount(lut.lut))
print()
print("Cpos0, refcount=: %s %s" % (sys.getrefcount(lut.cpos0), len(gc.get_referrers(lut.cpos0))))
for obj in gc.get_referrers(lut.cpos0):
print("Cpos0: %s" % str(obj)[:100])
print()
# print(gc.get_referrers(lut.dpos0))
print("Lut, refcount=: %s %s" % (sys.getrefcount(lut.lut), len(gc.get_referrers(lut.lut))))
for obj in gc.get_referrers(lut.lut):
print("Lut: %s" % str(obj)[:100])
import pyFAI.splitBBoxCSR
lut = pyFAI.splitBBoxCSR.HistoBBox1d(pos0, dpos0, bins=800)
print("Size of LUT: %s" % lut.nnz)
print("ref count of lut.lut: %s %s" % (sys.getrefcount(lut), sys.getrefcount(lut.data)))
print(sys.getrefcount(lut.cpos0), sys.getrefcount(lut.dpos0), sys.getrefcount(lut.data))
print()
print("Cpos0, refcount=: %s %s" % (sys.getrefcount(lut.cpos0), len(gc.get_referrers(lut.cpos0))))
for obj in gc.get_referrers(lut.cpos0):
print("Cpos0: %s" % str(obj)[:100])
print()
# print(gc.get_referrers(lut.dpos0))
print("Lut, refcount=: %s %s" % (sys.getrefcount(lut.data), len(gc.get_referrers(lut.data))))
for obj in gc.get_referrers(lut.data):
print("Lut: %s" % str(obj)[:100])
print("Finished ")
while True:
lut = pyFAI.splitBBoxLUT.HistoBBox1d(pos0, dpos0, bins=800)
print(sys.getrefcount(lut.lut))
lut.integrate(numpy.random.random(pos0.shape))
print("Memory: %s, lut size: %s, refcount: %s" % (get_mem(), lut.lut.nbytes / 2 ** 20, sys.getrefcount(lut.lut)))
pyFAI-0.11.0/test/test_geometry_refinement.py 0000755 0001773 0001774 00000025676 12527541311 022307 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"test suite for Geometric Refinement class"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "05/03/2015"
import unittest
import os
import numpy
import sys
import random
# Nota: UtilsTest is a static class with initialization at import.
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
try:
from .utilstest import UtilsTest, getLogger
except (ValueError, SystemError, ImportError):
from utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI import geometryRefinement
GeometryRefinement = geometryRefinement.GeometryRefinement
class TestGeometryRefinement(unittest.TestCase):
""" tests geometric refinements with or without spline"""
def test_noSpline(self):
"""tests geometric refinements without spline"""
pixelSize = [1.5e-5, 1.5e-5]
data = [
[1585.9999996029055, 2893.9999991192408, 0.53005649383067788],
[1853.9999932086102, 2873.0000001637909, 0.53005649383067788],
[2163.9999987531855, 2854.9999987738884, 0.53005649383067788],
[2699.9999977914931, 2893.9999985831755, 0.53005649383067788],
[3186.9999966428777, 3028.9999985930604, 0.53005649383067788],
[3595.0000039534661, 3167.0000022967461, 0.53005649383067788],
[3835.0000007197755, 3300.0000002536408, 0.53005649383067788],
[1252.0000026881371, 2984.0000056421914, 0.53005649383067788],
[576.99992486352289, 3220.0000014469815, 0.53005649383067788],
[52.999989546760531, 3531.9999975314959, 0.53005649383067788],
[520.99999862452842, 2424.0000005943775, 0.65327673902147754],
[1108.0000045189499, 2239.9999793751085, 0.65327673902147754],
[2022.0000098770186, 2136.9999921020726, 0.65327673902147754],
[2436.000002384907, 2137.0000034435734, 0.65327673902147754],
[2797.9999973906524, 2169.9999849019205, 0.65327673902147754],
[3516.0000041508365, 2354.0000059814265, 0.65327673902147754],
[3870.9999995625412, 2464.9999964079757, 0.65327673902147754],
[3735.9999952703465, 2417.9999888223151, 0.65327673902147754],
[3374.0001428680412, 2289.9999885080188, 0.65327673902147754],
[1709.99999872134, 2165.0000006693272, 0.65327673902147754],
[2004.0000081015958, 1471.0000012076148, 0.7592182246175333],
[2213.0000015244159, 1464.0000243454842, 0.7592182246175333],
[2115.9999952456633, 1475.0000015176133, 0.7592182246175333],
[2242.0000023736206, 1477.0000046142911, 0.7592182246175333],
[2463.9999967564663, 1464.0000011704756, 0.7592182246175333],
[2986.000011249705, 1540.9999994523619, 0.7592182246175333],
[2760.0000031761901, 1514.0000002442944, 0.7592182246175333],
[3372.0000025298395, 1617.9999995345927, 0.7592182246175333],
[3187.0000005152106, 1564.9999952212884, 0.7592182246175333],
[3952.0000062252166, 1765.0000234029771, 0.7592182246175333],
[200.99999875941003, 1190.0000046393075, 0.85451320177642376],
[463.00000674257342, 1121.9999956648539, 0.85451320177642376],
[1455.0000001416358, 936.99999830341949, 0.85451320177642376],
[1673.9999958962637, 927.99999934328309, 0.85451320177642376],
[2492.0000021823594, 922.00000383122256, 0.85451320177642376],
[2639.9999948599761, 936.00000247819059, 0.85451320177642376],
[3476.9999490636446, 1027.9999838362451, 0.85451320177642376],
[3638.9999965727247, 1088.0000258143732, 0.85451320177642376],
[4002.0000051610787, 1149.9999925115812, 0.85451320177642376],
[2296.9999822277705, 908.00000939182382, 0.85451320177642376],
[266.00000015817864, 576.00000049157074, 0.94195419730133967],
[364.00001493127616, 564.00000136247968, 0.94195419730133967],
[752.99999958240187, 496.9999948653093, 0.94195419730133967],
[845.99999758606646, 479.00000730401808, 0.94195419730133967],
[1152.0000082161678, 421.9999937722655, 0.94195419730133967],
[1215.0000019951258, 431.00019867504369, 0.94195419730133967],
[1728.0000096657914, 368.00000247754218, 0.94195419730133967],
[2095.9999932673395, 365.99999862304219, 0.94195419730133967],
[2194.0000006543587, 356.99999967534075, 0.94195419730133967],
[2598.0000021676074, 386.99999979901884, 0.94195419730133967],
[2959.9998766657627, 410.00000323183838, 0.94195419730133967],
]
data = numpy.array(data, dtype=numpy.float64)
# tth = data[:,2]
ring = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5,
5, 5, 5, 5, 5]
ds = [ 4.15695 , 2.93940753, 2.4000162 , 2.078475 , 1.85904456,
1.69706773, 1.46970377, 1.38565 , 1.31454301, 1.25336758,
1.2000081 , 1.15293049, 1.11099162, 1.0392375 , 1.00820847,
0.97980251, 0.95366973, 0.92952228, 0.90712086, 0.88626472,
0.84853387, 0.83139 , 0.81524497, 0.8000054 , 0.77192624,
0.75895176, 0.73485188, 0.72363211, 0.71291104, 0.7026528 ,
0.692825 , 0.68339837, 0.67434634, 0.65727151, 0.64920652,
0.64143131, 0.63392893, 0.62668379, 0.61968152, 0.61290884,
0.60000405, 0.59385 , 0.58788151, 0.58208943, 0.57646525,
0.571001 , 0.56568924, 0.55549581, 0.55060148, 0.54583428,
0.54118879, 0.53224291, 0.52793318, 0.52372647, 0.51961875,
0.51560619, 0.51168517, 0.50785227, 0.50410423, 0.50043797,
0.49685056] # LaB6
wavelength = 1.54e-10
calibrant = pyFAI.calibrant.Calibrant(dSpacing=ds, wavelength=wavelength)
# calibrant = pyFAI.calibrant.ALL_CALIBRANTS["LaB6"]
data[:, 2] = ring
r = GeometryRefinement(data, pixel1=pixelSize[0], pixel2=pixelSize[1],
wavelength=wavelength, calibrant=calibrant)
r.refine2(10000000)
# ref = numpy.array([0.089652, 0.030970, 0.027668, -0.699407, 0.010067, 0.000001])
ref = numpy.array([0.089750, 0.030897, 0.027172, -0.704730, 0.010649, 3.51e-06])
self.assertAlmostEqual(abs(numpy.array(r.param) - ref).max(), 0.0, 3, "ref=%s obt=%s delta=%s" % (list(ref), r.param, abs(numpy.array(r.param) - ref)))
def test_Spline(self):
"""tests geometric refinements with spline"""
splineFine = UtilsTest.getimage("1900/frelon.spline")
data = [[795, 288, 0.3490658503988659],
[890, 260, 0.3490658503988659],
[948, 249, 0.3490658503988659],
[710, 325, 0.3490658503988659],
[601, 392, 0.3490658503988659],
[1167, 248, 0.3490658503988659],
[1200, 340, 0.3490658503988659],
[1319, 285, 0.3490658503988659],
[1362, 302, 0.3490658503988659],
[1436, 338, 0.3490658503988659],
[1526, 397, 0.3490658503988659],
[1560, 424, 0.3490658503988659],
[1615, 476, 0.3490658503988659],
[1662, 529, 0.3490658503988659],
[1742, 650, 0.3490658503988659],
[1778, 727, 0.3490658503988659],
[1824, 891, 0.3490658503988659],
[1831, 947, 0.3490658503988659],
[1832, 1063, 0.3490658503988659],
[1828, 1106, 0.3490658503988659],
[1828, 1106, 0.3490658503988659],
[1810, 1202, 0.3490658503988659],
[1775, 1307, 0.3490658503988659],
[1724, 1407, 0.3490658503988659],
[1655, 1502, 0.3490658503988659],
[1489, 1649, 0.3490658503988659],
[1397, 1700, 0.3490658503988659],
[1251, 1752, 0.3490658503988659],
[1126, 1772, 0.3490658503988659],
[984, 1770, 0.3490658503988659],
[907, 1758, 0.3490658503988659],
[801, 1728, 0.3490658503988659],
[696, 1681, 0.3490658503988659],
[634, 1644, 0.3490658503988659],
[568, 1596, 0.3490658503988659],
[520, 1553, 0.3490658503988659],
[453, 1479, 0.3490658503988659],
[403, 1408, 0.3490658503988659],
[403, 1408, 0.3490658503988659],
[363, 1337, 0.3490658503988659],
[320, 1228, 0.3490658503988659],
[303, 1161, 0.3490658503988659],
[287, 1023, 0.3490658503988659],
[287, 993, 0.3490658503988659],
[304, 846, 0.3490658503988659],
[329, 758, 0.3490658503988659],
[341, 726, 0.3490658503988659],
[402, 606, 0.3490658503988659],
[437, 555, 0.3490658503988659],
[513, 467, 0.3490658503988659]
]
# data = numpy.array(data)
random.shuffle(data)
tth = data[0][2]
# data[:, 2] = ring
wl = 2e-10 * numpy.sin(tth / 2.0)
ds = [1.0]
calibrant = pyFAI.calibrant.Calibrant(dSpacing=ds, wavelength=wl)
# print tth, wl, ds, 2 * ds[0] * numpy.sin(tth / 2)
r2 = GeometryRefinement(data, dist=0.1, splineFile=splineFine, wavelength=wl, calibrant=calibrant)
# r2.poni1 = 5e-2
# r2.poni2 = 5e-2
r2.rot1_max = 0.0001
r2.rot1_min = -0.0001
r2.rot2_max = 0.0001
r2.rot2_min = -0.0001
r2.rot3_max = 0.0001
r2.rot3_min = -0.0001
r2.refine2(10000000)
ref2 = numpy.array([0.1, 4.917310e-02, 4.722438e-02, 0 , 0. , 0.00000])
# print "ref", ref2
# print "obt", r2.param
for i, key in enumerate(("dist", "poni1", "poni2", "rot1", "rot2", "rot3")):
self.assertAlmostEqual(ref2[i], r2.__getattribute__(key), 3,
"%s is %s, I expected %s%s%s" % (key, r2.__getattribute__(key) , ref2[i], os.linesep, r2))
# assert abs(numpy.array(r2.param) - ref2).max() < 1e-3
def test_suite_all_GeometryRefinement():
testSuite = unittest.TestSuite()
testSuite.addTest(TestGeometryRefinement("test_noSpline"))
testSuite.addTest(TestGeometryRefinement("test_Spline"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_GeometryRefinement()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/profileDistortionCSR.py 0000644 0001773 0001774 00000010644 12527541311 021252 0 ustar kieffer kieffer 0000000 0000000 # !/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Azimuthal integration
# https://forge.epn-campus.eu/projects/azimuthal
#
# File: "$Id$"
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, with_statement, division
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
__status__ = "development"
import unittest
from utilstest import UtilsTest, getLogger
import logging, threading
import types, os, sys
import numpy
logger = logging.getLogger("pyFAI.distortion")
logging.basicConfig(level=logging.INFO)
from math import ceil, floor
pyFAI = sys.modules["pyFAI"]
from pyFAI import detectors, ocl_azim_lut, _distortion, _distortionCSR, distortion
from pyFAI.utils import timeit
import fabio
# import pyFAI._distortion
# import pyFAI._distortionCSR
def test():
# working on 256x256
# x, y = numpy.ogrid[:256, :256]
# grid = numpy.logical_or(x % 10 == 0, y % 10 == 0) + numpy.ones((256, 256), numpy.float32)
# det = detectors.FReLoN("frelon_8_8.spline")
# # working with halfccd spline
x, y = numpy.ogrid[:1024, :2048]
grid = numpy.logical_or(x % 100 == 0, y % 100 == 0) + numpy.ones((1024, 2048), numpy.float32)
splineFilePath = "1461/halfccd.spline"
splineFile = UtilsTest.getimage(splineFilePath)
det = detectors.FReLoN(splineFile)
# working with halfccd spline
# x, y = numpy.ogrid[:2048, :2048]
# grid = numpy.logical_or(x % 100 == 0, y % 100 == 0).astype(numpy.float32) + numpy.ones((2048, 2048), numpy.float32)
# det = detectors.FReLoN("frelon.spline")
print(det, det.max_shape)
disLUT = _distortion.Distortion(det)
print(disLUT)
lut = disLUT.calc_LUT_size()
print(disLUT.lut_size)
print(lut.mean())
disLUT.calc_LUT()
outLUT = disLUT.correct(grid)
fabio.edfimage.edfimage(data=outLUT.astype("float32")).write("test_correct_LUT.edf")
print("*"*50)
print(det, det.max_shape)
disCSR = _distortionCSR.Distortion(det, foo=64)
print(disCSR)
lut = disCSR.calc_LUT_size()
print(disCSR.lut_size)
print(lut.mean())
disCSR.calc_LUT()
outCSR = disCSR.correct(grid)
fabio.edfimage.edfimage(data=outCSR.astype("float32")).write("test_correct_CSR.edf")
print("*"*50)
disCSR.setDevice()
outCSRocl = disCSR.correct(grid)
fabio.edfimage.edfimage(data=outCSRocl.astype("float32")).write("test_correct_CSR.edf")
print("*"*50)
print(det, det.max_shape)
disLUTpy = distortion.Distortion(det)
print(disLUTpy)
lut = disLUTpy.calc_LUT_size()
print(disLUTpy.lut_size)
print(lut.mean())
disLUTpy.calc_LUT()
outLUTpy = disLUTpy.correct(grid)
fabio.edfimage.edfimage(data=outLUTpy.astype("float32")).write("test_correct_LUT.edf")
print("*"*50)
# x, y = numpy.ogrid[:2048, :2048]
# grid = numpy.logical_or(x % 100 == 0, y % 100 == 0)
# det = detectors.FReLoN("frelon.spline")
# print( det, det.max_shape)
# dis = Distortion(det)
# print(dis
# lut = dis.calc_LUT_size()
# print(dis.lut_size
# print("LUT mean & max", lut.mean(), lut.max()
# dis.calc_LUT()
# out = dis.correct(grid)
# fabio.edfimage.edfimage(data=out.astype("float32")).write("test2048.edf")
import pylab
# pylab.imshow(outLUT)
# pylab.show()
# pylab.imshow(outCSR) # , interpolation="nearest")
# , interpolation="nearest")
# pylab.show()
pylab.imshow(outCSRocl)
pylab.show()
# pylab.imshow(outLUTpy)
# pylab.show()
assert numpy.allclose(outLUT, outCSRocl)
if __name__ == "__main__":
det = dis = lut = None
test()
pyFAI-0.11.0/test/profile_csr.py 0000644 0001773 0001774 00000003270 12527541311 017467 0 ustar kieffer kieffer 0000000 0000000 # -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import, division, print_function
import sys, numpy, time
import utilstest
import fabio, pyopencl
from pylab import *
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitBBox
from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/Pilatus1M.poni")
data = fabio.open("testimages/Pilatus1M.edf").data
ref = ai.xrpd_LUT(data, 1000)[1]
obt = ai.xrpd_LUT_OCL(data, 1000)[1]
logger.debug("check LUT basics: %s"%abs(obt[1] - ref[1]).max())
assert numpy.allclose(ref,obt)
cyt_lut = pyFAI.splitBBoxLUT.HistoBBox1d(
ai._ttha,
ai._dttha,
bins=1000,
unit="2th_deg")
ocl_lut = pyFAI.ocl_azim_lut.OCL_LUT_Integrator(cyt_lut.lut, data.size, "GPU",profile=True)
print( "OpenCL Device", ocl_lut.device)
cyt_csr = pyFAI.splitBBoxCSR.HistoBBox1d(
ai._ttha,
ai._dttha,
bins=1000,
unit="2th_deg")
out_cyt_lut = cyt_lut.integrate(data)[1]
out_ocl_lut = ocl_lut.integrate(data)[0]
#out_ocl_csr = ocl_csr.integrate(data)[0]
out_cyt_csr = cyt_csr.integrate(data)[1]
print ("lut cpu vs lut gpu",abs(out_cyt_lut - out_ocl_lut).max())
assert numpy.allclose(out_cyt_lut, out_ocl_lut)
print ("lut cpu vs csr cpu",abs(out_cyt_lut - out_cyt_csr).max())
#assert numpy.allclose(out_cyt_lut, out_cyt_csr)
ocl_lut.log_profile()
plot(out_cyt_lut, label="cyt_lut" )
plot(out_ocl_lut, label="ocl_lut")
plot(out_cyt_csr, label="cyt_csr" )
#plot(out_ocl-out_cyt, label="delta")
legend()
show()
raw_input()
pyFAI-0.11.0/test/bug_ocl_cpu.py 0000755 0001773 0001774 00000000236 12321446554 017450 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/python
import pyFAI, numpy
ai = pyFAI.load("moke.poni")
shape = (600, 600)
ai.xrpd_OpenCL(numpy.ones(shape), 500, devicetype="cpu", useFp64=False)
pyFAI-0.11.0/test/utilstest.py 0000644 0001773 0001774 00000037476 12527541311 017237 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# coding: utf-8
#
# Project: pyFAI tests class utilities
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) 2010-2014 European Synchrotron Radiation Facility
# Grenoble, France
#
# Principal authors: Jérôme KIEFFER (jerome.kieffer@esrf.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, division, absolute_import, with_statement
__author__ = "Jérôme Kieffer"
__contact__ = "jerome.kieffer@esrf.eu"
__license__ = "LGPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "02/02/2015"
PACKAGE = "pyFAI"
SOURCES = PACKAGE + "-src"
DATA_KEY = "PYFAI_DATA"
if __name__ == "__main__":
__name__ = "pyFAI.test"
import os
import imp
import sys
import subprocess
import threading
import distutils.util
import logging
try: # Python3
from urllib.request import urlopen, ProxyHandler, build_opener
except ImportError: # Python2
from urllib2 import urlopen, ProxyHandler, build_opener
# import urllib2
import numpy
import shutil
import json
import tempfile
logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger("%s.utilstest" % PACKAGE)
TEST_HOME = os.path.dirname(os.path.abspath(__file__))
IN_SOURCES = SOURCES in os.listdir(os.path.dirname(TEST_HOME))
if IN_SOURCES:
os.environ[DATA_KEY] = os.path.dirname(TEST_HOME)
import getpass
login = getpass.getuser()
def copy(infile, outfile):
"link or copy file according to the OS"
if "link" in dir(os):
os.link(infile, outfile)
else:
shutil.copy(infile, outfile)
class UtilsTest(object):
"""
Static class providing useful stuff for preparing tests.
"""
options = None
timeout = 60 # timeout in seconds for downloading images
url_base = "http://forge.epn-campus.eu/attachments/download"
sem = threading.Semaphore()
recompiled = False
reloaded = False
name = PACKAGE
if IN_SOURCES:
image_home = os.path.join(TEST_HOME, "testimages")
if not os.path.isdir(image_home):
os.makedirs(image_home)
testimages = os.path.join(TEST_HOME, "all_testimages.json")
if os.path.exists(testimages):
with open(testimages) as f:
ALL_DOWNLOADED_FILES = set(json.load(f))
else:
ALL_DOWNLOADED_FILES = set()
platform = distutils.util.get_platform()
architecture = "lib.%s-%i.%i" % (platform,
sys.version_info[0], sys.version_info[1])
if os.environ.get("PYBUILD_NAME") == name:
# we are in the debian packaging way
home = os.environ.get("PYTHONPATH", "").split(os.pathsep)[-1]
elif os.environ.get("BUILDPYTHONPATH"):
home = os.path.abspath(os.environ.get("BUILDPYTHONPATH", ""))
else:
home = os.path.join(os.path.dirname(TEST_HOME),
"build", architecture)
logger.info("%s Home is: %s" % (name, home))
if name in sys.modules:
logger.info("%s module was already loaded from %s" % (name, sys.modules[name]))
pyFAI = None
sys.modules.pop(name)
for key in sys.modules.copy():
if key.startswith(name + "."):
sys.modules.pop(key)
print(home)
if not os.path.isdir(home):
with sem:
if not os.path.isdir(home):
logger.warning("Building pyFAI to %s" % home)
p = subprocess.Popen([sys.executable, "setup.py", "build"],
shell=False, cwd=os.path.dirname(TEST_HOME))
logger.info("subprocess ended with rc= %s" % p.wait())
recompiled = True
logger.info("Loading %s" % name)
try:
pyFAI = imp.load_module(*((name,) + imp.find_module(name, [home])))
except Exception as error:
logger.warning("Unable to loading %s %s" % (name, error))
if "-r" not in sys.argv:
logger.warning("Remove build and start from scratch %s" % error)
sys.argv.append("-r")
else:
import pyFAI.utils
pyFAI.utils.depreclog.setLevel(logging.ERROR)
else:
image_home = os.path.join(tempfile.gettempdir(), "%s_testimages_%s" % (name, login))
if not os.path.exists(image_home):
os.makedirs(image_home)
testimages = os.path.join(image_home, "all_testimages.json")
if os.path.exists(testimages):
with open(testimages) as f:
ALL_DOWNLOADED_FILES = set(json.load(f))
else:
ALL_DOWNLOADED_FILES = set()
# print("Call tempfile.mkdtemp(os.getlogin(), name) with %s %s" % (login, name))
tempdir = tempfile.mkdtemp(login, name)
@classmethod
def clean_up(cls):
recursive_delete(cls.tempdir)
@classmethod
def deep_reload(cls):
if not IN_SOURCES:
cls.pyFAI = __import__(cls.name)
return cls.pyFAI
if cls.reloaded:
return cls.pyFAI
logger.info("Loading %s" % cls.name)
cls.pyFAI = None
pyFAI = None
sys.path.insert(0, cls.home)
for key in sys.modules.copy():
if key.startswith(cls.name):
sys.modules.pop(key)
cls.pyFAI = __import__(cls.name)
logger.info("%s loaded from %s" % (cls.name, cls.pyFAI.__file__))
sys.modules[cls.name] = cls.pyFAI
cls.reloaded = True
import pyFAI.utils
pyFAI.utils.depreclog.setLevel(logging.ERROR)
return cls.pyFAI
@classmethod
def forceBuild(cls, remove_first=True):
"""
force the recompilation of pyFAI
"""
if not IN_SOURCES:
return
if not cls.recompiled:
with cls.sem:
if not cls.recompiled:
logger.info("Building %s to %s" % (cls.name, cls.home))
if cls.name in sys.modules:
logger.info("%s module was already loaded from %s" % (cls.name, sys.modules[cls.name]))
cls.pyFAI = None
sys.modules.pop(cls.name)
if remove_first:
recursive_delete(cls.home)
p = subprocess.Popen([sys.executable, "setup.py", "build"],
shell=False, cwd=os.path.dirname(TEST_HOME))
logger.info("subprocess ended with rc= %s" % p.wait())
cls.pyFAI = cls.deep_reload()
cls.recompiled = True
@classmethod
def timeoutDuringDownload(cls, imagename=None):
"""
Function called after a timeout in the download part ...
just raise an Exception.
"""
if imagename is None:
imagename = "2252/testimages.tar.bz2 unzip it "
raise RuntimeError("Could not automatically \
download test images!\n \ If you are behind a firewall, \
please set both environment variable http_proxy and https_proxy.\
This even works under windows ! \n \
Otherwise please try to download the images manually from \n %s/%s and put it in test/testimages." % (cls.url_base, imagename))
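# Hypothetical shell example of the proxy setup mentioned in the message above
# (the proxy host and port are placeholders):
#     export http_proxy="http://proxy.example.com:3128"
#     export https_proxy="http://proxy.example.com:3128"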
@classmethod
def getimage(cls, imagename):
"""
Downloads the requested image from Forge.EPN-campus.eu
@param imagename: name of the image.
For the RedMine forge, the filename contains a directory name that is removed
@return: full path of the locally saved file
"""
if imagename not in cls.ALL_DOWNLOADED_FILES:
cls.ALL_DOWNLOADED_FILES.add(imagename)
with open(cls.testimages, "w") as fp:
json.dump(list(cls.ALL_DOWNLOADED_FILES), fp, indent=4)
baseimage = os.path.basename(imagename)
logger.info("UtilsTest.getimage('%s')" % baseimage)
fullimagename = os.path.abspath(os.path.join(cls.image_home, baseimage))
if not os.path.isfile(fullimagename):
logger.info("Trying to download image %s, timeout set to %ss",
imagename, cls.timeout)
dictProxies = {}
if "http_proxy" in os.environ:
dictProxies['http'] = os.environ["http_proxy"]
dictProxies['https'] = os.environ["http_proxy"]
if "https_proxy" in os.environ:
dictProxies['https'] = os.environ["https_proxy"]
if dictProxies:
proxy_handler = ProxyHandler(dictProxies)
opener = build_opener(proxy_handler).open
else:
opener = urlopen
logger.info("wget %s/%s" % (cls.url_base, imagename))
data = opener("%s/%s" % (cls.url_base, imagename),
data=None, timeout=cls.timeout).read()
logger.info("Image %s successfully downloaded." % baseimage)
try:
with open(fullimagename, "wb") as outfile:
outfile.write(data)
except IOError:
raise IOError("unable to write downloaded \
data to disk at %s" % cls.image_home)
if not os.path.isfile(fullimagename):
raise RuntimeError("Could not automatically \
download test images %s!\n \ If you are behind a firewall, \
please set both environment variable http_proxy and https_proxy.\
This even works under windows ! \n \
Otherwise please try to download the images manually from \n%s/%s" % (imagename, cls.url_base, imagename))
return fullimagename
@classmethod
def download_images(cls, imgs=None):
"""
Download all images needed for the test/benchmarks
@param imgs: list of files to download
"""
if not imgs:
imgs = cls.ALL_DOWNLOADED_FILES
for fn in imgs:
print("Downloading from internet: %s" % fn)
cls.getimage(fn)
@classmethod
def get_options(cls):
"""
Parse the command line to analyse options ... returns options
"""
if cls.options is None:
try:
from argparse import ArgumentParser
except:
from pyFAI.third_party.argparse import ArgumentParser
parser = ArgumentParser(usage="Tests for %s" % cls.name)
parser.add_argument("-d", "--debug", dest="debug", help="run in debugging mode",
default=False, action="store_true")
parser.add_argument("-i", "--info", dest="info", help="run in more verbose mode ",
default=False, action="store_true")
parser.add_argument("-f", "--force", dest="force", help="force the build of the library",
default=False, action="store_true")
parser.add_argument("-r", "--really-force", dest="remove",
help="remove existing build and force the build of the library",
default=False, action="store_true")
parser.add_argument(dest="args", type=str, nargs='*')
if IN_SOURCES:
cls.options = parser.parse_args()
else:
cls.options = parser.parse_args([])
return cls.options
@classmethod
def get_logger(cls, filename=__file__):
"""
small helper function that initializes the logger and returns it
"""
options = cls.get_options()
dirname, basename = os.path.split(os.path.abspath(filename))
basename = os.path.splitext(basename)[0]
force_build = False
force_remove = False
level = logging.WARN
if options.debug:
level = logging.DEBUG
elif options.info:
level = logging.INFO
if options.force:
force_build = True
if options.remove:
force_remove = True
force_build = True
mylogger = logging.getLogger(basename)
logger.setLevel(level)
mylogger.setLevel(level)
mylogger.debug("tests loaded from file: %s" % basename)
if force_build:
UtilsTest.forceBuild(force_remove)
return mylogger
def Rwp(obt, ref, comment="Rwp"):
""" ___________________________
Calculate \/ 4 ( obt - ref)²
V Sum( --------------- )
(obt + ref)²
This is done for symmetry reason between obt and ref
@param obt: obtained data
@type obt: 2-list of array of the same size
@param obt: reference data
@type obt: 2-list of array of the same size
@return: Rwp value, lineary interpolated
"""
ref0, ref1 = ref
obt0, obt1 = obt
big0 = numpy.concatenate((obt0, ref0))
big0.sort()
big0 = numpy.unique(big0)
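# interpolate both curves on the merged, deduplicated abscissa so they can be compared point by point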
big_ref = numpy.interp(big0, ref0, ref1, 0.0, 0.0)
big_obt = numpy.interp(big0, obt0, obt1, 0.0, 0.0)
big_mean = (big_ref + big_obt) / 2.0
big_delta = (big_ref - big_obt)
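# discard points where the mean is numerically zero to avoid division by zero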
non_null = abs(big_mean) > 1e-10
return numpy.sqrt(((big_delta[non_null]) ** 2 / ((big_mean[non_null]) ** 2)).sum())
def recursive_delete(dirname):
"""
Delete everything reachable from the directory named in "top",
assuming there are no symbolic links.
CAUTION: This is dangerous! For example, if top == '/', it
could delete all your disk files.
@param dirname: top directory to delete
@type dirname: string
"""
if not os.path.isdir(dirname):
return
for root, dirs, files in os.walk(dirname, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(dirname)
getLogger = UtilsTest.get_logger
def diff_img(ref, obt, comment=""):
"""
Highlight the difference in images
"""
assert ref.shape == obt.shape
delta = abs(obt - ref)
if delta.max() > 0:
from pyFAI.gui_utils import pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(2, 2, 1)
ax2 = fig.add_subplot(2, 2, 2)
ax3 = fig.add_subplot(2, 2, 3)
im_ref = ax1.imshow(ref)
plt.colorbar(im_ref)
ax1.set_title("%s ref" % comment)
im_obt = ax2.imshow(obt)
plt.colorbar(im_obt)
ax2.set_title("%s obt" % comment)
im_delta = ax3.imshow(delta)
plt.colorbar(im_delta)
ax3.set_title("delta")
imax = delta.argmax()
x = imax % ref.shape[-1]
y = imax // ref.shape[-1]
ax3.plot([x], [y], "o", scalex=False, scaley=False)
fig.show()
from pyFAI.utils import input
input()
def diff_crv(ref, obt, comment=""):
"""
Highlight the difference in vectors
"""
assert ref.shape == obt.shape
delta = abs(obt - ref)
if delta.max() > 0:
from pyFAI.gui_utils import pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(1, 2, 1)
ax2 = fig.add_subplot(1, 2, 2)
im_ref = ax1.plot(ref, label="%s ref" % comment)
im_obt = ax1.plot(obt, label="%s obt" % comment)
im_delta = ax2.plot(delta, label="delta")
fig.show()
from pyFAI.utils import input
input()
pyFAI-0.11.0/test/test_peak_picking.py 0000755 0001773 0001774 00000012775 12527541311 020660 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
#
#
"test suite for peak picking class"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "20/03/2015"
import unittest
import os
import numpy
import logging
import time
import sys
import fabio
import tempfile
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger, recursive_delete
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
import pyFAI.peak_picker
import pyFAI.geometryRefinement
from pyFAI.peak_picker import PeakPicker
from pyFAI.calibrant import Calibrant
from pyFAI.geometryRefinement import GeometryRefinement
if logger.getEffectiveLevel() <= logging.INFO:
import pylab
class testPeakPicking(unittest.TestCase):
"""basic test"""
calibFile = "1788/moke.tif"
ctrlPt = {0: (300, 230),
1: (300, 212),
2: (300, 195),
3: (300, 177),
4: (300, 159),
5: (300, 140),
6: (300, 123),
7: (300, 105),
8: (300, 87)}
tth = numpy.radians(numpy.arange(4, 13))
wavelength = 1e-10
ds = wavelength * 5e9 / numpy.sin(tth / 2)
calibrant = Calibrant(dSpacing=ds)
maxiter = 100
tmp_dir = tempfile.mkdtemp(prefix="pyFAI_test_peak_picking_")
logfile = os.path.join(tmp_dir, "testpeakPicking.log")
nptfile = os.path.join(tmp_dir, "testpeakPicking.npt")
def setUp(self):
"""Download files"""
if not os.path.isdir(self.tmp_dir):
os.makedirs(self.tmp_dir)
self.img = UtilsTest.getimage(self.__class__.calibFile)
self.pp = PeakPicker(self.img, calibrant=self.calibrant, wavelength=self.wavelength)
if not os.path.isdir(self.tmp_dir):
os.makedirs(self.tmp_dir)
if os.path.isfile(self.logfile):
os.unlink(self.logfile)
if os.path.isfile(self.nptfile):
os.unlink(self.nptfile)
def tearDown(self):
"""Remove temporary files"""
recursive_delete(self.tmp_dir)
def test_peakPicking(self):
"""first test peak-picking then checks the geometry found is OK"""
for i in self.ctrlPt:
pts = self.pp.massif.find_peaks(self.ctrlPt[i], stdout=open(self.logfile, "a"))
logger.info("point %s at ring #%i (tth=%.1f deg) generated %i points", self.ctrlPt[i], i, self.tth[i], len(pts))
if len(pts) > 0:
self.pp.points.append(pts, ring=i)
else:
logger.error("point %s caused error (%s) ", i, self.ctrlPt[i])
self.pp.points.save(self.nptfile)
lstPeak = self.pp.points.getListRing()
# print self.pp.points
# print lstPeak
logger.info("After peak-picking, we have %s points generated from %s groups ", len(lstPeak), len(self.ctrlPt))
gr = GeometryRefinement(lstPeak, dist=0.01, pixel1=1e-4, pixel2=1e-4, wavelength=self.wavelength, calibrant=self.calibrant)
gr.guess_poni()
logger.info(gr.__repr__())
last = sys.maxint if sys.version_info[0] < 3 else sys.maxsize
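# refine iteratively; stop as soon as the residual no longer changes between two iterations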
for i in range(self.maxiter):
delta2 = gr.refine2()
logger.info(gr.__repr__())
if delta2 == last:
logger.info("refinement finished after %s iteration" % i)
break
last = delta2
self.assertEquals(last < 1e-4, True, "residual error is less than 1e-4, got %s" % last)
self.assertAlmostEquals(gr.dist, 0.1, 2, "distance is OK, got %s, expected 0.1" % gr.dist)
self.assertAlmostEquals(gr.poni1, 3e-2, 2, "PONI1 is OK, got %s, expected 3e-2" % gr.poni1)
self.assertAlmostEquals(gr.poni2, 3e-2, 2, "PONI2 is OK, got %s, expected 3e-2" % gr.poni2)
self.assertAlmostEquals(gr.rot1, 0, 2, "rot1 is OK, got %s, expected 0" % gr.rot1)
self.assertAlmostEquals(gr.rot2, 0, 2, "rot2 is OK, got %s, expected 0" % gr.rot2)
self.assertAlmostEquals(gr.rot3, 0, 2, "rot3 is OK, got %s, expected 0" % gr.rot3)
# print self.pp.points
class TestMassif(unittest.TestCase):
"""test for ring extraction algorithm with image which needs binning (non regression test)"""
calibFile = "1788/moke.tif"
#TODO !!!
def test_suite_all_PeakPicking():
testSuite = unittest.TestSuite()
testSuite.addTest(testPeakPicking("test_peakPicking"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_PeakPicking()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/test_io.py 0000644 0001773 0001774 00000012227 12527541311 016630 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
#
"test suite for input/output stuff"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import unittest
import os
import shutil
import numpy
import time
import sys
import fabio
import tempfile
is_main = (__name__ == '__main__')
if is_main:
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI import io
class TestIsoTime(unittest.TestCase):
def test_get(self):
self.assert_(len(io.get_isotime()), 25)
def test_from(self):
t0 = time.time()
isotime = io.get_isotime(t0)
self.assert_(abs(t0 - io.from_isotime(isotime)) < 1, "timings are precise to the second")
class TestNexus(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
def test_new_detector(self):
if io.h5py is None:
logger.warning("H5py not present, skipping test_io.TestNexus")
return
fname = os.path.join(self.tmpdir, "nxs.h5")
nxs = io.Nexus(fname, "r+")
nxs.new_detector()
nxs.close()
self.assert_(io.is_hdf5(fname), "nexus file is an HDF5")
# os.system("h5ls -r -a %s" % fname)
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
class testHDF5Writer(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
def test_writer(self):
if io.h5py is None:
logger.warning("H5py is absent on the system, skip HDF5 writing test")
return
h5file = os.path.join(self.tmpdir, "junk.h5")
shape = 1024, 1024
n = 100
m = 10 # number of frames in memory
data = numpy.random.random((m, shape[0], shape[1])).astype(numpy.float32)
nmbytes = data.nbytes / 1e6 * n / m
t0 = time.time()
writer = io.HDF5Writer(filename=h5file, hpath="data")
writer.init({"nbpt_azim": shape[0], "nbpt_rad": shape[1]})
for i in range(n):
writer.write(data[i % m], i)
writer.close()
t = time.time() - t0
logger.info("Writing of HDF5 of %ix%s (%.3fMB) took %.3f (%.3fMByte/s)" % (n, shape, nmbytes, t, nmbytes / t))
statinfo = os.stat(h5file)
self.assert_(statinfo.st_size / 1e6 > nmbytes, "file size (%s) is larger than dataset" % statinfo.st_size)
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
class testFabIOWriter(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.tmpdir = tempfile.mkdtemp()
def test_writer(self):
h5file = os.path.join(self.tmpdir)
shape = 1024, 1024
n = 100
m = 10 # number of frames in memory
data = numpy.random.random((m, shape[0], shape[1])).astype(numpy.float32)
nmbytes = data.nbytes / 1e6 * n / m
t0 = time.time()
writer = io.FabioWriter(filename=h5file)
writer.init({"nbpt_azim": shape[0], "nbpt_rad": shape[1], "prefix": "test"})
for i in range(n):
writer.write(data[i % m], i)
writer.close()
t = time.time() - t0
logger.info("Writing of HDF5 of %ix%s (%.3fMB) took %.3f (%.3fMByte/s)" % (n, shape, nmbytes, t, nmbytes / t))
statinfo = os.stat(h5file)
self.assert_(statinfo.st_size / 1e6 > nmbytes, "file size (%s) is larger than dataset" % statinfo.st_size)
def tearDown(self):
unittest.TestCase.tearDown(self)
shutil.rmtree(self.tmpdir)
def test_suite_all_io():
testSuite = unittest.TestSuite()
testSuite.addTest(TestIsoTime("test_get"))
testSuite.addTest(TestIsoTime("test_from"))
testSuite.addTest(TestNexus("test_new_detector"))
testSuite.addTest(testHDF5Writer("test_writer"))
# testSuite.addTest(testFabIOWriter("test_writer"))
return testSuite
if is_main:
mysuite = test_suite_all_io()
runner = unittest.TextTestRunner()
runner.run(mysuite)
UtilsTest.clean_up() pyFAI-0.11.0/test/profile_lut_pixelsplitFull.py 0000644 0001773 0001774 00000014206 12527541311 022605 0 ustar kieffer kieffer 0000000 0000000 # -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time
from . import utilstest
import fabio
import pyopencl as cl
from pylab import *
from six.moves import input
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
#from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
#logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size/8,4,2)
pos_size = pos.size
#size = data.size
size = pos_size/8
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4*workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
with open("../openCL/ocl_lut_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
compile_options = "-D BINS=%i -D NIMAGE=%i -D WORKGROUP_SIZE=%i -D EPS=%e" % \
(bins, size, workgroup_size, numpy.finfo(numpy.float32).eps)
print(compile_options)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size*workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult.data, d_minmax.data)
min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax=(min0,max0,min1,max1)
print(minmax)
print(d_minmax)
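# the trailing commas below build 1-tuples (global work sizes), rounded up to the next multiple of the workgroup size (a power of two)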
memset_size = (bins + workgroup_size - 1) & ~(workgroup_size - 1),
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
global_size = (size + workgroup_size - 1) & ~(workgroup_size - 1),
program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
outMax_1 = numpy.copy(d_outMax)
d_idx_ptr = cl.array.empty(queue, (bins+1,), dtype=numpy.int32)
d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
lutsize = numpy.ndarray(1, dtype=numpy.int32)
cl.enqueue_copy(queue, lutsize, d_lutsize.data)
print(lutsize)
lut_size = int(lutsize[0])
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
#d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outCount = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outMerge = cl.array.empty(queue, (bins,), dtype=numpy.float32)
program.lut3(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data, d_idx_ptr.data, d_indices.data, d_data.data)
outMax_2 = numpy.copy(d_outMax)
indices = ndarray(lut_size, dtype=numpy.int32)
data_lut = ndarray(lut_size, dtype=numpy.float32)
idx_ptr = ndarray(bins+1, dtype=numpy.int32)
cl.enqueue_copy(queue,indices, d_indices.data)
cl.enqueue_copy(queue,data_lut, d_data.data)
cl.enqueue_copy(queue,idx_ptr, d_idx_ptr.data)
#check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
#cl.enqueue_copy(queue, check_atomics, d_check_atomics)
program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outCount.data, d_outMerge.data)
d_image = cl.array.to_device(queue, data)
d_image_float = cl.array.empty(queue, (size,), dtype=numpy.float32)
#program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float.data) # halfccd
program.csr_integrate(queue, (bins*workgroup_size,),(workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
#outData = numpy.copy(d_outData)
#outCount = numpy.copy(d_outCount)
#outMerge = numpy.copy(d_outMerge)
outData = numpy.ndarray(bins, dtype=numpy.float32)
outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
cl.enqueue_copy(queue,outData, d_outData.data)
cl.enqueue_copy(queue,outCount, d_outCount.data)
cl.enqueue_copy(queue,outMerge, d_outMerge.data)
#program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
#cl.enqueue_copy(queue,outData, d_outData)
#cl.enqueue_copy(queue,outCount, d_outCount)
#cl.enqueue_copy(queue,outMerge, d_outMerge)
ai.xrpd_LUT(data, 1000)
#ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
#foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
ref = foo.integrate(data)
#assert(numpy.allclose(ref[1],outMerge))
#plot(ref[0],outMerge, label="ocl_lut_merge")
#plot(ref[0],outData, label="ocl_lut_data")
plot(ref[0],outCount, label="ocl_lut_count")
#plot(ref[0], ref[1], label="ref_merge")
#plot(ref[0], ref[2], label="ref_data")
plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
show()
input()
#aaa = 0
#bbb = 0
#for i in range(bins):
#ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
#ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
#data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
#sort1 = numpy.argsort(ind_tmp1)
#sort2 = numpy.argsort(ind_tmp2)
#data_1 = data_tmp1[sort1]
#data_2 = data_tmp2[sort2]
#for j in range(data_1.size):
#aaa += 1
#if not numpy.allclose(data_1[j],data_2[j]):
#bbb += 1
#print data_1[j],data_2[j],numpy.allclose(data_1[j],data_2[j]), idx_ptr[i]+j
#print aaa,bbb pyFAI-0.11.0/test/fai_cfg.json 0000644 0001773 0001774 00000000060 12553735122 017055 0 ustar kieffer kieffer 0000000 0000000 {
"nbpt_azim": 1024,
"nbpt_rad": 1024
} pyFAI-0.11.0/test/chi_square.py 0000755 0001773 0001774 00000004211 12406056407 017305 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/python
#coding: utf-8
#tests if the distribution of Chi2 is centered around 1:
# Needs a large dataset (thousands of images)
import sys
import glob
import pylab
pylab.ion()
import numpy
from math import sqrt
import fabio
from utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
ai = pyFAI.AzimuthalIntegrator(detector="Pilatus1M")
ai.setFit2D(directDist=2849, centerX=8.900000e+02, centerY=7.600000e+01)
ai.wavelength = 9.919000e-11
images = glob.glob("/mnt/data/BM29/water/daniel/raw/water_029_0*.edf")
images.sort()
I_splitBB = [];sigma_splitBB = [];I_splitFull = [];sigma_splitFull = [];I_nosplit = [];sigma_nosplit = []
for fn in images[:]:
img = fabio.open(fn).data
print(fn);
variance = numpy.maximum(img, 1)
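# variance estimated from the counts themselves (Poisson-like assumption), clipped to at least 1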
q, i, s = ai.integrate1d(img, 1040, unit="q_nm^-1", method="numpy", variance=variance)
I_nosplit.append(i)
sigma_nosplit.append(s)
q, i, s = ai.integrate1d(img, 1040, unit="q_nm^-1", method="splitbbox", variance=variance)
I_splitBB.append(i)
sigma_splitBB.append(s)
q, i, s = ai.integrate1d(img, 1040, unit="q_nm^-1", method="splitpixel", variance=variance)
I_splitFull.append(i)
sigma_splitFull.append(s)
I_splitBB = numpy.vstack(I_splitBB)
I_splitFull = numpy.vstack(I_splitFull)
I_nosplit = numpy.vstack(I_nosplit)
sigma_nosplit = numpy.vstack(sigma_nosplit)
sigma_splitBB = numpy.vstack(sigma_splitBB)
sigma_splitFull = numpy.vstack(sigma_splitFull)
Chi2_splitBB = [];Chi2_splitFull = []; Chi2_nosplit = []
Iavg_splitFull = I_splitFull.mean(axis=0)
Iavg_splitBB = I_splitBB.mean(axis=0)
Iavg_nosplit = I_nosplit.mean(axis=0)
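# per-frame chi²: mean squared deviation from the average curve, normalized by the propagated sigma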
for i in range(I_splitBB.shape[0]):
Chi2_splitBB.append((((I_splitBB[i] - Iavg_splitBB) / sigma_splitBB[i]) ** 2).mean())
Chi2_splitFull.append((((I_splitFull[i] - Iavg_splitFull) / sigma_splitFull[i]) ** 2).mean())
Chi2_nosplit.append((((I_nosplit[i] - Iavg_nosplit) / sigma_nosplit[i]) ** 2).mean())
pylab.hist(Chi2_splitBB, 50, label="splitBB")
pylab.hist(Chi2_splitFull, 50, label="splitFull")
pylab.hist(Chi2_nosplit, 50, label="no_split")
pylab.xlabel("$\chi^2$")
pylab.ylabel("count")
pylab.legend()
pylab.show()
pyFAI-0.11.0/test/profile_ocl_lut_pixelsplit.py 0000644 0001773 0001774 00000014642 12527541311 022623 0 ustar kieffer kieffer 0000000 0000000 # -*- coding: utf-8 -*-
"""
Created on Fri Mar 07 09:52:51 2014
@author: ashiotis
"""
from __future__ import absolute_import
from __future__ import print_function
import sys, numpy, time, os
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from . import utilstest
from six.moves import input
import fabio
import pyopencl as cl
from pylab import *
print("#"*50)
pyFAI = sys.modules["pyFAI"]
from pyFAI import splitPixelFullLUT
from pyFAI import ocl_hist_pixelsplit
# from pyFAI import splitBBoxLUT
from pyFAI import splitBBoxCSR
from pyFAI import splitPixelFullLUT_float32
# logger = utilstest.getLogger("profile")
ai = pyFAI.load("testimages/halfccd.poni")
data = fabio.open("testimages/halfccd.edf").data
workgroup_size = 256
bins = 1000
pos_in = ai.array_from_unit(data.shape, "corner", unit="2th_deg")
pos = pos_in.reshape(pos_in.size / 8, 4, 2)
pos_size = pos.size
# size = data.size
size = pos_size / 8
ctx = cl.create_some_context()
queue = cl.CommandQueue(ctx)
mf = cl.mem_flags
d_pos = cl.array.to_device(queue, pos)
d_preresult = cl.array.empty(queue, (4 * workgroup_size,), dtype=numpy.float32)
d_minmax = cl.array.empty(queue, (4,), dtype=numpy.float32)
with open("../openCL/ocl_lut_pixelsplit.cl", "r") as kernelFile:
kernel_src = kernelFile.read()
compile_options = "-D BINS=%i -D NIMAGE=%i -D WORKGROUP_SIZE=%i -D EPS=%e" % \
(bins, size, workgroup_size, numpy.finfo(numpy.float32).eps)
print(compile_options)
program = cl.Program(ctx, kernel_src).build(options=compile_options)
program.reduce1(queue, (workgroup_size * workgroup_size,), (workgroup_size,), d_pos.data, numpy.uint32(pos_size), d_preresult.data)
program.reduce2(queue, (workgroup_size,), (workgroup_size,), d_preresult.data, d_minmax.data)
min0 = pos[:, :, 0].min()
max0 = pos[:, :, 0].max()
min1 = pos[:, :, 1].min()
max1 = pos[:, :, 1].max()
minmax = (min0, max0, min1, max1)
print(minmax)
print(d_minmax)
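# the trailing commas below build 1-tuples (global work sizes), rounded up to the next multiple of the workgroup size (a power of two)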
memset_size = (bins + workgroup_size - 1) & ~(workgroup_size - 1),
d_outMax = cl.array.empty(queue, (bins,), dtype=numpy.int32)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
global_size = (size + workgroup_size - 1) & ~(workgroup_size - 1),
program.lut1(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data)
outMax_1 = numpy.copy(d_outMax)
d_idx_ptr = cl.array.empty(queue, (bins + 1,), dtype=numpy.int32)
d_lutsize = cl.array.empty(queue, (1,), dtype=numpy.int32)
program.lut2(queue, (1,), (1,), d_outMax.data, d_idx_ptr.data, d_lutsize.data)
lutsize = numpy.ndarray(1, dtype=numpy.int32)
cl.enqueue_copy(queue, lutsize, d_lutsize.data)
print(lutsize)
lut_size = int(lutsize[0])
d_indices = cl.array.empty(queue, (lut_size,), dtype=numpy.int32)
d_data = cl.array.empty(queue, (lut_size,), dtype=numpy.float32)
# d_check_atomics = cl.Buffer(ctx, mf.READ_WRITE, 4*lut_size)
program.memset_out_int(queue, memset_size, (workgroup_size,), d_outMax.data)
d_outData = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outCount = cl.array.empty(queue, (bins,), dtype=numpy.float32)
d_outMerge = cl.array.empty(queue, (bins,), dtype=numpy.float32)
program.lut3(queue, global_size, (workgroup_size,), d_pos.data, d_minmax.data, numpy.uint32(size), d_outMax.data, d_idx_ptr.data, d_indices.data, d_data.data)
outMax_2 = numpy.copy(d_outMax)
indices = ndarray(lut_size, dtype=numpy.int32)
data_lut = ndarray(lut_size, dtype=numpy.float32)
idx_ptr = ndarray(bins + 1, dtype=numpy.int32)
cl.enqueue_copy(queue, indices, d_indices.data)
cl.enqueue_copy(queue, data_lut, d_data.data)
cl.enqueue_copy(queue, idx_ptr, d_idx_ptr.data)
# check_atomics = numpy.ndarray(lut_size, dtype=numpy.int32)
# cl.enqueue_copy(queue, check_atomics, d_check_atomics)
program.memset_out(queue, memset_size, (workgroup_size,), d_outData.data, d_outCount.data, d_outMerge.data)
d_image = cl.array.to_device(queue, data)
d_image_float = cl.array.empty(queue, (size,), dtype=numpy.float32)
# program.s32_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float) # Pilatus1M
program.u16_to_float(queue, global_size, (workgroup_size,), d_image.data, d_image_float.data) # halfccd
program.csr_integrate(queue, (bins * workgroup_size,), (workgroup_size,), d_image_float.data, d_data.data, d_indices.data, d_idx_ptr.data, d_outData.data, d_outCount.data, d_outMerge.data)
# outData = numpy.copy(d_outData)
# outCount = numpy.copy(d_outCount)
# outMerge = numpy.copy(d_outMerge)
outData = numpy.ndarray(bins, dtype=numpy.float32)
outCount = numpy.ndarray(bins, dtype=numpy.float32)
outMerge = numpy.ndarray(bins, dtype=numpy.float32)
cl.enqueue_copy(queue, outData, d_outData.data)
cl.enqueue_copy(queue, outCount, d_outCount.data)
cl.enqueue_copy(queue, outMerge, d_outMerge.data)
# program.integrate2(queue, (1024,), (workgroup_size,), d_outData, d_outCount, d_outMerge)
# cl.enqueue_copy(queue,outData, d_outData)
# cl.enqueue_copy(queue,outCount, d_outCount)
# cl.enqueue_copy(queue,outMerge, d_outMerge)
ai.xrpd_LUT(data, 1000)
# ref = ai.integrate1d(data,bins,unit="2th_deg", correctSolidAngle=False, method="lut")
# foo = splitPixelFullLUT.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
foo = splitBBoxCSR.HistoBBox1d(ai._ttha, ai._dttha, bins=bins, unit="2th_deg")
# foo = splitPixelFullLUT_float32.HistoLUT1dFullSplit(pos,bins, unit="2th_deg")
ref = foo.integrate(data)
# assert(numpy.allclose(ref[1],outMerge))
plot(ref[0], outMerge, label="ocl_lut_merge")
# plot(ref[0],outData, label="ocl_lut_data")
# plot(ref[0],outCount, label="ocl_lut_count")
plot(ref[0], ref[1], label="ref_merge")
# plot(ref[0], ref[2], label="ref_data")
# plot(ref[0], ref[3], label="ref_count")
####plot(abs(ref-outMerge)/outMerge, label="ocl_csr_fullsplit")
legend()
show()
input()
# aaa = 0
# bbb = 0
# for i in range(bins):
# ind_tmp1 = numpy.copy(indices[idx_ptr[i]:idx_ptr[i+1]])
# ind_tmp2 = numpy.copy(foo.indices[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp1 = numpy.copy(data_lut[idx_ptr[i]:idx_ptr[i+1]])
# data_tmp2 = numpy.copy(foo.data[idx_ptr[i]:idx_ptr[i+1]])
# sort1 = numpy.argsort(ind_tmp1)
# sort2 = numpy.argsort(ind_tmp2)
# data_1 = data_tmp1[sort1]
# data_2 = data_tmp2[sort2]
# for j in range(data_1.size):
# aaa += 1
# if not numpy.allclose(data_1[j],data_2[j]):
# bbb += 1
# print data_1[j],data_2[j],numpy.allclose(data_1[j],data_2[j]), idx_ptr[i]+j
# print aaa,bbb
pyFAI-0.11.0/test/test_geometry.py 0000755 0001773 0001774 00000027540 12527541311 020063 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
#
"""tests for Jon's geometry changes
FIXME : make some tests that the functions do what is expected
"""
import unittest, numpy, os, sys, time
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI import geometry
from pyFAI import AzimuthalIntegrator
import fabio
class TestSolidAngle(unittest.TestCase):
"""
Test case for solid angle compared to Fit2D results
Masked regions have values set to 0 (not negative) and the native Pilatus mask is deactivated
Detector Pilatus6M PixelSize= 1.720e-04, 1.720e-04 m
Wavelength= 1.072274e-10m
SampleDetDist= 1.994993e-01m PONI= 2.143248e-01, 2.133315e-01m rot1=0.007823 rot2= 0.006716 rot3= -0.000000 rad
DirectBeamDist= 199.510mm Center: x=1231.226, y=1253.864 pix Tilt=0.591 deg tiltPlanRotation= 139.352 deg
integration in 2theta between 0 and 56 deg in 1770 points
"""
fit2dFile = '2548/powder_200_2_0001.chi'
pilatusFile = '2549/powder_200_2_0001.cbf'
ai = None
fit2d = None
def setUp(self):
"""Download files"""
self.fit2dFile = UtilsTest.getimage(self.__class__.fit2dFile)
self.pilatusFile = UtilsTest.getimage(self.__class__.pilatusFile)
self.tth_fit2d, self.I_fit2d = numpy.loadtxt(self.fit2dFile, unpack=True)
self.ai = AzimuthalIntegrator(dist=1.994993e-01,
poni1=2.143248e-01,
poni2=2.133315e-01,
rot1=0.007823,
rot2=0.006716,
rot3=0,
pixel1=172e-6,
pixel2=172e-6)
self.data = fabio.open(self.pilatusFile).data
self.data[self.data < 0] = 0 # discard negative pixels
def testSolidAngle(self):
"""
This dataset goes up to 56 deg, very good to test the solid angle correction:
any error will show up.
fit2d makes the correction in 1/cos^3(2th) (without tilt). pyFAI used to correct in 1/cos(2th)
"""
tth, I_nogood = self.ai.integrate1d(self.data, 1770, unit="2th_deg", radial_range=[0, 56], method="splitBBox", correctSolidAngle=False)
delta_tth = abs(tth - self.tth_fit2d).max()
delta_I = abs(I_nogood - self.I_fit2d).max()
I = abs(I_nogood - self.I_fit2d).mean()
self.assert_(delta_tth < 1e-5, 'Error on 2th position: %s <1e-5' % delta_tth)
self.assert_(delta_I > 100, 'Errors on (wrong) I are large: %s >100' % delta_I)
self.assert_(I > 2, 'Errors on (wrong) I are large: %s >2' % I)
tth, I_good = self.ai.integrate1d(self.data, 1770, unit="2th_deg", radial_range=[0, 56], method="splitBBox", correctSolidAngle=3)
delta_tth = abs(tth - self.tth_fit2d).max()
delta_I = abs(I_good - self.I_fit2d).max()
I = abs(I_good - self.I_fit2d).mean()
self.assert_(delta_tth < 1e-5, 'Error on 2th position: %s <1e-5' % delta_tth)
self.assert_(delta_I < 5, 'Errors on (good) I are small: %s <5' % delta_I)
self.assert_(I < 0.05, 'Errors on (good) I are small: %s <0.05' % I)
class TestBug88SolidAngle(unittest.TestCase):
"""
Test case for solid angle where data got modified inplace.
https://github.com/kif/pyFAI/issues/88
"""
def testSolidAngle(self):
img = numpy.ones((1000, 1000), dtype=numpy.float32)
ai = pyFAI.AzimuthalIntegrator(dist=0.01, detector="Titan", wavelength=1e-10)
t = ai.integrate1d(img, 1000, method="numpy")[1].max()
f = ai.integrate1d(img, 1000, method="numpy", correctSolidAngle=False)[1].max()
self.assertAlmostEqual(f, 1, 5, "uncorrected flat data are unchanged")
self.assertNotAlmostEqual(f, t, 1, "corrected and uncorrected flat data are different")
class ParameterisedTestCase(unittest.TestCase):
""" TestCase classes that want to be parameterised should
inherit from this class.
From Eli Bendersky's website
http://eli.thegreenplace.net/2011/08/02/python-unit-testing-parametrized-test-cases/
"""
def __init__(self, methodName='runTest', param=None):
super(ParameterisedTestCase, self).__init__(methodName)
self.param = param
@staticmethod
def parameterise(testcase_klass, param=None):
""" Create a suite containing all tests taken from the given
subclass, passing them the parameter 'param'.
"""
testloader = unittest.TestLoader()
testnames = testloader.getTestCaseNames(testcase_klass)
suite = unittest.TestSuite()
for name in testnames:
suite.addTest(testcase_klass(name, param=param))
return suite
class TestGeometry(ParameterisedTestCase):
def testGeometryFunctions(self):
func, statargs, varargs, kwds, expectedFail = self.param
kwds["pixel1"] = 1
kwds["pixel2"] = 1
g = geometry.Geometry(**kwds)
g.wavelength = 1e-10
t0 = time.time()
oldret = getattr(g, func)(*statargs, path=varargs[0])
t1 = time.time()
newret = getattr(g, func)(*statargs, path=varargs[1])
t2 = time.time()
logger.debug("TIMINGS\t meth: %s t=%.3fs\t meth: %s t=%.3fs" % (varargs[0], t1 - t0, varargs[1], t2 - t1))
maxDelta = abs(oldret - newret).max()
msg = "geo=%s%s max delta=%.3f" % (g, os.linesep, maxDelta)
if expectedFail:
self.assertNotAlmostEquals(maxDelta, 0, 3, msg)
else:
self.assertAlmostEquals(maxDelta, 0, 3, msg)
logger.info(msg)
size = 1024
d1, d2 = numpy.mgrid[-size:size:32, -size:size:32]
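# each entry: (method name, pixel coordinates, (path A, path B) to compare, geometry kwargs, expected-failure flag)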
TESTCASES = [
("tth", (d1, d2), ("cos", "tan"), {'dist':1, 'rot1':0, 'rot2':0, 'rot3':0}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':-1, 'rot2':1, 'rot3':1}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':-.2, 'rot2':1, 'rot3':-.1}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':-1, 'rot2':-.2, 'rot3':1}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':1, 'rot2':5, 'rot3':.4}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':-1.2, 'rot2':1, 'rot3':1}, False),
("tth", (d1, d2), ("cos", "tan"), {'dist':1e10, 'rot1':-2, 'rot2':2, 'rot3':1}, False),
("tth", (d1, d2), ("cos", "tan"), {'dist':1, 'rot1':3, 'rot2':0, 'rot3':0}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':-1, 'rot2':1, 'rot3':3}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':-.2, 'rot2':1, 'rot3':-.1}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':-3, 'rot2':-.2, 'rot3':1}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':1, 'rot2':5, 'rot3':.4}, False),
("tth", (d1, d2), ("cos", "tan"), {'rot1':-1.2, 'rot2':1.6, 'rot3':1}, False),
("tth", (d1, d2), ("cos", "tan"), {'dist':1e10, 'rot1':0, 'rot2':0, 'rot3':0}, False),
("tth", (d1, d2), ("tan", "cython"), {'dist':1, 'rot1':0, 'rot2':0, 'rot3':0}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':-1, 'rot2':1, 'rot3':1}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':-.2, 'rot2':1, 'rot3':-.1}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':-1, 'rot2':-.2, 'rot3':1}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':1, 'rot2':5, 'rot3':.4}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':-1.2, 'rot2':1, 'rot3':1}, False),
("tth", (d1, d2), ("tan", "cython"), {'dist':1e10, 'rot1':-2, 'rot2':2, 'rot3':1}, False),
("tth", (d1, d2), ("tan", "cython"), {'dist':1, 'rot1':3, 'rot2':0, 'rot3':0}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':-1, 'rot2':1, 'rot3':3}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':-.2, 'rot2':1, 'rot3':-.1}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':-3, 'rot2':-.2, 'rot3':1}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':1, 'rot2':5, 'rot3':.4}, False),
("tth", (d1, d2), ("tan", "cython"), {'rot1':-1.2, 'rot2':1.6, 'rot3':1}, False),
("tth", (d1, d2), ("tan", "cython"), {'dist':1e10, 'rot1':0, 'rot2':0, 'rot3':0}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'dist':1, 'rot1':0, 'rot2':0, 'rot3':0}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':-1, 'rot2':1, 'rot3':1}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':-.2, 'rot2':1, 'rot3':-.1}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':-1, 'rot2':-.2, 'rot3':1}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':1, 'rot2':5, 'rot3':.4}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':-1.2, 'rot2':1, 'rot3':1}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'dist':1e10, 'rot1':-2, 'rot2':2, 'rot3':1}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'dist':1, 'rot1':3, 'rot2':0, 'rot3':0}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':-1, 'rot2':1, 'rot3':3}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':-.2, 'rot2':1, 'rot3':-.1}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':-3, 'rot2':-.2, 'rot3':1}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':1, 'rot2':5, 'rot3':.4}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'rot1':-1.2, 'rot2':1.6, 'rot3':1}, False),
("qFunction", (d1, d2), ("cython", "tan"), {'dist':1e10, 'rot1':0, 'rot2':0, 'rot3':0}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'dist':1, 'rot1':0, 'rot2':0, 'rot3':0}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':-1, 'rot2':1, 'rot3':1}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':-.2, 'rot2':1, 'rot3':-.1}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':-1, 'rot2':-.2, 'rot3':1}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':1, 'rot2':5, 'rot3':.4}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':-1.2, 'rot2':1, 'rot3':1}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'dist':1e10, 'rot1':-2, 'rot2':2, 'rot3':1}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'dist':1, 'rot1':3, 'rot2':0, 'rot3':0}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':-1, 'rot2':1, 'rot3':3}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':-.2, 'rot2':1, 'rot3':-.1}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':-3, 'rot2':-.2, 'rot3':1}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':1, 'rot2':5, 'rot3':.4}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'rot1':-1.2, 'rot2':1.6, 'rot3':1}, False),
("rFunction", (d1, d2), ("cython", "numpy"), {'dist':1e10, 'rot1':0, 'rot2':0, 'rot3':0}, False),
]
def test_suite_all_Geometry():
testSuite = unittest.TestSuite()
testSuite.addTest(TestSolidAngle("testSolidAngle"))
testSuite.addTest(TestBug88SolidAngle("testSolidAngle"))
for param in TESTCASES:
testSuite.addTest(ParameterisedTestCase.parameterise(
TestGeometry, param))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_Geometry()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/profile_hdf5.py 0000755 0001773 0001774 00000006310 12413320556 017527 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/python
#coding: utf-8
from __future__ import division, with_statement, print_function
__doc__ = "Benchmark for HDF5 writing"
__author__ ="Jérôme Kieffer"
__date__ = "2014-09-24"
import os
import time
from argparse import ArgumentParser
import tempfile
import numpy
from pyFAI import io
import logging
logger = logging.getLogger("Bench_hdf5")
logger.setLevel(logging.INFO)
def parse():
"""
Parse command line arguments
"""
parser = ArgumentParser(description=__doc__)
parser.add_argument('-d', '--dir', dest='directory', default=tempfile.gettempdir(),
help='Destination directory (/tmp)')
parser.add_argument('-n', '--number', dest='n', default=1024, type=int,
help='Number of frames to write')
parser.add_argument('-w', '--width', dest='width', default=1024, type=int,
help='width of a frame (1024)')
parser.add_argument('-H', '--height', dest='height', default=1024, type=int,
help='height of the image (1024)')
parser.add_argument('-t', '--type', dest='dtype', default="float32", type=str,
help='data type of item (float32)')
parser.add_argument('-b', '--bsize', dest='bsize', default=10, type=int,
help='size of the random buffer for frames (10)')
opt = parser.parse_args()
return opt
def bench_hdf5(n=1024, shape=(1024, 1024), dtype="float32", dirname=None, bsize=10):
"""
Actually performs the HDF5 writing benchmark
@param n: number of frames to be written
@param shape: 2-tuple of integers describing the shape of the image
@param bsize: number of frames in buffer
"""
tmp_dir = tempfile.mkdtemp(dir=dirname)
h5file = os.path.join(tmp_dir, "junk.h5")
logger.info("Writing large dataset %ix(%i,%i) of %s to %s." % (n, shape[0], shape[1], dtype, h5file))
dtype = numpy.dtype(dtype)
if dtype.kind == "f":
data = numpy.random.random((bsize, shape[0], shape[1])).astype(dtype)
elif dtype.name.find("int") >= 0:
size = bsize * shape[0] * shape[1]
maxi = 2 ** (dtype.itemsize * 8 - 1) - 1
data = numpy.random.random_integers(0, maxi, size=size).astype(dtype)
data.shape = (bsize, shape[0], shape[1])
else:
raise RuntimeError("unhandled data type %s" % dtype)
size = n * shape[0] * shape[1]
nbytes = size * dtype.itemsize
nmbytes = nbytes / 1e6
t0 = time.time()
writer = io.HDF5Writer(filename=h5file, hpath="data")
writer.init({"nbpt_azim": shape[0], "nbpt_rad": shape[1], "dtype": dtype.name})
for i in range(n):
writer.write(data[i % bsize], i)
writer.close()
t = time.time() - t0
bps = nbytes / t
logger.info("Writing of %.3fMB in HDF5 took %.3fs (%.3f MByte/s)" % (nmbytes, t, nmbytes / t))
statinfo = os.stat(h5file)
assert statinfo.st_size > nbytes
# Clean up
os.unlink(h5file)
os.removedirs(tmp_dir)
return bps
if __name__ == "__main__":
opts = parse()
print(bench_hdf5(dirname=opts.directory,
n=opts.n,
shape=(opts.height, opts.width),
dtype=opts.dtype,
bsize=opts.bsize))
pyFAI-0.11.0/test/test_convolution.py 0000644 0001773 0001774 00000006621 12527541311 020601 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
#
"test suite for convolution cython code"
__author__ = "Jérôme Kieffer"
__contact__ = "Jérôme Kieffer"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import sys
import unittest
import numpy
if __name__ == '__main__':
import pkgutil, os
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import getLogger # UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI import _convolution
import scipy.ndimage, scipy.misc, scipy.signal
class TestConvolution(unittest.TestCase):
def setUp(self):
self.sigma = 1
self.width = 8 * self.sigma + 1
if self.width % 2 == 0:
self.width += 1
self.gauss = scipy.signal.gaussian(self.width, self.sigma)
self.gauss /= self.gauss.sum()
self.lena = scipy.misc.lena().astype("float32")
def test_gaussian(self):
gauss = _convolution.gaussian(self.sigma)
self.assert_(numpy.allclose(gauss, self.gauss), "gaussian curves are the same")
def test_horizontal_convolution(self):
gauss = self.gauss.astype(numpy.float32)
ref = scipy.ndimage.filters.convolve1d(self.lena, self.gauss, axis=-1)
obt = _convolution.horizontal_convolution(self.lena, gauss)
self.assert_(numpy.allclose(ref, obt), "horizontal filtered images are the same")
def test_vertical_convolution(self):
gauss = self.gauss.astype(numpy.float32)
ref = scipy.ndimage.filters.convolve1d(self.lena, self.gauss, axis=0)
obt = _convolution.vertical_convolution(self.lena, gauss)
self.assert_(numpy.allclose(ref, obt), "vertical filtered images are the same")
def test_gaussian_filter(self):
ref = scipy.ndimage.filters.gaussian_filter(self.lena, self.sigma)
obt = _convolution.gaussian_filter(self.lena, self.sigma)
self.assert_(numpy.allclose(ref, obt), "gaussian filtered images are the same")
def test_suite_all_convolution():
testSuite = unittest.TestSuite()
testSuite.addTest(TestConvolution("test_horizontal_convolution"))
testSuite.addTest(TestConvolution("test_vertical_convolution"))
testSuite.addTest(TestConvolution("test_gaussian"))
testSuite.addTest(TestConvolution("test_gaussian_filter"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_convolution()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/test_detector.py 0000755 0001773 0001774 00000026247 12544200060 020034 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal Integration
# https://github.com/pyFAI/pyFAI
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
#
"test suite for masked arrays"
__author__ = "Picca Frédéric-Emmanuel, Jérôme Kieffer",
__contact__ = "picca@synchrotron-soleil.fr"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "05/06/2015"
import sys
import os
import tempfile
import shutil
import unittest
import numpy
import time
if __name__ == '__main__':
import pkgutil
__path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import getLogger # UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]
from pyFAI.detectors import detector_factory, ALL_DETECTORS
from pyFAI import io
class TestDetector(unittest.TestCase):
def test_detector_instanciate(self):
"""
this method tries to instantiate all the detectors
"""
for k, v in ALL_DETECTORS.items():
logger.debug(k)
v()
def test_detector_imxpad_s140(self):
"""
The masked image has a masked ring around 1.5 deg with value -10.
Without the mask the pixels should be at -10; with the mask they
are at 0.
"""
imxpad = detector_factory("imxpad_s140")
# check that the cartesian coordinates is cached
self.assertEqual(hasattr(imxpad, '_pixel_edges'), True)
self.assertEqual(imxpad._pixel_edges, None)
y, x, z = imxpad.calc_cartesian_positions()
self.assertEqual(imxpad._pixel_edges is None, False)
# now check that the cached values are identical for each
# method call
y1, x1, z1 = imxpad.calc_cartesian_positions()
self.assertEqual(numpy.all(numpy.equal(y1, y)), True)
self.assertEqual(numpy.all(numpy.equal(x1, x)), True)
self.assertEqual(z, None)
self.assertEqual(z1, None)
# check that a few pixel positions are ok.
self.assertAlmostEqual(y[0, 0], 2.5 * 130e-6 / 2.)
self.assertAlmostEqual(y[3, 0], y[2, 0] + 130e-6)
self.assertAlmostEqual(y[119, 0], y[118, 0] + 130e-6 * 3.5 / 2.)
self.assertAlmostEqual(x[0, 0], 2.5 * 130e-6 / 2.)
self.assertAlmostEqual(x[0, 3], x[0, 2] + 130e-6)
self.assertAlmostEqual(x[0, 79], x[0, 78] + 130e-6 * 3.5 / 2.)
def test_detector_rayonix_sx165(self):
"""
rayonix detectors have different pixel size depending on the binning.
Check that the set_binning method works for the sx_165
#personal communication of M. Blum:
self.desired_pixelsizes[4096] = 39.500
self.desired_pixelsizes[2048] = 79.000
self.desired_pixelsizes[1364] = 118.616
self.desired_pixelsizes[1024] = 158.000
self.desired_pixelsizes[512] = 316.000
"""
sx165 = detector_factory("rayonixsx165")
# check the default pixels size and the default binning
self.assertAlmostEqual(sx165.pixel1, 395e-7)
self.assertAlmostEqual(sx165.pixel2, 395e-7)
self.assertEqual(sx165.binning, (1, 1))
# check binning 1
sx165.binning = 1
self.assertAlmostEqual(sx165.pixel1, 395e-7)
self.assertAlmostEqual(sx165.pixel2, 395e-7)
self.assertEqual(sx165.binning, (1, 1))
# check binning 2
sx165.binning = 2
self.assertAlmostEqual(sx165.pixel1, 79e-6)
self.assertAlmostEqual(sx165.pixel2, 79e-6)
self.assertEqual(sx165.binning, (2, 2))
# check binning 4
sx165.binning = 4
self.assertAlmostEqual(sx165.pixel1, 158e-6)
self.assertAlmostEqual(sx165.pixel2, 158e-6)
self.assertEqual(sx165.binning, (4, 4))
# check binning 8
sx165.binning = 8
self.assertAlmostEqual(sx165.pixel1, 316e-6)
self.assertAlmostEqual(sx165.pixel2, 316e-6)
self.assertEqual(sx165.binning, (8, 8))
# check a non standard binning
sx165.binning = 10
self.assertAlmostEqual(sx165.pixel1, sx165.pixel2)
def test_nexus_detector(self):
tmpdir = tempfile.mkdtemp()
known_fail = []
if io.h5py is None:
logger.warning("H5py not present, skipping test_detector.TestDetector.test_nexus_detector")
return
for det_name in ALL_DETECTORS:
fname = os.path.join(tmpdir, det_name + ".h5")
if os.path.exists(fname): # already tested with another alias
continue
det = detector_factory(det_name)
if (det.pixel1 is None) or (det.shape is None):
continue
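# round-trip the detector through a NeXus/HDF5 file and compare the restored geometry with the original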
det.save(fname)
new_det = detector_factory(fname)
for what in ("pixel1", "pixel2", "name", "max_shape", "shape", "binning"):
if "__len__" in dir(det.__getattribute__(what)):
self.assertEqual(det.__getattribute__(what), new_det.__getattribute__(what), "%s is the same for %s" % (what, fname))
else:
self.assertAlmostEqual(det.__getattribute__(what), new_det.__getattribute__(what), 4, "%s is the same for %s" % (what, fname))
if (det.mask is not None) or (new_det.mask is not None):
self.assert_(numpy.allclose(det.mask, new_det.mask), "%s mask is not the same" % det_name)
if det.shape[0] > 2000:
continue
try:
r = det.calc_cartesian_positions()
o = new_det.calc_cartesian_positions()
except MemoryError:
logger.warning("Test nexus_detector failed due to short memory on detector %s" % det_name)
continue
self.assertEqual(len(o), len(r), "data have same dimension")
err1 = abs(r[0] - o[0]).max()
err2 = abs(r[1] - o[1]).max()
if det.name in known_fail:
continue
if err1 > 1e-6:
logger.error("%s precision on pixel position 1 is better than 1µm, got %e" % (det_name, err1))
if err2 > 1e-6:
logger.error("%s precision on pixel position 1 is better than 1µm, got %e" % (det_name, err2))
# self.assert_(err1 < 1e-6, "%s precision on pixel position 1 is better than 1µm, got %e" % (det_name, err1))
# self.assert_(err2 < 1e-6, "%s precision on pixel position 2 is better than 1µm, got %e" % (det_name, err2))
if not det.IS_FLAT:
err = abs(r[2] - o[2]).max()
self.assert_(err < 1e-6, "%s precision on pixel position 3 is better than 1µm, got %e" % (det_name, err))
# check Pilatus with displacement maps
# check spline
# check SPD displacement
shutil.rmtree(tmpdir)
def test_guess_binning(self):
# Mar 345 2300 pixels with 150 micron size
mar = detector_factory("mar345")
shape = 2300, 2300
mar.guess_binning(shape)
self.assertEqual(shape, mar.mask.shape, "Mar345 detector has right mask shape")
self.assertEqual(mar.pixel1, 150e-6, "Mar345 detector has pixel size 150µ")
mar = detector_factory("mar345")
shape = 3450, 3450
mar.guess_binning(shape)
self.assertEqual(shape, mar.mask.shape, "Mar345 detector has right mask shape")
self.assertEqual(mar.pixel1, 100e-6, "Mar345 detector has pixel size 100µ")
mar = detector_factory("mar165")
shape = 1364, 1364
mar.guess_binning(shape)
self.assertEqual(shape, mar.mask.shape, "Mar165 detector has right mask shape")
self.assertEqual(mar.pixel1, 118.616e-6, "Mar165 detector has pixel size 118.616µ")
self.assertEqual(mar.binning, (3, 3), "Mar165 has 3x3 binning")
mar = detector_factory("RayonixLx170")
shape = 192, 384
mar.guess_binning(shape)
self.assertEqual(mar.binning, (10, 10), "RayonixLx170 has 10x10 binning")
p = detector_factory("Perkin")
self.assertEqual(p.pixel1, 200e-6, "raw detector has good pixel size")
self.assertEqual(p.binning, (2, 2), "raw detector has good pixel binning")
p.guess_binning((4096, 4096))
self.assertEqual(p.pixel1, 100e-6, "unbinned detector has good pixel size")
self.assertEqual(p.binning, (1, 1), "unbinned detector has good pixel binning")
def test_Xpad_flat(self):
d = detector_factory("Xpad S540 flat")
cy = d.calc_cartesian_positions(use_cython=True)
np = d.calc_cartesian_positions(use_cython=False)
self.assert_(numpy.allclose(cy[0], np[0]), "max_delta1=%s" % abs(cy[0] - np[0]).max())
self.assert_(numpy.allclose(cy[1], np[1]), "max_delta2=%s" % abs(cy[1] - np[1]).max())
def test_non_flat(self):
"""
tests specific to non flat detectors to ensure consistency
"""
a = detector_factory("Aarhus")
t0 = time.time()
n = a.get_pixel_corners(use_cython=False)
t1 = time.time()
a._pixel_corners = None
c = a.get_pixel_corners(use_cython=True)
t2 = time.time()
logger.info("Aarhus.get_pixel_corners timing Numpy: %.3fs Cython: %.3fs" % (t1 - t0, t2 - t1))
self.assert_(abs(n - c).max() < 1e-6, "get_pixel_corners cython == numpy")
# test pixel center coordinates
t0 = time.time()
n1, n2, n3 = a.calc_cartesian_positions(use_cython=False)
t1 = time.time()
c1, c2, c3 = a.calc_cartesian_positions(use_cython=True)
t2 = time.time()
logger.info("Aarhus.calc_cartesian_positions timing Numpy: %.3fs Cython: %.3fs" % (t1 - t0, t2 - t1))
self.assert_(abs(n1 - c1).max() < 1e-6, "cartesian coord1 cython == numpy")
self.assert_(abs(n2 - c2).max() < 1e-6, "cartesian coord2 cython == numpy")
self.assert_(abs(n3 - c3).max() < 1e-6, "cartesian coord3 cython == numpy")
def test_suite_all_detectors():
testSuite = unittest.TestSuite()
testSuite.addTest(TestDetector("test_detector_instanciate"))
testSuite.addTest(TestDetector("test_detector_imxpad_s140"))
testSuite.addTest(TestDetector("test_detector_rayonix_sx165"))
testSuite.addTest(TestDetector("test_nexus_detector"))
testSuite.addTest(TestDetector("test_guess_binning"))
testSuite.addTest(TestDetector("test_Xpad_flat"))
testSuite.addTest(TestDetector("test_non_flat"))
return testSuite
if __name__ == '__main__':
mysuite = test_suite_all_detectors()
runner = unittest.TextTestRunner()
runner.run(mysuite)
pyFAI-0.11.0/test/moke.poni 0000644 0001773 0001774 00000000403 12321446554 016430 0 ustar kieffer kieffer 0000000 0000000 # Nota: C-Order, 1 refers to the Y axis, 2 to the X axis
PixelSize1: 0.0001
PixelSize2: 0.0001
Distance: 0.100009884958
Poni1: 0.0300547166356
Poni2: 0.0299202317436
Rot1: -0.000795975753765
Rot2: -0.000523723398451
Rot3: -3.17876930818e-11
SplineFile: None
pyFAI-0.11.0/test/test_polarization.py 0000755 0001773 0001774 00000010741 12527541311 020736 0 ustar kieffer kieffer 0000000 0000000 #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Project: Fast Azimuthal integration
# https://github.com/kif/pyFAI
#
#
# Copyright (C) European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see .
#
"test suite for polarization corrections"
__author__ = "Jérôme Kieffer"
__contact__ = "Jerome.Kieffer@ESRF.eu"
__license__ = "GPLv3+"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "15/12/2014"
import unittest
import os
import numpy
import logging, time
import sys
import fabio
if __name__ == '__main__':
    import pkgutil, os
    __path__ = pkgutil.extend_path([os.path.dirname(__file__)], "pyFAI.test")
from .utilstest import UtilsTest, Rwp, getLogger
logger = getLogger(__file__)
pyFAI = sys.modules["pyFAI"]

if logger.getEffectiveLevel() <= logging.INFO:
    import pylab
class TestPolarization(unittest.TestCase):
    shape = (13, 13)
    Y, X = numpy.ogrid[-6:7, -6:7]
    rotY = numpy.radians(30.0 * Y)
    rotX = numpy.radians(30.0 * X)
    tth = numpy.sqrt(rotY ** 2 + rotX ** 2)
    chi = numpy.arctan2(rotY, rotX)
    # print numpy.degrees(tth[6])
    # print numpy.degrees(chi[6])
    # print numpy.degrees(tth[:, 6])
    # print numpy.degrees(chi[:, 6])
    ai = pyFAI.AzimuthalIntegrator(dist=1, pixel1=0.1, pixel2=0.1)
    ai._ttha = tth
    ai._chia = chi

    def testNoPol(self):
        "without polarization correction the factor should be 1"
        self.assert_(abs(self.ai.polarization(factor=None) - numpy.ones(self.shape)).max() == 0, "without polarization correction should be 1")

    def testCircularPol(self):
        "Circular polarization should decay as (1 + (cos2θ)^2) / 2"
        pol = (1.0 + numpy.cos(self.tth) ** 2) / 2.0
        self.assert_(abs(self.ai.polarization(factor=0) - pol).max() == 0, "with circular polarization correction is independent of chi")
        self.assert_(abs(self.ai.polarization(factor=0, axis_offset=1) - pol).max() == 0, "with circular polarization correction is independent of chi")

    def testHorizPol(self):
        "Horizontal polarization should decay as (cos2θ)**2 in the horizontal plane and need no correction in the vertical one"
        self.assert_(abs(self.ai.polarization(factor=1)[:, 6] - numpy.ones(13)).max() == 0, "No correction in the vertical plane")
        self.assert_(abs(self.ai.polarization(factor=1)[6] - numpy.cos(self.rotX) ** 2).max() < 1e-15, "cos(2th)^2 like in the horizontal plane")

    def testVertPol(self):
        "Vertical polarization should decay as (cos2θ)**2 in the vertical plane and need no correction in the horizontal one"
        self.assert_(abs(self.ai.polarization(factor=-1)[6] - numpy.ones(13)).max() == 0, "No correction in the horizontal plane")
        self.assert_(abs(self.ai.polarization(factor=-1)[:, 6] - (numpy.cos((2 * self.rotX)) + 1) / 2).max() < 1e-15, "cos(2th)^2 like in the vertical plane")

    def testoffsetPol(self):
        "test for the rotation of the polarization axis"
        self.assert_(abs(self.ai.polarization(factor=1, axis_offset=numpy.pi / 2)[6] - numpy.ones(13)).max() == 0, "No correction in the horizontal plane")
        self.assert_(abs(self.ai.polarization(factor=1, axis_offset=numpy.pi / 2)[:, 6] - (numpy.cos((2 * self.rotX)) + 1) / 2).max() < 1e-15, "cos(2th)^2 like in the vertical plane")
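The special planes pinned down by these tests are all consistent with the textbook polarization factor P(2θ, χ) = (1 + cos²(2θ) − p·cos(2χ)·sin²(2θ)) / 2, where p is the polarization factor and χ is counted from the horizontal axis. Below is a self-contained sketch of that relation, independent of pyFAI; the helper name and tolerances are ours, not taken from the library:

import numpy

def polarization_factor(tth, chi, p):
    # Textbook form: p=0 circular, p=+1 horizontal, p=-1 vertical polarization; tth is 2theta.
    return 0.5 * (1.0 + numpy.cos(tth) ** 2 - p * numpy.cos(2.0 * chi) * numpy.sin(tth) ** 2)

# Same 13x13 grid of scattering (tth) and azimuthal (chi) angles as in the test class above.
Y, X = numpy.ogrid[-6:7, -6:7]
rotY, rotX = numpy.radians(30.0 * Y), numpy.radians(30.0 * X)
tth = numpy.sqrt(rotY ** 2 + rotX ** 2)
chi = numpy.arctan2(rotY, rotX)

# p=0 reduces to (1 + cos^2(2theta)) / 2, independent of chi.
assert abs(polarization_factor(tth, chi, 0.0) - (1.0 + numpy.cos(tth) ** 2) / 2.0).max() < 1e-15
# p=1: no attenuation in the vertical plane, cos^2(2theta) decay in the horizontal one.
assert abs(polarization_factor(tth, chi, 1.0)[:, 6] - 1.0).max() < 1e-15
assert abs(polarization_factor(tth, chi, 1.0)[6] - numpy.cos(rotX) ** 2).max() < 1e-15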
def test_suite_all_polarization():
    testSuite = unittest.TestSuite()
    testSuite.addTest(TestPolarization("testNoPol"))
    testSuite.addTest(TestPolarization("testCircularPol"))
    testSuite.addTest(TestPolarization("testHorizPol"))
    testSuite.addTest(TestPolarization("testVertPol"))
    testSuite.addTest(TestPolarization("testoffsetPol"))
    # testSuite.addTest(TestPolarization("test2th"))
    return testSuite


if __name__ == '__main__':
    mysuite = test_suite_all_polarization()
    runner = unittest.TextTestRunner()
    runner.run(mysuite)
pyFAI-0.11.0/src/ 0000755 0001773 0001774 00000000000 12553735716 014431 5 ustar kieffer kieffer 0000000 0000000 pyFAI-0.11.0/src/fastcrc.c 0000644 0001773 0001774 00000617412 12527541311 016220 0 ustar kieffer kieffer 0000000 0000000 /* Generated by Cython 0.21.2 */
#define PY_SSIZE_T_CLEAN
#ifndef CYTHON_USE_PYLONG_INTERNALS
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 0
#else
#include "pyconfig.h"
#ifdef PYLONG_BITS_IN_DIGIT
#define CYTHON_USE_PYLONG_INTERNALS 1
#else
#define CYTHON_USE_PYLONG_INTERNALS 0
#endif
#endif
#endif
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
#error Cython requires Python 2.6+ or Python 3.2+.
#else
#define CYTHON_ABI "0_21_2"
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) \
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyType_Type
#endif
#if PY_MAJOR_VERSION >= 3
#define Py_TPFLAGS_CHECKTYPES 0
#define Py_TPFLAGS_HAVE_INDEX 0
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#if PY_VERSION_HEX < 0x030400a1 && !defined(Py_TPFLAGS_HAVE_FINALIZE)
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ? \
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#else
#define CYTHON_PEP393_ENABLED 0
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#define __Pyx_PyFrozenSet_Size(s) PyObject_Size(s)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ? \
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#define __Pyx_PyFrozenSet_Size(s) PySet_Size(s)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#ifndef CYTHON_INLINE
#if defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
/* Initialize NaN. The sign is irrelevant, an exponent with all bits 1 and
a nonzero mantissa means NaN. If the first bit in the mantissa is 1, it is
a quiet NaN. */
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#ifdef __cplusplus
template<class T>
void __Pyx_call_destructor(T* x) {
x->~T();
}
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#define __PYX_HAVE__src__fastcrc
#define __PYX_HAVE_API__src__fastcrc
#include "string.h"
#include "stdio.h"
#include "stdlib.h"
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
#include "stdint.h"
#include "crc32.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#ifdef PYREX_WITHOUT_ASSERTIONS
#define CYTHON_WITHOUT_ASSERTIONS
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
typedef struct {PyObject **p; char *s; const Py_ssize_t n; const char* encoding;
const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) ( \
(sizeof(type) < sizeof(Py_ssize_t)) || \
(sizeof(type) > sizeof(Py_ssize_t) && \
likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX) && \
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN || \
v == (type)PY_SSIZE_T_MIN))) || \
(sizeof(type) == sizeof(Py_ssize_t) && \
(is_signed || likely(v < (type)PY_SSIZE_T_MAX || \
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromUString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromUString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromUString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromUString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromUString(s) __Pyx_PyUnicode_FromString((const char*)s)
#if PY_MAJOR_VERSION < 3
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u)
{
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#else
#define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen
#endif
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_Owned_Py_None(b) (Py_INCREF(Py_None), Py_None)
#define __Pyx_PyBool_FromLong(b) ((b) ? (Py_INCREF(Py_True), Py_True) : (Py_INCREF(Py_False), Py_False))
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_Int(PyObject* x);
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_COMPILING_IN_CPYTHON
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c));
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static PyObject *__pyx_m;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char * __pyx_cfilenm= __FILE__;
static const char *__pyx_filename;
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"src/fastcrc.pyx",
"__init__.pxd",
"type.pxd",
};
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":723
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":724
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":725
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":726
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":730
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":731
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":732
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":733
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":737
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":738
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":747
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":748
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":749
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":751
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":752
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":753
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":755
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":756
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":758
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":759
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":760
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
/*--- Type declarations ---*/
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":762
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":763
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":764
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":766
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
if (acquire_gil) { \
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure(); \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
PyGILState_Release(__pyx_gilstate_save); \
} else { \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__); \
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil) \
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext() \
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_XDECREF(tmp); \
} while (0)
#define __Pyx_DECREF_SET(r, v) do { \
PyObject *tmp = (PyObject *) r; \
r = v; __Pyx_DECREF(tmp); \
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed,
const char *name, int exact);
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
static CYTHON_INLINE void __Pyx_ErrRestore(PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetch(PyObject **type, PyObject **value, PyObject **tb);
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
typedef struct {
int code_line;
PyCodeObject* code_object;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
static CYTHON_INLINE npy_uint32 __Pyx_PyInt_As_npy_uint32(PyObject *);
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint32_t(uint32_t value);
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if (defined(_WIN32) || defined(__clang__)) && defined(__cplusplus) && CYTHON_CCOMPLEX
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eqf(a, b) ((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
static int __Pyx_check_binary_version(void);
#if !defined(__Pyx_PyIdentifier_FromString)
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s)
#else
#define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s)
#endif
#endif
static PyObject *__Pyx_ImportModule(const char *name);
static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict);
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
/* Module declarations from 'cython' */
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from 'cpython.object' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'libc.stdlib' */
/* Module declarations from 'numpy' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/
/* Module declarations from 'libc.stdint' */
/* Module declarations from 'src.crc32' */
/* Module declarations from 'src.fastcrc' */
#define __Pyx_MODULE_NAME "src.fastcrc"
int __pyx_module_is_main_src__fastcrc = 0;
/* Implementation of 'src.fastcrc' */
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_RuntimeError;
static PyObject *__pyx_pf_3src_7fastcrc_crc32(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_data); /* proto */
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */
static char __pyx_k_B[] = "B";
static char __pyx_k_H[] = "H";
static char __pyx_k_I[] = "I";
static char __pyx_k_L[] = "L";
static char __pyx_k_O[] = "O";
static char __pyx_k_Q[] = "Q";
static char __pyx_k_b[] = "b";
static char __pyx_k_d[] = "d";
static char __pyx_k_f[] = "f";
static char __pyx_k_g[] = "g";
static char __pyx_k_h[] = "h";
static char __pyx_k_i[] = "i";
static char __pyx_k_l[] = "l";
static char __pyx_k_q[] = "q";
static char __pyx_k_Zd[] = "Zd";
static char __pyx_k_Zf[] = "Zf";
static char __pyx_k_Zg[] = "Zg";
static char __pyx_k_data[] = "data";
static char __pyx_k_date[] = "__date__";
static char __pyx_k_main[] = "__main__";
static char __pyx_k_size[] = "size";
static char __pyx_k_test[] = "__test__";
static char __pyx_k_GPLv3[] = "GPLv3+";
static char __pyx_k_crc32[] = "crc32";
static char __pyx_k_numpy[] = "numpy";
static char __pyx_k_range[] = "range";
static char __pyx_k_author[] = "__author__";
static char __pyx_k_import[] = "__import__";
static char __pyx_k_nbytes[] = "nbytes";
static char __pyx_k_contact[] = "__contact__";
static char __pyx_k_license[] = "__license__";
static char __pyx_k_19_11_2012[] = "19-11-2012";
static char __pyx_k_ValueError[] = "ValueError";
static char __pyx_k_src_fastcrc[] = "src.fastcrc";
static char __pyx_k_RuntimeError[] = "RuntimeError";
static char __pyx_k_Jerome_Kieffer[] = "Jerome Kieffer";
static char __pyx_k_Jerome_kieffer_esrf_fr[] = "Jerome.kieffer@esrf.fr";
static char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous";
static char __pyx_k_Simple_Cython_module_for_doing[] = "\nSimple Cython module for doing CRC32 for checksums, possibly with SSE4 acceleration\n";
static char __pyx_k_home_jerome_workspace_pyFAI_src[] = "/home/jerome/workspace/pyFAI/src/fastcrc.pyx";
static char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)";
static char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd";
static char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported";
static char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous";
static char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short.";
static PyObject *__pyx_kp_s_19_11_2012;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor;
static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2;
static PyObject *__pyx_kp_s_GPLv3;
static PyObject *__pyx_kp_s_Jerome_Kieffer;
static PyObject *__pyx_kp_s_Jerome_kieffer_esrf_fr;
static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor;
static PyObject *__pyx_n_s_RuntimeError;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_author;
static PyObject *__pyx_n_s_contact;
static PyObject *__pyx_n_s_crc32;
static PyObject *__pyx_n_s_data;
static PyObject *__pyx_n_s_date;
static PyObject *__pyx_kp_s_home_jerome_workspace_pyFAI_src;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_license;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_nbytes;
static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous;
static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_src_fastcrc;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_codeobj__8;
/* "src/fastcrc.pyx":39
*
*
* def crc32(numpy.ndarray data not None): # <<<<<<<<<<<<<<
* """
* Calculate the CRC32 checksum of a numpy array
*/
/* Python wrapper */
static PyObject *__pyx_pw_3src_7fastcrc_1crc32(PyObject *__pyx_self, PyObject *__pyx_v_data); /*proto*/
static char __pyx_doc_3src_7fastcrc_crc32[] = "\n Calculate the CRC32 checksum of a numpy array\n @param data: a numpy array\n @return unsigned integer\n ";
static PyMethodDef __pyx_mdef_3src_7fastcrc_1crc32 = {"crc32", (PyCFunction)__pyx_pw_3src_7fastcrc_1crc32, METH_O, __pyx_doc_3src_7fastcrc_crc32};
static PyObject *__pyx_pw_3src_7fastcrc_1crc32(PyObject *__pyx_self, PyObject *__pyx_v_data) {
CYTHON_UNUSED int __pyx_lineno = 0;
CYTHON_UNUSED const char *__pyx_filename = NULL;
CYTHON_UNUSED int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("crc32 (wrapper)", 0);
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_data), __pyx_ptype_5numpy_ndarray, 0, "data", 0))) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 39; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_r = __pyx_pf_3src_7fastcrc_crc32(__pyx_self, ((PyArrayObject *)__pyx_v_data));
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_3src_7fastcrc_crc32(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_data) {
__pyx_t_5numpy_uint32_t __pyx_v_size;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
__pyx_t_5numpy_uint32_t __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("crc32", 0);
/* "src/fastcrc.pyx":45
* @return unsigned integer
* """
* cdef numpy.uint32_t size = data.nbytes # <<<<<<<<<<<<<<
* return C_crc32(<char *> data.data, size)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_data), __pyx_n_s_nbytes); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyInt_As_npy_uint32(__pyx_t_1); if (unlikely((__pyx_t_2 == (npy_uint32)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 45; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_v_size = __pyx_t_2;
/* "src/fastcrc.pyx":46
* """
* cdef numpy.uint32_t size = data.nbytes
* return C_crc32(<char *> data.data, size) # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_uint32_t(crc32(((char *)__pyx_v_data->data), __pyx_v_size)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[0]; __pyx_lineno = 46; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "src/fastcrc.pyx":39
*
*
* def crc32(numpy.ndarray data not None): # <<<<<<<<<<<<<<
* """
* Calculate the CRC32 checksum of a numpy array
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("src.fastcrc.crc32", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
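/*
 * For orientation when reading the generated wrapper above: __pyx_pf_3src_7fastcrc_crc32
 * only forwards the array's raw buffer and byte count to the C crc32() helper declared in
 * "crc32.h".  At the Python level the module is used roughly as sketched below; this is a
 * hedged example (import path and data are illustrative, not taken from the pyFAI sources):
 *
 *     import numpy
 *     from pyFAI import fastcrc
 *     data = numpy.arange(16, dtype="uint8")
 *     checksum = fastcrc.crc32(data)     # unsigned 32-bit integer, as per the docstring above
 *     print("0x%08x" % checksum)
 */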
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":194
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
* # requirements, and does not yet fullfill the PEP.
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_copy_shape;
int __pyx_v_i;
int __pyx_v_ndim;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
int __pyx_v_t;
char *__pyx_v_f;
PyArray_Descr *__pyx_v_descr = 0;
int __pyx_v_offset;
int __pyx_v_hasfields;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getbuffer__", 0);
if (__pyx_v_info != NULL) {
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":200
* # of flags
*
* if info == NULL: return # <<<<<<<<<<<<<<
*
* cdef int copy_shape, i, ndim
*/
__pyx_t_1 = ((__pyx_v_info == NULL) != 0);
if (__pyx_t_1) {
__pyx_r = 0;
goto __pyx_L0;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":203
*
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((&endian_detector)[0] != 0)
*
*/
__pyx_v_endian_detector = 1;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":204
* cdef int copy_shape, i, ndim
* cdef int endian_detector = 1
* cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
*
* ndim = PyArray_NDIM(self)
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":206
* cdef bint little_endian = ((&endian_detector)[0] != 0)
*
* ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<<
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_v_ndim = PyArray_NDIM(__pyx_v_self);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":208
* ndim = PyArray_NDIM(self)
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* copy_shape = 1
* else:
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":209
*
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* copy_shape = 1 # <<<<<<<<<<<<<<
* else:
* copy_shape = 0
*/
__pyx_v_copy_shape = 1;
goto __pyx_L4;
}
/*else*/ {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":211
* copy_shape = 1
* else:
* copy_shape = 0 # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
*/
__pyx_v_copy_shape = 0;
}
__pyx_L4:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":213
* copy_shape = 0
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L6_bool_binop_done;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":214
*
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not C contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L6_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":215
* if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)):
* raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<<
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 215; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":217
* raise ValueError(u"ndarray is not C contiguous")
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<<
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous")
*/
__pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L9_bool_binop_done;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":218
*
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<<
* raise ValueError(u"ndarray is not Fortran contiguous")
*
*/
__pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L9_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":219
* if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS)
* and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)):
* raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<<
*
* info.buf = PyArray_DATA(self)
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 219; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":221
* raise ValueError(u"ndarray is not Fortran contiguous")
*
* info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<<
* info.ndim = ndim
* if copy_shape:
*/
__pyx_v_info->buf = PyArray_DATA(__pyx_v_self);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":222
*
* info.buf = PyArray_DATA(self)
* info.ndim = ndim # <<<<<<<<<<<<<<
* if copy_shape:
* # Allocate new buffer for strides and shape info.
*/
__pyx_v_info->ndim = __pyx_v_ndim;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":223
* info.buf = PyArray_DATA(self)
* info.ndim = ndim
* if copy_shape: # <<<<<<<<<<<<<<
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
*/
__pyx_t_1 = (__pyx_v_copy_shape != 0);
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":226
* # Allocate new buffer for strides and shape info.
* # This is allocated as one block, strides first.
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2) # <<<<<<<<<<<<<<
* info.shape = info.strides + ndim
* for i in range(ndim):
*/
__pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2)));
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":227
* # This is allocated as one block, strides first.
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2)
* info.shape = info.strides + ndim # <<<<<<<<<<<<<<
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
*/
__pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":228
* info.strides = stdlib.malloc(sizeof(Py_ssize_t) * ndim * 2)
* info.shape = info.strides + ndim
* for i in range(ndim): # <<<<<<<<<<<<<<
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i]
*/
__pyx_t_4 = __pyx_v_ndim;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":229
* info.shape = info.strides + ndim
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<<
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
*/
(__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":230
* for i in range(ndim):
* info.strides[i] = PyArray_STRIDES(self)[i]
* info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<<
* else:
* info.strides = PyArray_STRIDES(self)
*/
(__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]);
}
goto __pyx_L11;
}
/*else*/ {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":232
* info.shape[i] = PyArray_DIMS(self)[i]
* else:
* info.strides = PyArray_STRIDES(self) # <<<<<<<<<<<<<<
* info.shape = PyArray_DIMS(self)
* info.suboffsets = NULL
*/
__pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self));
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":233
* else:
* info.strides = PyArray_STRIDES(self)
* info.shape = PyArray_DIMS(self) # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
*/
__pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self));
}
__pyx_L11:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":234
* info.strides = PyArray_STRIDES(self)
* info.shape = PyArray_DIMS(self)
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self)
*/
__pyx_v_info->suboffsets = NULL;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":235
* info.shape = PyArray_DIMS(self)
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<<
* info.readonly = not PyArray_ISWRITEABLE(self)
*
*/
__pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":236
* info.suboffsets = NULL
* info.itemsize = PyArray_ITEMSIZE(self)
* info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<<
*
* cdef int t
*/
__pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0));
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":239
*
* cdef int t
* cdef char* f = NULL # <<<<<<<<<<<<<<
* cdef dtype descr = self.descr
* cdef list stack
*/
__pyx_v_f = NULL;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":240
* cdef int t
* cdef char* f = NULL
* cdef dtype descr = self.descr # <<<<<<<<<<<<<<
* cdef list stack
* cdef int offset
*/
__pyx_t_3 = ((PyObject *)__pyx_v_self->descr);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_descr = ((PyArray_Descr *)__pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":244
* cdef int offset
*
* cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<<
*
* if not hasfields and not copy_shape:
*/
__pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":246
* cdef bint hasfields = PyDataType_HASFIELDS(descr)
*
* if not hasfields and not copy_shape: # <<<<<<<<<<<<<<
* # do not call releasebuffer
* info.obj = None
*/
__pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L15_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L15_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":248
* if not hasfields and not copy_shape:
* # do not call releasebuffer
* info.obj = None # <<<<<<<<<<<<<<
* else:
* # need to call releasebuffer
*/
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = Py_None;
goto __pyx_L14;
}
/*else*/ {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":251
* else:
* # need to call releasebuffer
* info.obj = self # <<<<<<<<<<<<<<
*
* if not hasfields:
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
}
__pyx_L14:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":253
* info.obj = self
*
* if not hasfields: # <<<<<<<<<<<<<<
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
*/
__pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0);
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":254
*
* if not hasfields:
* t = descr.type_num # <<<<<<<<<<<<<<
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
*/
__pyx_t_4 = __pyx_v_descr->type_num;
__pyx_v_t = __pyx_t_4;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":255
* if not hasfields:
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0);
if (!__pyx_t_2) {
goto __pyx_L20_next_or;
} else {
}
__pyx_t_2 = (__pyx_v_little_endian != 0);
if (!__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_L20_next_or:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":256
* t = descr.type_num
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
*/
__pyx_t_2 = ((__pyx_v_descr->byteorder == '<') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L19_bool_binop_done;
}
__pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L19_bool_binop_done:;
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":257
* if ((descr.byteorder == c'>' and little_endian) or
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 257; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":274
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
switch (__pyx_v_t) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":258
* (descr.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
*/
case NPY_BYTE:
__pyx_v_f = __pyx_k_b;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":259
* raise ValueError(u"Non-native byte order not supported")
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
*/
case NPY_UBYTE:
__pyx_v_f = __pyx_k_B;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":260
* if t == NPY_BYTE: f = "b"
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
*/
case NPY_SHORT:
__pyx_v_f = __pyx_k_h;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":261
* elif t == NPY_UBYTE: f = "B"
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
*/
case NPY_USHORT:
__pyx_v_f = __pyx_k_H;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":262
* elif t == NPY_SHORT: f = "h"
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
*/
case NPY_INT:
__pyx_v_f = __pyx_k_i;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":263
* elif t == NPY_USHORT: f = "H"
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
*/
case NPY_UINT:
__pyx_v_f = __pyx_k_I;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":264
* elif t == NPY_INT: f = "i"
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
*/
case NPY_LONG:
__pyx_v_f = __pyx_k_l;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":265
* elif t == NPY_UINT: f = "I"
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
*/
case NPY_ULONG:
__pyx_v_f = __pyx_k_L;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":266
* elif t == NPY_LONG: f = "l"
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
*/
case NPY_LONGLONG:
__pyx_v_f = __pyx_k_q;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":267
* elif t == NPY_ULONG: f = "L"
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
*/
case NPY_ULONGLONG:
__pyx_v_f = __pyx_k_Q;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":268
* elif t == NPY_LONGLONG: f = "q"
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
*/
case NPY_FLOAT:
__pyx_v_f = __pyx_k_f;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":269
* elif t == NPY_ULONGLONG: f = "Q"
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
*/
case NPY_DOUBLE:
__pyx_v_f = __pyx_k_d;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":270
* elif t == NPY_FLOAT: f = "f"
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
*/
case NPY_LONGDOUBLE:
__pyx_v_f = __pyx_k_g;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":271
* elif t == NPY_DOUBLE: f = "d"
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
*/
case NPY_CFLOAT:
__pyx_v_f = __pyx_k_Zf;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":272
* elif t == NPY_LONGDOUBLE: f = "g"
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O"
*/
case NPY_CDOUBLE:
__pyx_v_f = __pyx_k_Zd;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":273
* elif t == NPY_CFLOAT: f = "Zf"
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f = "O"
* else:
*/
case NPY_CLONGDOUBLE:
__pyx_v_f = __pyx_k_Zg;
break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":274
* elif t == NPY_CDOUBLE: f = "Zd"
* elif t == NPY_CLONGDOUBLE: f = "Zg"
* elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
case NPY_OBJECT:
__pyx_v_f = __pyx_k_O;
break;
default:
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":276
* elif t == NPY_OBJECT: f = "O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* info.format = f
* return
*/
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_6 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_6);
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 276; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
break;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":277
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f # <<<<<<<<<<<<<<
* return
* else:
*/
__pyx_v_info->format = __pyx_v_f;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":278
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* info.format = f
* return # <<<<<<<<<<<<<<
* else:
* info.format = stdlib.malloc(_buffer_format_string_len)
*/
__pyx_r = 0;
goto __pyx_L0;
}
/*else*/ {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":280
* return
* else:
* info.format = stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<<
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
*/
__pyx_v_info->format = ((char *)malloc(255));
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":281
* else:
* info.format = stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<<
* offset = 0
* f = _util_dtypestring(descr, info.format + 1,
*/
(__pyx_v_info->format[0]) = '^';
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":282
* info.format = stdlib.malloc(_buffer_format_string_len)
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0 # <<<<<<<<<<<<<<
* f = _util_dtypestring(descr, info.format + 1,
* info.format + _buffer_format_string_len,
*/
__pyx_v_offset = 0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":283
* info.format[0] = c'^' # Native data types, manual alignment
* offset = 0
* f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<<
* info.format + _buffer_format_string_len,
* &offset)
*/
__pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 255), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 283; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_7;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":286
* info.format + _buffer_format_string_len,
* &offset)
* f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<<
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
*/
(__pyx_v_f[0]) = '\x00';
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":194
* # experimental exception made for __getbuffer__ and __releasebuffer__
* # -- the details of this may change.
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<<
* # This implementation of getbuffer is geared towards Cython
 * # requirements, and does not yet fulfill the PEP.
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(Py_None);
__Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL;
}
__pyx_L2:;
__Pyx_XDECREF((PyObject *)__pyx_v_descr);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
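/*
 * Summary of the generated __getbuffer__ above (descriptive comment, not part
 * of the Cython output): it services PEP 3118 buffer requests for ndarray.
 * Shape and strides are exported directly when sizeof(npy_intp) ==
 * sizeof(Py_ssize_t), otherwise copied into a single malloc'd block (strides
 * first, shape after). info->format is a single static character for plain
 * scalar dtypes ("b", "i", "f", "Zd", ...); for structured dtypes a
 * _buffer_format_string_len byte buffer is malloc'd, prefixed with '^'
 * (native byte order, manual alignment) and filled by _util_dtypestring().
 * Non-native byte orders raise ValueError.
 */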
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":288
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* Python wrapper */
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/
static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0);
__pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__releasebuffer__", 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":289
*
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<<
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
*/
__pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0);
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":290
* def __releasebuffer__(ndarray self, Py_buffer* info):
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format) # <<<<<<<<<<<<<<
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides)
*/
free(__pyx_v_info->format);
goto __pyx_L3;
}
__pyx_L3:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":291
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<<
* stdlib.free(info.strides)
* # info.shape was stored after info.strides in the same block
*/
__pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0);
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":292
* stdlib.free(info.format)
* if sizeof(npy_intp) != sizeof(Py_ssize_t):
* stdlib.free(info.strides) # <<<<<<<<<<<<<<
* # info.shape was stored after info.strides in the same block
*
*/
free(__pyx_v_info->strides);
goto __pyx_L4;
}
__pyx_L4:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":288
* f[0] = c'\0' # Terminate format string
*
* def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<<
* if PyArray_HASFIELDS(self):
* stdlib.free(info.format)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
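/*
 * __releasebuffer__ mirrors the allocations made in __getbuffer__: the
 * malloc'd format string is freed only for structured dtypes
 * (PyArray_HASFIELDS), and the combined strides/shape block is freed only
 * when npy_intp and Py_ssize_t differ in size, i.e. exactly the cases in
 * which __getbuffer__ called malloc.
 */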
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":768
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":769
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 769; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":768
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":771
* return PyArray_MultiIterNew(1, a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, a, b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":772
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 772; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":771
* return PyArray_MultiIterNew(1, a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, a, b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":774
* return PyArray_MultiIterNew(2, a, b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, a, b, c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":775
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 775; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":774
* return PyArray_MultiIterNew(2, a, b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, a, b, c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":777
* return PyArray_MultiIterNew(3, a, b, c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, a, b, c, d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":778
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 778; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":777
* return PyArray_MultiIterNew(3, a, b, c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, a, b, c, d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":780
* return PyArray_MultiIterNew(4, a, b, c, d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, a, b, c, d, e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":781
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<<
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 781; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":780
* return PyArray_MultiIterNew(4, a, b, c, d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, a, b, c, d, e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
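/*
 * PyArray_MultiIterNew1..5 above are trivial inline wrappers that forward a
 * fixed number of operands to NumPy's variadic PyArray_MultiIterNew(), which
 * builds a broadcasting iterator over its arguments; the only extra work in
 * the generated code is reference counting and traceback handling.
 */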
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":783
* return PyArray_MultiIterNew(5, a, b, c, d, e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) {
PyArray_Descr *__pyx_v_child = 0;
int __pyx_v_endian_detector;
int __pyx_v_little_endian;
PyObject *__pyx_v_fields = 0;
PyObject *__pyx_v_childname = NULL;
PyObject *__pyx_v_new_offset = NULL;
PyObject *__pyx_v_t = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_t_7;
long __pyx_t_8;
char *__pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_util_dtypestring", 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":790
* cdef int delta_offset
* cdef tuple i
* cdef int endian_detector = 1 # <<<<<<<<<<<<<<
* cdef bint little_endian = ((&endian_detector)[0] != 0)
* cdef tuple fields
*/
__pyx_v_endian_detector = 1;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":791
* cdef tuple i
* cdef int endian_detector = 1
* cdef bint little_endian = ((&endian_detector)[0] != 0) # <<<<<<<<<<<<<<
* cdef tuple fields
*
*/
__pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
if (unlikely(__pyx_v_descr->names == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
for (;;) {
if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break;
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#else
__pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 794; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
#endif
__Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3);
__pyx_t_3 = 0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":795
*
* for childname in descr.names:
* fields = descr.fields[childname] # <<<<<<<<<<<<<<
* child, new_offset = fields
*
*/
__pyx_t_3 = PyObject_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(__pyx_t_3 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;};
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 795; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3));
__pyx_t_3 = 0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":796
* for childname in descr.names:
* fields = descr.fields[childname]
* child, new_offset = fields # <<<<<<<<<<<<<<
*
* if (end - f) - (new_offset - offset[0]) < 15:
*/
if (likely(__pyx_v_fields != Py_None)) {
PyObject* sequence = __pyx_v_fields;
#if CYTHON_COMPILING_IN_CPYTHON
Py_ssize_t size = Py_SIZE(sequence);
#else
Py_ssize_t size = PySequence_Size(sequence);
#endif
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
#if CYTHON_COMPILING_IN_CPYTHON
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
#endif
} else {
__Pyx_RaiseNoneNotIterableError(); {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 796; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3));
__pyx_t_3 = 0;
__Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":798
* child, new_offset = fields
*
* if (end - f) - (new_offset - offset[0]) < 15: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
*/
__pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 798; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0);
if (__pyx_t_6) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":799
*
* if (end - f) - (new_offset - offset[0]) < 15:
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<<
*
* if ((child.byteorder == c'>' and little_endian) or
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 799; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":801
* raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
*
* if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<<
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported")
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0);
if (!__pyx_t_7) {
goto __pyx_L8_next_or;
} else {
}
__pyx_t_7 = (__pyx_v_little_endian != 0);
if (!__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_L8_next_or:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":802
*
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<<
* raise ValueError(u"Non-native byte order not supported")
* # One could encode it in the format string and have Cython
*/
__pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0);
if (__pyx_t_7) {
} else {
__pyx_t_6 = __pyx_t_7;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0);
__pyx_t_6 = __pyx_t_7;
__pyx_L7_bool_binop_done:;
if (__pyx_t_6) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":803
* if ((child.byteorder == c'>' and little_endian) or
* (child.byteorder == c'<' and not little_endian)):
* raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<<
* # One could encode it in the format string and have Cython
* # complain instead, BUT: < and > in format strings also imply
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 803; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":813
*
* # Output padding bytes
* while offset[0] < new_offset: # <<<<<<<<<<<<<<
* f[0] = 120 # "x"; pad byte
* f += 1
*/
while (1) {
__pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 813; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (!__pyx_t_6) break;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":814
* # Output padding bytes
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<<
* f += 1
* offset[0] += 1
*/
(__pyx_v_f[0]) = 120;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":815
* while offset[0] < new_offset:
* f[0] = 120 # "x"; pad byte
* f += 1 # <<<<<<<<<<<<<<
* offset[0] += 1
*
*/
__pyx_v_f = (__pyx_v_f + 1);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":816
* f[0] = 120 # "x"; pad byte
* f += 1
* offset[0] += 1 # <<<<<<<<<<<<<<
*
* offset[0] += child.itemsize
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1);
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":818
* offset[0] += 1
*
* offset[0] += child.itemsize # <<<<<<<<<<<<<<
*
* if not PyDataType_HASFIELDS(child):
*/
__pyx_t_8 = 0;
(__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":820
* offset[0] += child.itemsize
*
* if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<<
* t = child.type_num
* if end - f < 5:
*/
__pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0);
if (__pyx_t_6) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":821
*
* if not PyDataType_HASFIELDS(child):
* t = child.type_num # <<<<<<<<<<<<<<
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.")
*/
__pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 821; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4);
__pyx_t_4 = 0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":822
* if not PyDataType_HASFIELDS(child):
* t = child.type_num
* if end - f < 5: # <<<<<<<<<<<<<<
* raise RuntimeError(u"Format string allocated too short.")
*
*/
__pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0);
if (__pyx_t_6) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":823
* t = child.type_num
* if end - f < 5:
* raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<<
*
* # Until ticket #99 is fixed, use integers to avoid warnings
*/
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 823; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":826
*
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<<
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
*/
__pyx_t_4 = PyInt_FromLong(NPY_BYTE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 826; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 98;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":827
* # Until ticket #99 is fixed, use integers to avoid warnings
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<<
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
*/
__pyx_t_3 = PyInt_FromLong(NPY_UBYTE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 827; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 66;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":828
* if t == NPY_BYTE: f[0] = 98 #"b"
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<<
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
*/
__pyx_t_4 = PyInt_FromLong(NPY_SHORT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 828; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 104;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":829
* elif t == NPY_UBYTE: f[0] = 66 #"B"
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<<
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
*/
__pyx_t_3 = PyInt_FromLong(NPY_USHORT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 829; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 72;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":830
* elif t == NPY_SHORT: f[0] = 104 #"h"
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<<
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
*/
__pyx_t_4 = PyInt_FromLong(NPY_INT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 830; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 105;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":831
* elif t == NPY_USHORT: f[0] = 72 #"H"
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<<
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
*/
__pyx_t_3 = PyInt_FromLong(NPY_UINT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 831; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 73;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":832
* elif t == NPY_INT: f[0] = 105 #"i"
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<<
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
*/
__pyx_t_4 = PyInt_FromLong(NPY_LONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 832; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 108;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":833
* elif t == NPY_UINT: f[0] = 73 #"I"
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<<
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
*/
__pyx_t_3 = PyInt_FromLong(NPY_ULONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 833; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 76;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":834
* elif t == NPY_LONG: f[0] = 108 #"l"
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<<
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
*/
__pyx_t_4 = PyInt_FromLong(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 834; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 113;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":835
* elif t == NPY_ULONG: f[0] = 76 #"L"
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<<
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
*/
__pyx_t_3 = PyInt_FromLong(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 835; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 81;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":836
* elif t == NPY_LONGLONG: f[0] = 113 #"q"
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<<
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
*/
__pyx_t_4 = PyInt_FromLong(NPY_FLOAT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 836; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 102;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":837
* elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<<
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
*/
__pyx_t_3 = PyInt_FromLong(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 837; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 100;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":838
* elif t == NPY_FLOAT: f[0] = 102 #"f"
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<<
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
*/
__pyx_t_4 = PyInt_FromLong(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 838; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 103;
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":839
* elif t == NPY_DOUBLE: f[0] = 100 #"d"
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<<
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
*/
__pyx_t_3 = PyInt_FromLong(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 839; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 102;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":840
* elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<<
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O"
*/
__pyx_t_4 = PyInt_FromLong(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 840; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 100;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":841
* elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<<
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
*/
__pyx_t_3 = PyInt_FromLong(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 841; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 90;
(__pyx_v_f[1]) = 103;
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L15;
}
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":842
* elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
* elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
* elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<<
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
*/
__pyx_t_4 = PyInt_FromLong(NPY_OBJECT); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 842; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (__pyx_t_6) {
(__pyx_v_f[0]) = 79;
goto __pyx_L15;
}
/*else*/ {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":844
* elif t == NPY_OBJECT: f[0] = 79 #"O"
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<<
* f += 1
* else:
*/
__pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
{__pyx_filename = __pyx_f[1]; __pyx_lineno = 844; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
}
__pyx_L15:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":845
* else:
* raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
* f += 1 # <<<<<<<<<<<<<<
* else:
* # Cython ignores struct boundary information ("T{...}"),
*/
__pyx_v_f = (__pyx_v_f + 1);
goto __pyx_L13;
}
/*else*/ {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":849
* # Cython ignores struct boundary information ("T{...}"),
* # so don't output it
* f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<<
* return f
*
*/
__pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) {__pyx_filename = __pyx_f[1]; __pyx_lineno = 849; __pyx_clineno = __LINE__; goto __pyx_L1_error;}
__pyx_v_f = __pyx_t_9;
}
__pyx_L13:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":794
* cdef tuple fields
*
* for childname in descr.names: # <<<<<<<<<<<<<<
* fields = descr.fields[childname]
* child, new_offset = fields
*/
}
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":850
* # so don't output it
* f = _util_dtypestring(child, f, end, offset)
* return f # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_f;
goto __pyx_L0;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":783
* return PyArray_MultiIterNew(5, a, b, c, d, e)
*
* cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<<
* # Recursive utility function used in __getbuffer__ to get format
* # string. The new location in the format string is returned.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_child);
__Pyx_XDECREF(__pyx_v_fields);
__Pyx_XDECREF(__pyx_v_childname);
__Pyx_XDECREF(__pyx_v_new_offset);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
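/* Note (illustrative comment, not emitted by Cython): the integer constants
 * assigned to f[0]/f[1] in _util_dtypestring above are ASCII codes of the
 * PEP 3118 buffer-format characters: 90='Z', 100='d', 102='f', 103='g',
 * 79='O'.  A minimal sketch of how the same codes can be observed from
 * Python (assuming only that numpy is importable):
 *
 *     import numpy
 *     # complex128 exports as "Zd" (Z-prefixed double),
 *     # matching the f[0]=90, f[1]=100 branch above
 *     print(memoryview(numpy.zeros(3, dtype=numpy.complex128)).format)
 */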
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":966
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
PyObject *__pyx_v_baseptr;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":968
* cdef inline void set_array_base(ndarray arr, object base):
* cdef PyObject* baseptr
* if base is None: # <<<<<<<<<<<<<<
* baseptr = NULL
* else:
*/
__pyx_t_1 = (__pyx_v_base == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":969
* cdef PyObject* baseptr
* if base is None:
* baseptr = NULL # <<<<<<<<<<<<<<
* else:
* Py_INCREF(base) # important to do this before decref below!
*/
__pyx_v_baseptr = NULL;
goto __pyx_L3;
}
/*else*/ {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":971
* baseptr = NULL
* else:
* Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<<
* baseptr = base
* Py_XDECREF(arr.base)
*/
Py_INCREF(__pyx_v_base);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":972
* else:
* Py_INCREF(base) # important to do this before decref below!
* baseptr = base # <<<<<<<<<<<<<<
* Py_XDECREF(arr.base)
* arr.base = baseptr
*/
__pyx_v_baseptr = ((PyObject *)__pyx_v_base);
}
__pyx_L3:;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":973
* Py_INCREF(base) # important to do this before decref below!
* baseptr = base
* Py_XDECREF(arr.base) # <<<<<<<<<<<<<<
* arr.base = baseptr
*
*/
Py_XDECREF(__pyx_v_arr->base);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":974
* baseptr = base
* Py_XDECREF(arr.base)
* arr.base = baseptr # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
__pyx_v_arr->base = __pyx_v_baseptr;
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":966
*
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* cdef PyObject* baseptr
* if base is None:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
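/* Note (illustrative comment, not emitted by Cython): set_array_base()
 * transfers ownership of `base` to the array: the new base is INCREF'ed
 * before the old arr.base is XDECREF'ed, so the swap is safe even if the
 * new and old base happen to be the same object.  The effect is the same
 * base-tracking visible from Python through the ndarray `base` attribute,
 * e.g. (sketch, assuming numpy is importable):
 *
 *     import numpy
 *     a = numpy.arange(10)
 *     b = a[::2]          # a view; numpy records a as b's base
 *     print(b.base is a)  # True
 */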
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":976
* arr.base = baseptr
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* if arr.base is NULL:
* return None
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":977
*
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL: # <<<<<<<<<<<<<<
* return None
* else:
*/
__pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0);
if (__pyx_t_1) {
/* "../../../../usr/lib/python2.7/dist-packages/Cython/Includes/numpy/__init__.pxd":978
* cdef inline object get_array_base(ndarray arr):
* if arr.base is NULL:
* return None # <<<<<<<<<<<<<<
* else:
* return