diff --git a/.travis.yml b/.travis.yml
index 4818ef2..e6bdb69 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,8 +1,7 @@
 language: python
 python:
-  - "2.7"
-  - "3.5"
+  - "3.6"
 
 sudo: false
 
@@ -11,11 +10,7 @@ install:
   # conda line below will keep everything up-to-date. We do this
   # conditionally because it saves us some downloading if the version is
   # the same.
-  - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
-      wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh;
-    else
-      wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
-    fi
+  - wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
   - bash miniconda.sh -b -p $HOME/miniconda
   - export PATH="$HOME/miniconda/bin:$PATH"
   - hash -r
diff --git a/README.md b/README.md
index 13f442a..d3f9160 100644
--- a/README.md
+++ b/README.md
@@ -49,6 +49,28 @@
 In the near future:
 
 - JIT classes for all interpolation objects
 
+## jitted, non-uniform multilinear interpolation
+
+There is a simple `interp` function with a flexible API which performs multilinear interpolation on uniform or non-uniform cartesian grids.
+
+```
+### 1d grid
+import numpy as np
+from interpolation import interp
+
+x = np.linspace(0,1,100)**2   # non-uniform points
+y = np.linspace(0,1,100)      # values
+
+# interpolate at one point:
+interp(x,y,0.5)
+
+# or at many points:
+u = np.linspace(0,1,1000)     # points
+interp(x,y,u)
+```
+
+
+
 ## smolyak interpolation
diff --git a/examples/example_mlinterp.py b/examples/example_mlinterp.py
new file mode 100644
index 0000000..80b7b13
--- /dev/null
+++ b/examples/example_mlinterp.py
@@ -0,0 +1,129 @@
+import numpy as np
+
+from numba import generated_jit
+import ast
+
+C = ((0.1,0.2),(0.1,0.2))
+l = (0.1,0.5)
+
+from interpolation.multilinear.fungen import extract_row, tensor_reduction
+
+tensor_reduction(C,l)
+
+ll = np.row_stack([1-np.array(l),l])
+ll
+np.einsum('ij,i,j', np.array(C), ll[0,:], ll[1,:])
+
+A = np.random.random((5,5))
+extract_row(A, 1, (2,2))
+
+from interpolation.multilinear.fungen import get_coeffs
+get_coeffs(A, (1,2))
+
+##########
+# interp #
+##########
+
+### 1d interpolation
+
+from interpolation import interp
+
+x = np.linspace(0,1,100)**2   # non-uniform points
+y = np.linspace(0,1,100)      # values
+
+# interpolate at one point:
+interp(x,y,0.5)
+
+# or at many points:
+u = np.linspace(0,1,1000)     # points
+interp(x,y,u)
+
+# one can also iterate at low cost since the function is jittable:
+from numba import njit
+@njit
+def vec_eval(u):
+    N = u.shape[0]
+    out = np.zeros(N)
+    for n in range(N):
+        out[n] = interp(x,y,u[n])
+    return out
+
+print( abs(vec_eval(u) - interp(x,y,u)).max())
+
+
+### 2d interpolation (same for higher dimensions)
+
+
+from interpolation import interp
+
+x1 = np.linspace(0,1,100)**2   # non-uniform points
+x2 = np.linspace(0,1,100)**2   # non-uniform points
+y = np.array([[np.sqrt(u1**2 + u2**2) for u2 in x2] for u1 in x1])
+# (y[i,j] = sqrt(x1[i]**2+x2[j]**2))
+
+
+# interpolate at one point:
+
+interp(x1,x2,y,0.5,0.2)
+interp(x1,x2,y,(0.5,0.2))
+
+# or at many points: (each row corresponds to one observation)
+points = np.random.random((1000,2))
+interp(x1,x2,y,points)
+
+from numba import njit
+@njit
+def vec_eval(p):
+    N = p.shape[0]
+    out = np.zeros(N)
+    for n in range(N):
+        z1 = p[n,0]
+        z2 = p[n,1]
+        out[n] = interp(x1,x2,y,z1,z2)
+    return out
+
+print( abs(vec_eval(points) - interp(x1,x2,y,points)).max())
+
+
+# in the special case where the points at which one wants to interpolate
+# form a cartesian grid, one can use another call style:
+
+z1 = np.linspace(0,1,100)
+z2 = np.linspace(0,1,100)
+out = interp(x1,x2,y,z1,z2)
+# out[i,j] contains f(z1[i],z2[j])
+
+
+
+############
+# mlinterp #
+############
+
+# same as interp, but with a less flexible and more general API
+
+from interpolation import mlinterp
+
+x1 = np.linspace(0,1,100)**2   # non-uniform points for the first dimension
+x2 = (0.0, 1.0, 100)           # uniform points for the second dimension: (start, stop, npoints)
+grid = (x1,x2)
+y = np.array([[np.sqrt(u1**2 + u2**2) for u2 in np.linspace(0,1,100)] for u1 in x1])
+
+
+points = np.random.random((1000,2))
+
+# vectorized call:
+mlinterp(grid, y, points)
+
+# non-vectorized call (note: the third argument must be a tuple of floats of the right size)
+mlinterp(grid, y, (0.4, 0.2))
+
+# arbitrary number of dimensions
+
+d = 4
+K = 100
+N = 10000
+grid = (np.linspace(0,1,K),)*d
+y = np.random.random((K,)*d)
+z = np.random.random((N,d))
+
+mlinterp(grid,y,z)
diff --git a/interpolation/__init__.py b/interpolation/__init__.py
index e69de29..5e70b33 100644
--- a/interpolation/__init__.py
+++ b/interpolation/__init__.py
@@ -0,0 +1 @@
+from .multilinear.mlinterp import interp, mlinterp
diff --git a/interpolation/multilinear/__init__.py b/interpolation/multilinear/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/interpolation/multilinear/fungen.py b/interpolation/multilinear/fungen.py
new file mode 100644
index 0000000..296036f
--- /dev/null
+++ b/interpolation/multilinear/fungen.py
@@ -0,0 +1,189 @@
+import numba
+import numpy as np
+from numba import float64, int64
+from numba import generated_jit, njit
+import ast
+
+from numba.extending import overload
+from numba.types.containers import Tuple, UniTuple
+
+
+####################
+# Dimension helper #
+####################
+
+t_coord = numba.typeof((2.3,2.4,1))           # type of an evenly spaced dimension
+t_array = numba.typeof(np.array([4.0, 3.9]))  # type of an unevenly spaced dimension
+
+# returns the interval index and the barycentric coordinate of a point along a 1d dimension
+@generated_jit(nopython=True)
+def get_index(gc, x):
+    if gc == t_coord:
+        # regular coordinate
+        def fun(gc,x):
+            δ = (gc[1]-gc[0])/(gc[2]-1)
+            d = x-gc[0]
+            ii = d // δ
+            r = d-ii*δ
+            i = int(ii)
+            λ = r/δ
+            return (i, λ)
+        return fun
+    else:
+        # irregular coordinate
+        def fun(gc,x):
+            i = int(np.searchsorted(gc, x))-1
+            λ = (x-gc[i])/(gc[i+1]-gc[i])
+            return (i, λ)
+        return fun
+
+# returns the number of points along a dimension
+@generated_jit(nopython=True)
+def get_size(gc):
+    if gc == t_coord:
+        # regular coordinate
+        def fun(gc):
+            return gc[2]
+        return fun
+    else:
+        # irregular coordinate
+        def fun(gc):
+            return len(gc)
+        return fun
+
+#####################
+# Generator helpers #
+#####################
+
+# the next functions replace the use of generators, with the difference that the
+# output is a tuple whose length is known to the jit compiler.
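+#
+# for intuition, a rough pure-python sketch of the behaviour `fmap` is meant to
+# emulate (the name `fmap_py` is only illustrative and is not defined anywhere) is:
+#
+#     def fmap_py(f, t, *rest):
+#         if len(rest) == 0:
+#             return tuple(f(e) for e in t)
+#         elif isinstance(rest[0], tuple):
+#             return tuple(f(e, r) for (e, r) in zip(t, rest[0]))
+#         else:
+#             return tuple(f(e, rest[0]) for e in t)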
+
+# example:
+# ```
+# def f(x): return x**2
+# fmap(f, (1,2,3)) -> (1,4,9)
+# def g(x,y): return x**2 + y
+# fmap(g, (1,2,3), 0.1) -> (1.1,4.1,9.1)             # (g(1,0.1), g(2,0.1), g(3,0.1))
+# fmap(g, (1,2,3), (0.1,0.2,0.3)) -> (1.1,4.2,9.3)   # (g(1,0.1), g(2,0.2), g(3,0.3))
+# ```
+
+
+def fmap():
+    pass
+
+@overload(fmap)
+def _map(*args):
+
+    if len(args)==2 and isinstance(args[1], (Tuple, UniTuple)):
+        k = len(args[1])
+        s = "def __map(f, t): return ({}, )".format(str.join(', ',['f(t[{}])'.format(i) for i in range(k)]))
+    elif len(args)==3 and isinstance(args[1], (Tuple, UniTuple)):
+        k = len(args[1])
+        if isinstance(args[2], (Tuple, UniTuple)):
+            if len(args[2]) != k:
+                # we don't know what to do in this case
+                return None
+            s = "def __map(f, t1, t2): return ({}, )".format(str.join(', ',['f(t1[{}], t2[{}])'.format(i,i) for i in range(k)]))
+        else:
+            s = "def __map(f, t1, x): return ({}, )".format(str.join(', ',['f(t1[{}], x)'.format(i,i) for i in range(k)]))
+    else:
+        return None
+    d = {}
+    eval(compile(ast.parse(s),'','exec'), d)
+    return d['__map']
+
+
+# note that `fmap` does nothing in pure-python mode...
+# an alternative would be
+#
+# @njit
+# def _fmap():
+#     pass
+#
+# @overload(_fmap)
+# ...
+#
+# @njit
+# def fmap(*args):
+#     return _fmap(*args)
+#
+# but this seems to come with a performance cost.
+# It is also possible to overload `map`, but we would risk
+# a future conflict with the map api.
+
+
+# funzip(((1,2), (2,3), (4,3))) -> ((1,2,4),(2,3,3))
+
+@generated_jit(nopython=True)
+def funzip(t):
+    k = t.count
+    assert(len(set([e.count for e in t.types]))==1)
+    l = t.types[0].count
+    def print_tuple(t): return "({},)".format(str.join(", ", t))
+    tab = [ [ 't[{}][{}]'.format(i,j) for i in range(k)] for j in range(l) ]
+    s = "def funzip(t): return {}".format(print_tuple( [print_tuple(e) for e in tab] ))
+    d = {}
+    eval(compile(ast.parse(s),'','exec'), d)
+    return d['funzip']
+
+
+#####
+# array subscripting:
+# when X is a 2d array and I=(i,j) a 2d index, `get_coeffs(X,I)`
+# extracts `X[i:i+2,j:j+2]` but represents it as a tuple of tuples, so that
+# the number of its elements can be inferred by the compiler
+#####
+
+
+@generated_jit(nopython=True)
+def get_coeffs(X,I):
+    if X.ndim>len(I):
+        print("not implemented yet")
+    else:
+        from itertools import product
+        d = len(I)
+        mat = np.array( ["X[{}]".format(str.join(',', e)) for e in product(*[(f"I[{j}]", f"I[{j}]+1") for j in range(d)])] ).reshape((2,)*d)
+        mattotup = lambda mat: mat if isinstance(mat,str) else "({})".format(str.join(',',[mattotup(e) for e in mat]))
+        t = mattotup(mat)
+        s = "def get_coeffs(X,I): return {}".format(t)
+        dd = {}
+        eval(compile(ast.parse(s),'','exec'), dd)
+        return dd['get_coeffs']
+
+# tensor_reduction(C,l)
+# (in 2d) computes the equivalent of np.einsum('ij,i,j->', C, [1-l[0],l[0]], [1-l[1],l[1]])
+# but where l is a tuple and C a tuple of tuples.
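+#
+# as an illustration with hypothetical values: in 2d, with C = ((c00, c01), (c10, c11))
+# and l = (l0, l1), the generated expression is the bilinear blend
+#
+#     (1-l0)*((1-l1)*c00 + l1*c01) + l0*((1-l1)*c10 + l1*c11)
+#
+# so, e.g., tensor_reduction(((0.0, 1.0), (2.0, 3.0)), (0.5, 0.5)) should return 1.5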
+
+# this one is a temporary implementation (should be merged with the old gen_splines* code)
+def gen_tensor_reduction(X, symbs, inds=[]):
+    if len(symbs) == 0:
+        return '{}[{}]'.format(X, str.join('][',[str(e) for e in inds]))
+    else:
+        h = symbs[0]
+        q = symbs[1:]
+        exprs = [ '{}*({})'.format(h if i==1 else '(1-{})'.format(h),gen_tensor_reduction(X, q,inds + [i])) for i in range(2)]
+        return str.join( ' + ', exprs )
+
+
+@generated_jit(nopython=True)
+def tensor_reduction(C,l):
+    ex = gen_tensor_reduction('C', ['l[{}]'.format(i) for i in range(len(l.types))])
+    dd = dict()
+    s = "def tensor_reduction(C,l): return {}".format(ex)
+    eval(compile(ast.parse(s),'','exec'), dd)
+    return dd['tensor_reduction']
+
+@generated_jit(nopython=True)
+def extract_row(a, n, tup):
+    d = len(tup.types)
+    dd = {}
+    s = "def extract_row(a, n, tup): return ({},)".format(str.join(', ', [f"a[n,{i}]" for i in range(d)]))
+    eval(compile(ast.parse(s),'','exec'), dd)
+    return dd['extract_row']
diff --git a/interpolation/multilinear/mlinterp.py b/interpolation/multilinear/mlinterp.py
new file mode 100644
index 0000000..94cb9aa
--- /dev/null
+++ b/interpolation/multilinear/mlinterp.py
@@ -0,0 +1,194 @@
+# the following code implements a function
+#
+# interpolate(grid, c, u)
+#
+# where grid is a cartesian grid whose dimensions are not necessarily evenly spaced.
+# grids are represented by tuples like:
+# - `( (-1.0,1.0,10), (-1.0,1.0,20) )` : a `10x20` even cartesian grid on `[-1,1]^2`
+# - `( linspace(0,1,10), linspace(0,1,100)**2)` : a `10x100` uneven cartesian grid on `[0,1]^2`
+# - `( (0,1.0,10), linspace(0,1,100)**2)` : a `10x100` cartesian grid where the first dimension is evenly spaced and the second is not
+
+# there is only one (easy-to-read?) jitted implementation of `interpolate`, line 185.
+# it depends on several generated jit functions which dispatch the right behaviour.
+# in this example these helper functions are written by hand, but for any dimension
+# the code could be generated just-in-time as well.
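+
+# for illustration only (the shapes and values below are arbitrary), a call mixing
+# both kinds of dimensions could look like:
+#
+#     grid = ((0.0, 1.0, 10), np.linspace(0.0, 1.0, 100)**2)   # regular x irregular
+#     c = np.random.random((10, 100))                          # values on the grid
+#     mlinterp(grid, c, (0.5, 0.25))                           # one point (tuple)
+#     mlinterp(grid, c, np.random.random((1000, 2)))           # many points (2d array)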
+ + +################################# +# Actual interpolation function # +################################# + +from .fungen import fmap, funzip, get_coeffs, tensor_reduction, get_index, extract_row + +from numba import njit +from typing import Tuple + +from numba.types import UniTuple, Array, float64 +from numba.types import Float + +import numpy as np +from numba import generated_jit + +#logic of multilinear interpolation + + +@generated_jit +def mlinterp(grid, c, u): + if isinstance(u, UniTuple): + def mlininterp(grid: Tuple, c: Array, u: Tuple)->float: + # get indices and barycentric coordinates + tmp = fmap(get_index, grid, u) + indices, barycenters = funzip(tmp) + coeffs = get_coeffs(c, indices) + v = tensor_reduction(coeffs, barycenters) + return v + elif isinstance(u, Array) and u.ndim==2: + def mlininterp(grid: Tuple, c: Array, u: Array)->float: + N = u.shape[0] + res = np.zeros(N) + for n in range(N): + uu = extract_row(u, n, grid) + # get indices and barycentric coordinates + tmp = fmap(get_index, grid, uu) + indices, barycenters = funzip(tmp) + coeffs = get_coeffs(c, indices) + res[n] = tensor_reduction(coeffs, barycenters) + return res + else: + mlininterp = None + return mlininterp + +### The rest of this file constrcts function `interp` + +from collections import namedtuple +itt = namedtuple("InterpType", ['d', 'values', 'eval']) + + +def detect_types(args): + + dims = [e.ndim if isinstance(e,Array) else -1 for e in args] + + md = max(dims) + + if len(args)==3: + d=1 + i_C=1 + else: + i_C=dims.index(md) # index of coeffs + d=i_C + + if args[i_C].ndim == d: + values_type = 'scalar' + else: + assert(args[i_C].ndim==d+1) + values_type = 'vector' + + eval_args = args[(i_C+1):] + + if len(eval_args)>=2: + if set([isinstance(e,Array) for e in eval_args])==set([True]): + eval_type = 'cartesian' + assert(set([e.ndim for e in eval_args])==set([1])) + elif set([isinstance(e,Float) for e in eval_args])==set([True]): + eval_type = 'float' + else: + raise Exception("Undetected evaluation type.") + else: + if isinstance(eval_args[0], Array): + if eval_args[0].ndim==1: + eval_type = 'point' + elif eval_args[0].ndim==2: + eval_type = 'vectorized' + else: + raise Exception("Undetected evaluation type.") + elif isinstance(eval_args[0], UniTuple): + eval_type = 'tuple' + elif set([isinstance(e,Float) for e in eval_args])==set([True]): + eval_type = 'float' + else: + raise Exception("Undetected evaluation type.") + + return itt(d, values_type, eval_type) + + +def make_mlinterp(it, funname): + + if it.values =='vector': + return None + + if it.eval in ('float', 'tuple') and it.values =='vector': + # raise Exception("Non supported. 
(return type unknown)") + return None + + # grid = str.join(',', ['args[{}]'.format(i) for i in range(it.d)]) + grid_s = "({},)".format(str.join(',', [f"args[{i}]" for i in range(it.d)])) + if it.eval in ('float','point','tuple'): + if it.eval == 'float': + point_s = "({},)".format(str.join(',', [f"args[{it.d+i+1}]" for i in range(it.d)])) + # point_s = f"(args[{d+1}])" + elif it.eval == 'tuple': + point_s = f"args[{it.d+1}]" + else: + point_s = "({},)".format(str.join(',', [f"args[{it.d+1}][{i}]" for i in range(it.d)])) + + source = f"""\ +def {funname}(*args): + grid = {grid_s} + C = args[{it.d}] + point = {point_s} + res = mlinterp(grid, C, point) + return res + """ + return source + elif it.eval == "vectorized": + p_s = "({},)".format(str.join(',', [f"points[n,{i}]" for i in range(it.d)])) + source = f"""\ +from numpy import zeros +def {funname}(*args): + grid = {grid_s} + C = args[{it.d}] + points = args[{it.d+1}] + N = points.shape[0] + res = zeros(N) + # return res + for n in range(N): + res[n] = mlinterp(grid, C, {p_s}) + return res +""" + return source + + elif it.eval == 'cartesian': + if it.d != 2: + return None + source = f""" +from numpy import zeros +def {funname}(*args): + grid = {grid_s} + C = args[{it.d}] + points_x = args[3] + points_y = args[4] + N = points_x.shape[0] + M = points_y.shape[0] + res = zeros((N,M)) + for n in range(N): + for m in range(M): + res[n,m] = mlinterp(grid, C, (points_x[n], points_y[m])) + return res +""" + return source + + +from numba import generated_jit + +@generated_jit(nopython=True) +def interp(*args): + + aa = args[0].types + + it = detect_types(aa) + source = make_mlinterp(it,'__mlinterp') + import ast + tree = ast.parse(source) + code = compile(tree, "", "exec") + eval(code, globals()) + return __mlinterp diff --git a/interpolation/multilinear/tests/test_multilinear.py b/interpolation/multilinear/tests/test_multilinear.py new file mode 100644 index 0000000..945d58b --- /dev/null +++ b/interpolation/multilinear/tests/test_multilinear.py @@ -0,0 +1,171 @@ + +from numpy import linspace, array +from numpy.random import random +from numba import typeof + + +# 2d-vecev-scalar +a2 = (linspace(0,1,10), random((10)), random((200,1))) +# 2d-pointev-scalar +a3 = (linspace(0,1,10), random((10)), array([0.5])) +# 2d-tupev-scalar +a4 = (linspace(0,1,10), random((10)), (0.5,)) +# 2d-fev-scalar +a5 = (linspace(0,1,10), random((10)), 0.5) + +# 2d-carev-vec +b1 = (linspace(0,1,10), random((10,3)), linspace(0,1,200)) +# 2d-vecev-vec +b2 = (linspace(0,1,10), random((10,3)), random((200,1))) +# 2d-pointev-vec +b3 = (linspace(0,1,10), random((10,3)), array([0.5])) +# 2d-tupev-vec +b4 = (linspace(0,1,10), random((10,3)), (0.5,)) # unsupported +# 2d-fev-vec +b5 = (linspace(0,1,10), random((10,3)), 9.5) # unsupported + + +# 2d-carev-scalar +c1 = (linspace(0,1,10), linspace(0,1,20), random((10,20)), linspace(0,1,200), linspace(0,1,200)) +# 2d-vecev-scalar +c2 = (linspace(0,1,10), linspace(0,1,20), random((10,20)), random((200,2))) +# 2d-pointev-scalar +c3 = (linspace(0,1,10), linspace(0,1,20), random((10,20)), array([0.5,2.0])) +# 2d-tupev-scalar +c4 = (linspace(0,1,10), linspace(0,1,20), random((10,20)), (0.5,2.0)) +# 2d-fev-scalar +c5 = (linspace(0,1,10), linspace(0,1,20), random((10,20)), 0.5, 2.0) + +# 2d-carev-vecvec +d1 = (linspace(0,1,10), linspace(0,1,20), random((10,20,3)), linspace(0,1,200), linspace(0,1,200)) +# 2d-vecev-vec +d2 = (linspace(0,1,10), linspace(0,1,20), random((10,20,3)), random((200,2))) +# 2d-pointev-vec +d3 = (linspace(0,1,10), 
linspace(0,1,20), random((10,20,3)), array([0.5,2.0])) +# 2d-tupev-vec +d4 = (linspace(0,1,10), linspace(0,1,20), random((10,20,3)), (0.5,2.0)) # unsupported (return type not known) +d5 = (linspace(0,1,10), linspace(0,1,20), random((10,20,3)), 0.5, 2.0) # unsupported (return type not known) + + +tests = [a2, a3, a4, c1, c2, c3, c4] +tests_failing = [b1, b2, b3, b4, b5, d4, d5, d1, d2, d3, d4, d5] + +from interpolation.multilinear.mlinterp import mlinterp, interp + + +def test_mlinterp(): + + # simple multilinear interpolation api + + import numpy as np + from interpolation import mlinterp + + # from interpolation.multilinear.mlinterp import mlininterp, mlininterp_vec + x1 = np.linspace(0,1,10) + x2 = np.linspace(0,1,20) + y = np.random.random((10,20)) + + z1 = np.linspace(0,1,30) + z2 = np.linspace(0,1,30) + + pp = np.random.random((2000,2)) + + res0 = mlinterp((x1,x2), y, pp) + res0 = mlinterp((x1,x2), y, (0.1, 0.2)) + + +def test_multilinear(): + + # flat flexible api + + for t in tests: + + tt = [typeof(e) for e in t] + print(tt) + rr = interp(*t) + + try: + print(f"{tt}: {rr.shape}") + except: + print(f"{tt}: OK") + +# +# exit() +# +# ############################################################### +# # Now let's see what are the gains of jit for repeated callas # +# # with some unscientific performance benchmarks # +# ############################################################### +# +# N = 100000 +# points = np.random.rand(N,2) +# +# +# +# +# grid = ( +# (0.0, 1.0, 11), +# (0.0, 1.0, 11) +# ) +# +# vv = np.linspace(0,1,11) +# +# grid_uneven = ( +# vv, +# vv +# ) +# +# C = np.random.rand(11,11) +# +# # two equivalent calls: +# v = interp(grid, C, (0.3, 0.2)) +# v_unevn = interp(grid_uneven, C, (0.3, 0.2)) +# +# assert(abs(v_unevn-v)<1e-10) +# +# +# # +# # # let's compare with interp2d +# from scipy.interpolate import interp2d +# intp2 = interp2d(vv,vv,C.T) +# v_2d = intp2(0.3,0.2) +# assert(abs(v_2d-v)<1e-10) +# +# # and Regular Grid Interpolator +# from scipy.interpolate import RegularGridInterpolator +# vg = np.linspace(0,1,11) +# rgi = RegularGridInterpolator((vg,vg),C) +# v_rgi = rgi([0.3, 0.2])[0] +# assert(abs(v_rgi-v)<1e-10) +# +# +# +# vals = vec_interp(grid, C, points) +# vals_un = vec_interp(grid_uneven, C, points) +# vals_rgi = rgi(points) +# +# # both give the same result +# assert((abs(vals-vals_rgi).max()<1e-10)) +# assert((abs(vals-vals_un).max()<1e-10)) +# # +# # import time +# # K = 1000 +# # +# # t1_a = time.time() +# # for k in range(K): +# # vals = vec_interp(grid, C, points) +# # t1_b = time.time() +# # +# # t2_a = time.time() +# # for k in range(K): +# # vals_un = vec_interp(grid_uneven, C, points) +# # t2_b = time.time() +# # +# # t3_a = time.time() +# # for k in range(K): +# # vals_rgi = rgi(points) +# # t3_b = time.time() +# # +# # print(f"Even: {t1_b-t1_a}") +# # print(f"Uneven: {t2_b-t2_a}") +# # print(f"Scipy: {t3_b-t3_a}") diff --git a/interpolation/splines/gen_multilinear_irregular.py b/interpolation/splines/gen_multilinear_irregular.py new file mode 100644 index 0000000..eccc2fc --- /dev/null +++ b/interpolation/splines/gen_multilinear_irregular.py @@ -0,0 +1,38 @@ +max_d = 5 +# use_open_mp = True + +def index(inds): + return str.join('', [str(e) for e in inds] ) + +def rindex(binds): + # M = ['M_{}*'.format(i) for i in range(len(binds)-1)] + [''] + + N = ['(q_{}{})'.format(n,'+1'*i) for n,i in enumerate(binds)] + # return str.join(' , ', [ str.join('', e) for e in zip(M,N) ]) + return str.join(' , ', N ) + +def make_formula(d,ind,mm): + if len(ind) == d: 
+ return 'v_{}'.format(index(ind)) + else: + j = len(ind) + ind1 = ind + (0,) + ind2 = ind + (1,) + s = "(1-lam_{j})*({a}) + (lam_{j})*({b})".format(j=j, a=make_formula(d,ind1,mm), b=make_formula(d,ind2,mm)) + return s + +formulas = [make_formula(i,tuple([]),None) for i in range(max_d+1)] +from itertools import product + + +with open('multilinear_irregular_numba.py.in') as f: + txt = f.read() + +import tempita +# template = tempita.Template(txt,name='multilinear_numba.py.in' ) +# code = template.substitute() + +code = tempita.sub(txt, max_d=max_d, product=product, index=index, rindex=rindex, formulas=formulas) + +with open('multilinear_irregular_numba.py','w') as f: + f.write(code) diff --git a/interpolation/splines/multilinear_irregular.py b/interpolation/splines/multilinear_irregular.py new file mode 100644 index 0000000..0d8b572 --- /dev/null +++ b/interpolation/splines/multilinear_irregular.py @@ -0,0 +1,22 @@ +# from .multilinear_irregular_numba import * + +import numba +# +@numba.njit +def mysum(a): + return a + +from numba.extending import overload +@overload(mysum) +def mysum2(a,b): + def ff(a,b): + return a + b + return ff + +mysum(1,2) +# +# @numba.njit +# def test(): +# return max(1,2) + max(1,2,3) +# +# print(test()) diff --git a/interpolation/splines/multilinear_irregular_numba.py b/interpolation/splines/multilinear_irregular_numba.py new file mode 100644 index 0000000..bf4a295 --- /dev/null +++ b/interpolation/splines/multilinear_irregular_numba.py @@ -0,0 +1,482 @@ +import numpy as np +from numba import njit + +@njit(cache=True) +def multilinear_irregular_1d_nonvec(x0, y, u0): + + order_0 = x0.shape[0] + + # (s_1, ..., s_d) : evaluation point + u_0 = u0 + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + + # v_ij: values on vertices of hypercube "containing" the point + v_0 = y[(q_0)] + v_1 = y[(q_0+1)] + + # interpolated/extrapolated value + return (1-lam_0)*(v_0) + (lam_0)*(v_1) + +@njit(cache=True) +def multilinear_irregular_1d(x0, y, u, output): + + d = 1 + N = u.shape[0] + + order_0 = x0.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + + # v_ij: values on vertices of hypercube "containing" the point + v_0 = y[(q_0)] + v_1 = y[(q_0+1)] + + # interpolated/extrapolated value + output[n] = (1-lam_0)*(v_0) + (lam_0)*(v_1) + +@njit(cache=True) +def multilinear_irregular_2d(x0, x1, y, u, output): + + d = 2 + N = u.shape[0] + + order_0 = x0.shape[0] + order_1 = x1.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + u_1 = u[ n, 1 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + q_1 = max( min( np.searchsorted(x1, u_1)-1, (order_1-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + lam_1 = (u_1-x1[q_1])/(x1[q_1+1]-x1[q_1]) + + # v_ij: values on vertices of hypercube "containing" the point + v_00 = y[(q_0) , (q_1)] + v_01 = y[(q_0) , (q_1+1)] + v_10 = y[(q_0+1) , (q_1)] + v_11 = y[(q_0+1) , (q_1+1)] + + # interpolated/extrapolated value + output[n] = (1-lam_0)*((1-lam_1)*(v_00) + (lam_1)*(v_01)) + 
(lam_0)*((1-lam_1)*(v_10) + (lam_1)*(v_11)) + +@njit(cache=True) +def multilinear_irregular_3d(x0, x1, x2, y, u, output): + + d = 3 + N = u.shape[0] + + order_0 = x0.shape[0] + order_1 = x1.shape[0] + order_2 = x2.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + u_1 = u[ n, 1 ] + u_2 = u[ n, 2 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + q_1 = max( min( np.searchsorted(x1, u_1)-1, (order_1-2) ), 0 ) + q_2 = max( min( np.searchsorted(x2, u_2)-1, (order_2-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + lam_1 = (u_1-x1[q_1])/(x1[q_1+1]-x1[q_1]) + lam_2 = (u_2-x2[q_2])/(x2[q_2+1]-x2[q_2]) + + # v_ij: values on vertices of hypercube "containing" the point + v_000 = y[(q_0) , (q_1) , (q_2)] + v_001 = y[(q_0) , (q_1) , (q_2+1)] + v_010 = y[(q_0) , (q_1+1) , (q_2)] + v_011 = y[(q_0) , (q_1+1) , (q_2+1)] + v_100 = y[(q_0+1) , (q_1) , (q_2)] + v_101 = y[(q_0+1) , (q_1) , (q_2+1)] + v_110 = y[(q_0+1) , (q_1+1) , (q_2)] + v_111 = y[(q_0+1) , (q_1+1) , (q_2+1)] + + # interpolated/extrapolated value + output[n] = (1-lam_0)*((1-lam_1)*((1-lam_2)*(v_000) + (lam_2)*(v_001)) + (lam_1)*((1-lam_2)*(v_010) + (lam_2)*(v_011))) + (lam_0)*((1-lam_1)*((1-lam_2)*(v_100) + (lam_2)*(v_101)) + (lam_1)*((1-lam_2)*(v_110) + (lam_2)*(v_111))) + +@njit(cache=True) +def multilinear_irregular_4d(x0, x1, x2, x3, y, u, output): + + d = 4 + N = u.shape[0] + + order_0 = x0.shape[0] + order_1 = x1.shape[0] + order_2 = x2.shape[0] + order_3 = x3.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + u_1 = u[ n, 1 ] + u_2 = u[ n, 2 ] + u_3 = u[ n, 3 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + q_1 = max( min( np.searchsorted(x1, u_1)-1, (order_1-2) ), 0 ) + q_2 = max( min( np.searchsorted(x2, u_2)-1, (order_2-2) ), 0 ) + q_3 = max( min( np.searchsorted(x3, u_3)-1, (order_3-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + lam_1 = (u_1-x1[q_1])/(x1[q_1+1]-x1[q_1]) + lam_2 = (u_2-x2[q_2])/(x2[q_2+1]-x2[q_2]) + lam_3 = (u_3-x3[q_3])/(x3[q_3+1]-x3[q_3]) + + # v_ij: values on vertices of hypercube "containing" the point + v_0000 = y[(q_0) , (q_1) , (q_2) , (q_3)] + v_0001 = y[(q_0) , (q_1) , (q_2) , (q_3+1)] + v_0010 = y[(q_0) , (q_1) , (q_2+1) , (q_3)] + v_0011 = y[(q_0) , (q_1) , (q_2+1) , (q_3+1)] + v_0100 = y[(q_0) , (q_1+1) , (q_2) , (q_3)] + v_0101 = y[(q_0) , (q_1+1) , (q_2) , (q_3+1)] + v_0110 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3)] + v_0111 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3+1)] + v_1000 = y[(q_0+1) , (q_1) , (q_2) , (q_3)] + v_1001 = y[(q_0+1) , (q_1) , (q_2) , (q_3+1)] + v_1010 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3)] + v_1011 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3+1)] + v_1100 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3)] + v_1101 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3+1)] + v_1110 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3)] + v_1111 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3+1)] + + # interpolated/extrapolated value + output[n] = (1-lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*(v_0000) + (lam_3)*(v_0001)) + (lam_2)*((1-lam_3)*(v_0010) + (lam_3)*(v_0011))) + (lam_1)*((1-lam_2)*((1-lam_3)*(v_0100) + (lam_3)*(v_0101)) + (lam_2)*((1-lam_3)*(v_0110) + (lam_3)*(v_0111)))) + (lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*(v_1000) + (lam_3)*(v_1001)) + (lam_2)*((1-lam_3)*(v_1010) + (lam_3)*(v_1011))) + 
(lam_1)*((1-lam_2)*((1-lam_3)*(v_1100) + (lam_3)*(v_1101)) + (lam_2)*((1-lam_3)*(v_1110) + (lam_3)*(v_1111)))) + +@njit(cache=True) +def multilinear_irregular_5d(x0, x1, x2, x3, x4, y, u, output): + + d = 5 + N = u.shape[0] + + order_0 = x0.shape[0] + order_1 = x1.shape[0] + order_2 = x2.shape[0] + order_3 = x3.shape[0] + order_4 = x4.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + u_1 = u[ n, 1 ] + u_2 = u[ n, 2 ] + u_3 = u[ n, 3 ] + u_4 = u[ n, 4 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + q_1 = max( min( np.searchsorted(x1, u_1)-1, (order_1-2) ), 0 ) + q_2 = max( min( np.searchsorted(x2, u_2)-1, (order_2-2) ), 0 ) + q_3 = max( min( np.searchsorted(x3, u_3)-1, (order_3-2) ), 0 ) + q_4 = max( min( np.searchsorted(x4, u_4)-1, (order_4-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + lam_1 = (u_1-x1[q_1])/(x1[q_1+1]-x1[q_1]) + lam_2 = (u_2-x2[q_2])/(x2[q_2+1]-x2[q_2]) + lam_3 = (u_3-x3[q_3])/(x3[q_3+1]-x3[q_3]) + lam_4 = (u_4-x4[q_4])/(x4[q_4+1]-x4[q_4]) + + # v_ij: values on vertices of hypercube "containing" the point + v_00000 = y[(q_0) , (q_1) , (q_2) , (q_3) , (q_4)] + v_00001 = y[(q_0) , (q_1) , (q_2) , (q_3) , (q_4+1)] + v_00010 = y[(q_0) , (q_1) , (q_2) , (q_3+1) , (q_4)] + v_00011 = y[(q_0) , (q_1) , (q_2) , (q_3+1) , (q_4+1)] + v_00100 = y[(q_0) , (q_1) , (q_2+1) , (q_3) , (q_4)] + v_00101 = y[(q_0) , (q_1) , (q_2+1) , (q_3) , (q_4+1)] + v_00110 = y[(q_0) , (q_1) , (q_2+1) , (q_3+1) , (q_4)] + v_00111 = y[(q_0) , (q_1) , (q_2+1) , (q_3+1) , (q_4+1)] + v_01000 = y[(q_0) , (q_1+1) , (q_2) , (q_3) , (q_4)] + v_01001 = y[(q_0) , (q_1+1) , (q_2) , (q_3) , (q_4+1)] + v_01010 = y[(q_0) , (q_1+1) , (q_2) , (q_3+1) , (q_4)] + v_01011 = y[(q_0) , (q_1+1) , (q_2) , (q_3+1) , (q_4+1)] + v_01100 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3) , (q_4)] + v_01101 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3) , (q_4+1)] + v_01110 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3+1) , (q_4)] + v_01111 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3+1) , (q_4+1)] + v_10000 = y[(q_0+1) , (q_1) , (q_2) , (q_3) , (q_4)] + v_10001 = y[(q_0+1) , (q_1) , (q_2) , (q_3) , (q_4+1)] + v_10010 = y[(q_0+1) , (q_1) , (q_2) , (q_3+1) , (q_4)] + v_10011 = y[(q_0+1) , (q_1) , (q_2) , (q_3+1) , (q_4+1)] + v_10100 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3) , (q_4)] + v_10101 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3) , (q_4+1)] + v_10110 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3+1) , (q_4)] + v_10111 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3+1) , (q_4+1)] + v_11000 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3) , (q_4)] + v_11001 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3) , (q_4+1)] + v_11010 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3+1) , (q_4)] + v_11011 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3+1) , (q_4+1)] + v_11100 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3) , (q_4)] + v_11101 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3) , (q_4+1)] + v_11110 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3+1) , (q_4)] + v_11111 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3+1) , (q_4+1)] + + # interpolated/extrapolated value + output[n] = (1-lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*((1-lam_4)*(v_00000) + (lam_4)*(v_00001)) + (lam_3)*((1-lam_4)*(v_00010) + (lam_4)*(v_00011))) + (lam_2)*((1-lam_3)*((1-lam_4)*(v_00100) + (lam_4)*(v_00101)) + (lam_3)*((1-lam_4)*(v_00110) + (lam_4)*(v_00111)))) + (lam_1)*((1-lam_2)*((1-lam_3)*((1-lam_4)*(v_01000) + (lam_4)*(v_01001)) + (lam_3)*((1-lam_4)*(v_01010) + (lam_4)*(v_01011))) + (lam_2)*((1-lam_3)*((1-lam_4)*(v_01100) + 
(lam_4)*(v_01101)) + (lam_3)*((1-lam_4)*(v_01110) + (lam_4)*(v_01111))))) + (lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*((1-lam_4)*(v_10000) + (lam_4)*(v_10001)) + (lam_3)*((1-lam_4)*(v_10010) + (lam_4)*(v_10011))) + (lam_2)*((1-lam_3)*((1-lam_4)*(v_10100) + (lam_4)*(v_10101)) + (lam_3)*((1-lam_4)*(v_10110) + (lam_4)*(v_10111)))) + (lam_1)*((1-lam_2)*((1-lam_3)*((1-lam_4)*(v_11000) + (lam_4)*(v_11001)) + (lam_3)*((1-lam_4)*(v_11010) + (lam_4)*(v_11011))) + (lam_2)*((1-lam_3)*((1-lam_4)*(v_11100) + (lam_4)*(v_11101)) + (lam_3)*((1-lam_4)*(v_11110) + (lam_4)*(v_11111))))) + +@njit(cache=True) +def multilinear_irregular_vector_1d(x0, y,u, output): + + d = 1 + N = u.shape[0] + n_x = y.shape[1] + + order_0 = x0.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + + for i_x in range(n_x): + + # v_ij: values on vertices of hypercube "containing" the point + v_0 = y[(q_0), i_x] + v_1 = y[(q_0+1), i_x] + + # interpolated/extrapolated value + output[n, i_x] = (1-lam_0)*(v_0) + (lam_0)*(v_1) + +@njit(cache=True) +def multilinear_irregular_vector_2d(x0, x1, y,u, output): + + d = 2 + N = u.shape[0] + n_x = y.shape[2] + + order_0 = x0.shape[0] + order_1 = x1.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + u_1 = u[ n, 1 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + q_1 = max( min( np.searchsorted(x1, u_1)-1, (order_1-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + lam_1 = (u_1-x1[q_1])/(x1[q_1+1]-x1[q_1]) + + for i_x in range(n_x): + + # v_ij: values on vertices of hypercube "containing" the point + v_00 = y[(q_0) , (q_1), i_x] + v_01 = y[(q_0) , (q_1+1), i_x] + v_10 = y[(q_0+1) , (q_1), i_x] + v_11 = y[(q_0+1) , (q_1+1), i_x] + + # interpolated/extrapolated value + output[n, i_x] = (1-lam_0)*((1-lam_1)*(v_00) + (lam_1)*(v_01)) + (lam_0)*((1-lam_1)*(v_10) + (lam_1)*(v_11)) + +@njit(cache=True) +def multilinear_irregular_vector_3d(x0, x1, x2, y,u, output): + + d = 3 + N = u.shape[0] + n_x = y.shape[3] + + order_0 = x0.shape[0] + order_1 = x1.shape[0] + order_2 = x2.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + u_1 = u[ n, 1 ] + u_2 = u[ n, 2 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + q_1 = max( min( np.searchsorted(x1, u_1)-1, (order_1-2) ), 0 ) + q_2 = max( min( np.searchsorted(x2, u_2)-1, (order_2-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + lam_1 = (u_1-x1[q_1])/(x1[q_1+1]-x1[q_1]) + lam_2 = (u_2-x2[q_2])/(x2[q_2+1]-x2[q_2]) + + for i_x in range(n_x): + + # v_ij: values on vertices of hypercube "containing" the point + v_000 = y[(q_0) , (q_1) , (q_2), i_x] + v_001 = y[(q_0) , (q_1) , (q_2+1), i_x] + v_010 = y[(q_0) , (q_1+1) , (q_2), i_x] + v_011 = y[(q_0) , (q_1+1) , (q_2+1), i_x] + v_100 = y[(q_0+1) , (q_1) , (q_2), i_x] + v_101 = y[(q_0+1) , (q_1) , (q_2+1), i_x] + v_110 = y[(q_0+1) , (q_1+1) , (q_2), i_x] + v_111 = y[(q_0+1) , (q_1+1) , (q_2+1), i_x] + + # interpolated/extrapolated value + output[n, i_x] = (1-lam_0)*((1-lam_1)*((1-lam_2)*(v_000) + (lam_2)*(v_001)) + (lam_1)*((1-lam_2)*(v_010) + 
(lam_2)*(v_011))) + (lam_0)*((1-lam_1)*((1-lam_2)*(v_100) + (lam_2)*(v_101)) + (lam_1)*((1-lam_2)*(v_110) + (lam_2)*(v_111))) + +@njit(cache=True) +def multilinear_irregular_vector_4d(x0, x1, x2, x3, y,u, output): + + d = 4 + N = u.shape[0] + n_x = y.shape[4] + + order_0 = x0.shape[0] + order_1 = x1.shape[0] + order_2 = x2.shape[0] + order_3 = x3.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + u_1 = u[ n, 1 ] + u_2 = u[ n, 2 ] + u_3 = u[ n, 3 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + q_1 = max( min( np.searchsorted(x1, u_1)-1, (order_1-2) ), 0 ) + q_2 = max( min( np.searchsorted(x2, u_2)-1, (order_2-2) ), 0 ) + q_3 = max( min( np.searchsorted(x3, u_3)-1, (order_3-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + lam_1 = (u_1-x1[q_1])/(x1[q_1+1]-x1[q_1]) + lam_2 = (u_2-x2[q_2])/(x2[q_2+1]-x2[q_2]) + lam_3 = (u_3-x3[q_3])/(x3[q_3+1]-x3[q_3]) + + for i_x in range(n_x): + + # v_ij: values on vertices of hypercube "containing" the point + v_0000 = y[(q_0) , (q_1) , (q_2) , (q_3), i_x] + v_0001 = y[(q_0) , (q_1) , (q_2) , (q_3+1), i_x] + v_0010 = y[(q_0) , (q_1) , (q_2+1) , (q_3), i_x] + v_0011 = y[(q_0) , (q_1) , (q_2+1) , (q_3+1), i_x] + v_0100 = y[(q_0) , (q_1+1) , (q_2) , (q_3), i_x] + v_0101 = y[(q_0) , (q_1+1) , (q_2) , (q_3+1), i_x] + v_0110 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3), i_x] + v_0111 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3+1), i_x] + v_1000 = y[(q_0+1) , (q_1) , (q_2) , (q_3), i_x] + v_1001 = y[(q_0+1) , (q_1) , (q_2) , (q_3+1), i_x] + v_1010 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3), i_x] + v_1011 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3+1), i_x] + v_1100 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3), i_x] + v_1101 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3+1), i_x] + v_1110 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3), i_x] + v_1111 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3+1), i_x] + + # interpolated/extrapolated value + output[n, i_x] = (1-lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*(v_0000) + (lam_3)*(v_0001)) + (lam_2)*((1-lam_3)*(v_0010) + (lam_3)*(v_0011))) + (lam_1)*((1-lam_2)*((1-lam_3)*(v_0100) + (lam_3)*(v_0101)) + (lam_2)*((1-lam_3)*(v_0110) + (lam_3)*(v_0111)))) + (lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*(v_1000) + (lam_3)*(v_1001)) + (lam_2)*((1-lam_3)*(v_1010) + (lam_3)*(v_1011))) + (lam_1)*((1-lam_2)*((1-lam_3)*(v_1100) + (lam_3)*(v_1101)) + (lam_2)*((1-lam_3)*(v_1110) + (lam_3)*(v_1111)))) + +@njit(cache=True) +def multilinear_irregular_vector_5d(x0, x1, x2, x3, x4, y,u, output): + + d = 5 + N = u.shape[0] + n_x = y.shape[5] + + order_0 = x0.shape[0] + order_1 = x1.shape[0] + order_2 = x2.shape[0] + order_3 = x3.shape[0] + order_4 = x4.shape[0] + + for n in range(N): + + # (s_1, ..., s_d) : evaluation point + u_0 = u[ n, 0 ] + u_1 = u[ n, 1 ] + u_2 = u[ n, 2 ] + u_3 = u[ n, 3 ] + u_4 = u[ n, 4 ] + + # q_k : index of the interval "containing" s_k + q_0 = max( min( np.searchsorted(x0, u_0)-1, (order_0-2) ), 0 ) + q_1 = max( min( np.searchsorted(x1, u_1)-1, (order_1-2) ), 0 ) + q_2 = max( min( np.searchsorted(x2, u_2)-1, (order_2-2) ), 0 ) + q_3 = max( min( np.searchsorted(x3, u_3)-1, (order_3-2) ), 0 ) + q_4 = max( min( np.searchsorted(x4, u_4)-1, (order_4-2) ), 0 ) + + # lam_k : barycentric coordinate in interval k + lam_0 = (u_0-x0[q_0])/(x0[q_0+1]-x0[q_0]) + lam_1 = (u_1-x1[q_1])/(x1[q_1+1]-x1[q_1]) + lam_2 = (u_2-x2[q_2])/(x2[q_2+1]-x2[q_2]) + lam_3 = (u_3-x3[q_3])/(x3[q_3+1]-x3[q_3]) + lam_4 = 
(u_4-x4[q_4])/(x4[q_4+1]-x4[q_4]) + + for i_x in range(n_x): + + # v_ij: values on vertices of hypercube "containing" the point + v_00000 = y[(q_0) , (q_1) , (q_2) , (q_3) , (q_4), i_x] + v_00001 = y[(q_0) , (q_1) , (q_2) , (q_3) , (q_4+1), i_x] + v_00010 = y[(q_0) , (q_1) , (q_2) , (q_3+1) , (q_4), i_x] + v_00011 = y[(q_0) , (q_1) , (q_2) , (q_3+1) , (q_4+1), i_x] + v_00100 = y[(q_0) , (q_1) , (q_2+1) , (q_3) , (q_4), i_x] + v_00101 = y[(q_0) , (q_1) , (q_2+1) , (q_3) , (q_4+1), i_x] + v_00110 = y[(q_0) , (q_1) , (q_2+1) , (q_3+1) , (q_4), i_x] + v_00111 = y[(q_0) , (q_1) , (q_2+1) , (q_3+1) , (q_4+1), i_x] + v_01000 = y[(q_0) , (q_1+1) , (q_2) , (q_3) , (q_4), i_x] + v_01001 = y[(q_0) , (q_1+1) , (q_2) , (q_3) , (q_4+1), i_x] + v_01010 = y[(q_0) , (q_1+1) , (q_2) , (q_3+1) , (q_4), i_x] + v_01011 = y[(q_0) , (q_1+1) , (q_2) , (q_3+1) , (q_4+1), i_x] + v_01100 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3) , (q_4), i_x] + v_01101 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3) , (q_4+1), i_x] + v_01110 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3+1) , (q_4), i_x] + v_01111 = y[(q_0) , (q_1+1) , (q_2+1) , (q_3+1) , (q_4+1), i_x] + v_10000 = y[(q_0+1) , (q_1) , (q_2) , (q_3) , (q_4), i_x] + v_10001 = y[(q_0+1) , (q_1) , (q_2) , (q_3) , (q_4+1), i_x] + v_10010 = y[(q_0+1) , (q_1) , (q_2) , (q_3+1) , (q_4), i_x] + v_10011 = y[(q_0+1) , (q_1) , (q_2) , (q_3+1) , (q_4+1), i_x] + v_10100 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3) , (q_4), i_x] + v_10101 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3) , (q_4+1), i_x] + v_10110 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3+1) , (q_4), i_x] + v_10111 = y[(q_0+1) , (q_1) , (q_2+1) , (q_3+1) , (q_4+1), i_x] + v_11000 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3) , (q_4), i_x] + v_11001 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3) , (q_4+1), i_x] + v_11010 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3+1) , (q_4), i_x] + v_11011 = y[(q_0+1) , (q_1+1) , (q_2) , (q_3+1) , (q_4+1), i_x] + v_11100 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3) , (q_4), i_x] + v_11101 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3) , (q_4+1), i_x] + v_11110 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3+1) , (q_4), i_x] + v_11111 = y[(q_0+1) , (q_1+1) , (q_2+1) , (q_3+1) , (q_4+1), i_x] + + # interpolated/extrapolated value + output[n, i_x] = (1-lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*((1-lam_4)*(v_00000) + (lam_4)*(v_00001)) + (lam_3)*((1-lam_4)*(v_00010) + (lam_4)*(v_00011))) + (lam_2)*((1-lam_3)*((1-lam_4)*(v_00100) + (lam_4)*(v_00101)) + (lam_3)*((1-lam_4)*(v_00110) + (lam_4)*(v_00111)))) + (lam_1)*((1-lam_2)*((1-lam_3)*((1-lam_4)*(v_01000) + (lam_4)*(v_01001)) + (lam_3)*((1-lam_4)*(v_01010) + (lam_4)*(v_01011))) + (lam_2)*((1-lam_3)*((1-lam_4)*(v_01100) + (lam_4)*(v_01101)) + (lam_3)*((1-lam_4)*(v_01110) + (lam_4)*(v_01111))))) + (lam_0)*((1-lam_1)*((1-lam_2)*((1-lam_3)*((1-lam_4)*(v_10000) + (lam_4)*(v_10001)) + (lam_3)*((1-lam_4)*(v_10010) + (lam_4)*(v_10011))) + (lam_2)*((1-lam_3)*((1-lam_4)*(v_10100) + (lam_4)*(v_10101)) + (lam_3)*((1-lam_4)*(v_10110) + (lam_4)*(v_10111)))) + (lam_1)*((1-lam_2)*((1-lam_3)*((1-lam_4)*(v_11000) + (lam_4)*(v_11001)) + (lam_3)*((1-lam_4)*(v_11010) + (lam_4)*(v_11011))) + (lam_2)*((1-lam_3)*((1-lam_4)*(v_11100) + (lam_4)*(v_11101)) + (lam_3)*((1-lam_4)*(v_11110) + (lam_4)*(v_11111))))) diff --git a/misc/speed_comparison.py b/misc/speed_comparison.py index 5ff16b3..9f6141c 100644 --- a/misc/speed_comparison.py +++ b/misc/speed_comparison.py @@ -1,4 +1,4 @@ -from interpolation.splines.eval_cubic_numba import vec_eval_cubic_spline_3 +from interpolation.splines.eval_cubic_numba import vec_eval_cubic_spline_3, vec_eval_cubic_spline_2 
from interpolation.splines.filter_cubic import filter_coeffs from interpolation.splines.multilinear_numba import multilinear_interpolation @@ -7,23 +7,28 @@ import numpy K = 50 -d = 3 +d = 2 N = 10**6 # N = 100 -a = numpy.array([0.0,0.0,0.0]) -b = numpy.array([1.0,1.0,1.0]) -orders = numpy.array([K,K,K],dtype=int) +a = numpy.array([0.0]*d) +b = numpy.array([1.0]*d) +orders = numpy.array([K]*d,dtype=int) V = numpy.random.random(orders) C = filter_coeffs(a,b,orders,V) -X = numpy.random.random((N,3)) +X = numpy.random.random((N,d)) res = numpy.zeros(N) res2 = res.copy() -vec_eval_cubic_spline_3(a,b,orders,C,X,res) +if d==3: + vec_eval_cubic_spline = vec_eval_cubic_spline_3 +elif d==2: + vec_eval_cubic_spline = vec_eval_cubic_spline_2 + +vec_eval_cubic_spline(a,b,orders,C,X,res) multilinear_interpolation(a,b,orders,V,X,res) @@ -32,7 +37,7 @@ import time t1 = time.time() -vec_eval_cubic_spline_3(a,b,orders,C,X,res) +vec_eval_cubic_spline(a,b,orders,C,X,res) t2 = time.time() multilinear_interpolation(a,b,orders,V,X,res2) t3 = time.time() @@ -46,9 +51,21 @@ # scipy from scipy.interpolate import RegularGridInterpolator -pp = [numpy.linspace(a[i],b[i],orders[i]) for i in range(3)] +pp = [numpy.linspace(a[i],b[i],orders[i]) for i in range(d)] rgi = RegularGridInterpolator(pp, V) t1 = time.time() rgi(X) t2 = time.time() print("Scipy (linear): {}".format(t2-t1)) + + +# new multilinear +from interp_experiment import vec_interp +grid = ((a[0],b[0],orders[0]), (a[1],b[1],orders[1])) + +grid = ((0.0,1.0,50),(0.0,1.0,50)) +res2 = vec_interp(grid,V,X) # warmup +t2 = time.time() +res2 = vec_interp(grid,V,X) +t3 = time.time() +print("mlinterp (linear): {}".format(t3-t2)) diff --git a/setup.py b/setup.py index 0d3d26f..a4acc5c 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ setup( name='interpolation', - version='0.1.9', + version='0.2.0', description='Interpolation in Python', url='https://github.com/econforge/interpolation.py', author='Chase Coleman, Spencer Lyon and Pablo Winant',