Commit 513729ad authored by Jeremy BLEYER's avatar Jeremy BLEYER

Add files

parent 10ac80d1
import numpy as np
from numba import jit, float64, int64, prange, generated_jit
import matplotlib.pyplot as plt
import time
from scipy.optimize import fsolve
from greenOperatorConductionNumba import *
from microstructure import *
@jit
def nullifyField(field, N):
    """Zero out ``field``: every point of the N-grid gets a fresh zero vector."""
    dim = len(N)
    for point in np.ndindex(N):
        field[point] = np.zeros(dim)
@jit
def rec_fill(t,N):
    # Recursively fill the nested array ``t`` with d x d identity matrices:
    # walk down the leading axes until a (d, d) slab is reached, then set
    # each entry along the first axis to eye(d).
    # NOTE(review): only N[0] entries are visited at every level — this
    # assumes a cubic grid (all entries of N equal); confirm for non-square N.
    d=len(N)
    if t[0].shape==(d,d):
        for ii in prange(N[0]):
            t[ii]=np.eye(d)
    else:
        for ii in prange(N[0]):
            rec_fill(t[ii],N)
# The Ref function implements a Moulinec & Suquet-type algorithm computing the homogenized conductivity (kref) from an 'image' (ki, an array holding the conductivity of each pixel). N is the image size. tau_field is the polarization field used as the starting point of the iteration, and k0 is the reference conductivity used in the computation.
def Ref(tau_field, N, k0, ki):
    """Moulinec-Suquet fixed-point scheme computing the homogenized
    conductivity from a pixel-wise conductivity image.

    Parameters
    ----------
    tau_field : array, shape N + (d, d)
        Initial polarization field (one column per macroscopic direction).
    N : sequence of int
        Grid size per dimension.
    k0 : (d, d) array
        Reference conductivity used by the Green operator.
    ki : array, shape N + (d, d)
        Conductivity tensor of each pixel.

    Returns
    -------
    kref : (d, d) array
        Homogenized conductivity tensor.
    """
    d = len(N)
    filter_level = 2
    Eps_f = np.zeros(tuple(N) + (d, d))
    prevEpsf = np.zeros(tuple(N) + (d, d))
    for j in range(d):
        # Extract column j of the polarization field as the starting guess.
        tau = np.zeros(tuple(N) + (d,))
        for i in np.ndindex(tuple(N)):
            tau[i] = tau_field[i][:, j]
        it = 0
        err = 1.
        preverr = 1.
        krefj = k0[:, j]
        # BUGFIX: the previous strain iterate was read below as `prevEps`
        # but never defined (NameError); track it explicitly.
        Eps_field = np.zeros(tuple(N) + (d,))
        prevEps = np.zeros(tuple(N) + (d,))
        while err > 1e-4 or preverr > 1e-4:
            print(it)
            field, field_fourier, fft, ifft, tupleK, frequencies, filters, tupleFilters = initializeGreen(N, filter_level=filter_level)
            nullifyField(field, tuple(N))
            prevEps = Eps_field.copy()
            Eps_field = np.zeros(tuple(N) + (d,))
            Sig_field = np.zeros(tuple(N) + (d,))
            # Impose a unit macroscopic gradient along direction j.
            for i in np.ndindex(tuple(N)):
                Eps_field[i + (j,)] = 1.
            for i in np.ndindex(tuple(N) + (d,)):
                field[i] = tau[i]
            field = operate_field(field, field_fourier, fft, ifft, tupleK, N, frequencies, filter_level, filters, tupleFilters, k0)
            for i in np.ndindex(tuple(N)):
                Eps_field[i] += field[i]
            # Polarization update: tau = (ki - k0) : Eps.
            for i in np.ndindex(tuple(N)):
                tau[i] = np.dot(ki[i] - k0, Eps_field[i])
            it += 1
            preverr = err
            prevkrefjj = krefj[j]
            # Volume average of the flux gives column j of the estimate.
            krefj = np.zeros((d,))
            for i in np.ndindex(tuple(N)):
                krefj += np.dot(ki[i], Eps_field[i]) / N[0]**d
            err = np.abs((krefj[j] - prevkrefjj) / prevkrefjj)
            print(krefj)
        # Store the converged (and previous) strain columns.
        for i in np.ndindex(tuple(N) + (d,)):
            Eps_f[i + (j,)] = Eps_field[i]
            prevEpsf[i + (j,)] = prevEps[i]
        for i in np.ndindex(tuple(N)):
            Sig_field[i] = np.dot(ki[i], Eps_field[i])
        plt.figure()
        plt.imshow(Sig_field[:, :, 0], origin='lower')
        plt.show()
        plt.close()
    kref = np.zeros((d, d))
    for i in np.ndindex(tuple(N)):
        kref += np.dot(ki[i], Eps_f[i]) / N[0]**d
    return kref
import numpy as np
from numba import jit, float64, int64, prange
import matplotlib.pyplot as plt
import time
from scipy.optimize import fsolve
from greenOperatorConductionNumba import *
#from discretizationGrid import *
#from constitutiveLaw import *
from microstructure import *
@jit
def nullifyField(field, N):
    """Reset each entry of the N-indexed field to a zero vector of length len(N)."""
    ndim = len(N)
    for cell in np.ndindex(N):
        field[cell] = np.zeros(ndim)
def getLocalGreenNumba(M, k0):
    """Return the discrete Green operator restricted to a local 3^d window.

    For each direction i a unit impulse is placed at the origin of an
    M-grid, the Green operator is applied, and the response on the cells
    adjacent to the origin (offsets in {-1,0,1}^d) is recorded.
    """
    d = len(M)
    filter_level = 2
    field, field_fourier, fft, ifft, tupleK, frequencies, filters, tupleFilters = initializeGreen(M, filter_level=filter_level)
    gammaLocal = np.zeros(tuple(d * [3] + 2 * [d]))
    for i in range(d):
        nullifyField(field, tuple(M))
        # Unit impulse in direction i at the origin cell.
        field[tuple(d * [0] + [i])] = 1
        # BUGFIX: the real-space field (not field_fourier) must be passed
        # as the first argument and the result must be kept — this mirrors
        # the call in Ref(); previously the impulse field was ignored.
        field = operate_field(field, field_fourier, fft, ifft, tupleK, M, frequencies, filter_level, filters, tupleFilters, k0)
        for m in np.ndindex(tuple(d * [3])):
            # Map the window index {0,1,2}^d to offsets {-1,0,1}^d;
            # negative indices wrap around the periodic grid.
            s = tuple([m[j] - 1 for j in range(d)])
            gammaLocal[s][i] += field[s]
    return gammaLocal
"""
def getLocalGreen(M,k0):
d = len(M)
grid = discretizationGrid(M)
gamma = greenOperatorConduction(grid,k0,discretization="truncated",filter_level=2)
gammaLocal = np.zeros(tuple(d*[3] + 2*[gamma.numComp]))
for i in range(gamma.numComp):
for n in np.ndindex(tuple(M)):
gamma.field[n] = np.zeros(d)
gamma.field[tuple(d*[0]+[i])] = 1
gamma.operate_field()
for m in np.ndindex(tuple(d*[3])):
s = tuple([m[j]-1 for j in range(d)])
gammaLocal[s][i]+=gamma.field[s]
return gammaLocal
"""
def getGreenMatrix(M, k0, gammaLocal, shifts):
    """Assemble the (d*2**d) x (d*2**d) Green matrix of a 2^d-cell patch.

    Block (row, col) holds -gammaLocal evaluated at the relative offset
    between cell ``row`` and cell ``col`` (offsets index the periodic
    3^d neighbourhood via negative-index wrap-around).
    """
    d = len(M)
    size = d * 2**d
    matrix = np.zeros((size, size))
    cells = list(np.ndindex(shifts))
    for row, I in enumerate(cells):
        for col, J in enumerate(cells):
            offset = tuple(a - b for a, b in zip(I, J))
            matrix[d*row:d*(row+1), d*col:d*(col+1)] -= gammaLocal[offset]
    return matrix
def resetReference(N, k0, greenMatrices, phaseProps, phaseInvDeltaProp):
    """Recompute, for reference conductivity ``k0``, the per-depth Green
    matrices (written into ``greenMatrices`` in place) and the per-phase
    inverses inv(k_phase*I - k0) (written into ``phaseInvDeltaProp``).

    Returns the reference property as a (d, d) array.
    """
    # BUGFIX: d was read from an implicit module-level global; derive it
    # from N so the function is self-contained.
    d = len(N)
    M = 2 * np.ones(N.shape, dtype=np.int64)
    if k0.__class__ == np.ndarray:
        refProp = k0
    else:
        # Scalar reference: promote to an isotropic tensor.
        refProp = k0 * np.eye(len(N))
    for i in range(len(phaseProps)):
        kloc = phaseProps[i]
        phaseInvDeltaProp[i] = np.linalg.inv(np.eye(d) * kloc - refProp)
    depth = 0
    shifts = tuple(d * [2])
    # Double the patch size until the full grid N is reached, storing the
    # Green matrix of each hierarchical level.
    while M[0] <= N[0]:
        gammaLocal = getLocalGreenNumba(M, k0)
        greenMatrices[depth] = getGreenMatrix(M, k0, gammaLocal, shifts)
        M *= 2
        depth += 1
    return(refProp)
@jit(nopython=True)#(float64[:,:](int64,float64[:,:],float64[:,:],float64[:,:]))
def getEquivalentProperty(d, localProperties, refProp, greenMatrix):
    # Condense a 2^d-cell patch into one equivalent (d, d) property tensor
    # by solving the patch Hashin-Shtrikman system.
    #   localProperties : (d*2**d, d*2**d) block-diagonal per-cell tensors
    #   greenMatrix     : inter-cell Green coupling of the same shape
    # NOTE(review): refProp is unused by the active return expression; the
    # commented-out alternative formula below does use it — confirm intent.
    #Tk = array of tensors obtained by inversion of HS matrix
    #Ck-1 = Cref + avg(Tk):inv(avg(Mk:Tk))
    matrix = localProperties+greenMatrix
    #print(matrix)
    numDOF = len(matrix)#d*2**d
    avgT = np.zeros((d,d),dtype=np.float64)
    avgMT = np.zeros((d,d),dtype=np.float64)
    rhs = np.zeros(numDOF,dtype=np.float64)
    for i in range(d):
        # Impose a unit macroscopic gradient along direction i in all cells.
        macroGradient = np.array([1. if i==j else 0. for j in range(d)],dtype=np.float64)
        for cell in range(2**d):
            rhs[d*cell:d*(cell+1)] = macroGradient
        tau = np.linalg.solve(matrix,rhs)
        eps = localProperties.dot(tau)
        # Cell-averages of tau and of localProperties.tau (row i).
        for cell in range(2**d):
            avgT[i]+=tau[d*cell:d*(cell+1)]
            avgMT[i]+=eps[d*cell:d*(cell+1)]
        avgT[i]/=2**d
        avgMT[i]/=2**d
    return avgMT.dot(np.linalg.inv(avgT))#refProp+avgT.dot(np.linalg.inv(avgMT))
@jit(nopython=True)#(float64[:,:](int64,int64[:],int64[:],int64,float64[:,:],float64[:],float64[:,:],float64[:,:,:]))
def recursion(depth, M, pshifts, position, stride, phasemap, mapStrides, phaseProps, refProp, greenMatrices):
    """Depth-first renormalization step: homogenize the 2^d sub-patches of
    the current patch and condense them into one equivalent property.

    Leaf cells (stride <= 1) read their phase property from the flattened
    ``phasemap``; inner cells recurse with half the stride.
    """
    d = len(M)
    localProperties = np.zeros(greenMatrices[0].shape)
    for i in prange(2**d):
        nextPosition = position + stride * pshifts[i]
        if stride > 1:
            # BUGFIX: floor division so the stride stays integral under
            # Python 3 (true division produced a float that broke the
            # integer indexing at the leaves).
            localProperties[d*i:d*(i+1), d*i:d*(i+1)] = recursion(depth+1, M*2, pshifts, nextPosition, stride//2, phasemap, mapStrides, phaseProps, refProp, greenMatrices)
        else:
            n = position + stride * pshifts[i]  # tuple() constructor not supported by numba
            # Flatten the d-dim position into a scalar index; the int()
            # cast defends against a float stride from the caller.
            nflat = int(np.sum(n * mapStrides))
            localProperties[d*i:d*(i+1), d*i:d*(i+1)] = phaseProps[phasemap[nflat]]
    equivalentProperty = getEquivalentProperty(d, localProperties, refProp, greenMatrices[depth])
    return equivalentProperty
@jit(nopython=True,parallel=False)#very minor speed up, communication problem ?
def recursionParallel(depth, M, pshifts, position, stride, phasemap, mapStrides, phaseProps, refProp, greenMatrices):
    """Top-level wrapper around ``recursion`` whose sub-patch loop may run
    in parallel (kept parallel=False: only a minor speed-up was observed).

    Do not use tuples as arguments in parallel mode.
    """
    d = len(M)
    numDOF = d * 2**d
    localProperties = np.zeros((numDOF, numDOF))
    for i in prange(2**d):
        nextPosition = position + stride * pshifts[i]
        # BUGFIX: floor division keeps the stride integral under Python 3.
        localProperties[d*i:d*(i+1), d*i:d*(i+1)] = recursion(depth+1, M*2, pshifts, nextPosition, stride//2, phasemap, mapStrides, phaseProps, refProp, greenMatrices)
    equivalentProperty = getEquivalentProperty(d, localProperties, refProp, greenMatrices[depth])
    return equivalentProperty
def hierachicHomogenize(N, phasemap, phaseProps):
    """Self-consistent hierarchical homogenization of a phase map.

    Iterates: choose reference k0, rebuild the depth-wise Green matrices,
    run the recursive renormalization, update k0 with the symmetrized
    estimate — until the relative change drops below 1e-3.

    Parameters
    ----------
    N : (d,) int array — grid size (assumed a power of two per axis).
    phasemap : flat int array of phase indices, length N[0]**d.
    phaseProps : per-phase scalar conductivities.

    Returns the final estimated conductivity tensor ``kest``.
    """
    d = len(N)
    phaseInvDeltaProp = np.zeros((len(phaseProps), d, d))
    numDOF = d * 2**d
    greenMatrices = np.zeros((int(np.log2(N[0])), numDOF, numDOF), dtype=np.float64)
    shifts = tuple(d * [2])
    # BUGFIX: a 2^d patch has 2**d sub-cells (the former d**2 coincided
    # with 2**d only for d == 2 and crashed for d == 1).
    pshifts = np.zeros((2**d, d), dtype=np.int64)
    i = 0
    for shift in np.ndindex(shifts):
        pshifts[i] += np.array(shift)
        i += 1
    k0 = np.mean(phaseProps) * np.eye(len(N))
    refProp = resetReference(N, k0, greenMatrices, phaseProps, phaseInvDeltaProp)
    print("delta\treset(s)\trecurrence(s)")
    delta = 1
    # BUGFIX: mapStrides and the initial stride previously leaked in from
    # module globals (mapStrides, N0); derive both from N so the function
    # is self-contained, and keep the stride an integer (N0/2 was a float
    # under Python 3).
    mapStrides = np.array([N[0]**(d - i - 1) for i in range(d)], dtype=np.int64)
    while delta > 1e-3:
        M = 2 * np.ones(N.shape, dtype=np.int64)
        depth = 0
        position = np.zeros(N.shape, dtype=np.int64)
        stride = N[0] // 2
        start = time.time()
        invdk = recursionParallel(depth, M, pshifts, position, stride, phasemap, mapStrides, phaseInvDeltaProp, refProp, greenMatrices)
        kest = refProp + np.linalg.inv(invdk)
        end = time.time()
        recursionTime = end - start
        prevk0 = k0
        # Symmetrize the estimate before using it as the next reference.
        k0 = (kest + kest.transpose()) / 2
        delta = np.linalg.norm(k0 - prevk0) / np.linalg.norm(k0)
        start = time.time()
        refProp = resetReference(N, k0, greenMatrices, phaseProps, phaseInvDeltaProp)
        end = time.time()
        resetTime = end - start
        print("%e\t%s\t%s" % (delta, resetTime, recursionTime))
    return kest
# NOTE(review): dead leftover code — these statements run at import time and
# reference names (kest, khom, frac, phasemap, N0, k1, k2) that are not
# defined at this point in the file, so they would raise NameError. They look
# like residue from an earlier script version superseded by the loop below.
# Candidate for deletion — kept as-is pending confirmation.
print(kest)
khom.append(kest)
phi=np.sum(phasemap)*1./N0**d
frac.append(phi)
print("voigt %f" % ((1-phi)*k1+phi*k2))
print("reuss %f" % (1./((1-phi)/k1+phi/k2)))
# ---- Script parameters ----
N0 = 128  # grid resolution per dimension
d=3  # spatial dimension (2 or 3)
N=N0*np.ones(d,dtype=np.int64)
radius = 8  # inclusion radius in pixels
shape = {2:"disc",3:"sphere"}
# Volume of a single inclusion. NOTE(review): `pi` is not imported in this
# file's explicit imports — presumably provided by one of the star imports;
# TODO confirm.
V = pi*radius**2 if d==2 else 4*pi/3*radius**3
k1 = 1.  # conductivity of the conductive phase
k2 = 0.001  # conductivity of the insulating phase
Khom = []  # homogenized tensors collected per model
Frac = []  # achieved volume fractions collected per model
for model in ["A","B"]:
if model == "A":
phaseProps = np.array([k1,k2],dtype=np.float64)
elif model == "B":
phaseProps = np.array([k2,k1],dtype=np.float64)
khom=[]
frac=[]
for target_phi in range(75,99,4):
ncenter = int(-np.log(1-target_phi/100.)*N0**d/V)
centers = np.random.rand(ncenter,d)*N0
radii = np.ones(ncenter)*radius
microstructure = booleanSpheres(centers,radii,N)
#microstructure = booleanSpheres(np.array([[0,0],[N0/3,N0/3],[3*N0/4,N0/2]]),[N0/3,N0/3,N0/3],N)
mapStrides = np.array([N0**(d-i-1) for i in range(d)],dtype=np.int64)
phasemap = microstructure.phasemap.flatten() #TODO : flatten to allow scalar index (see numba issue with tuple index)
phi=np.sum(phasemap)*1./N0**d
frac.append(phi)
print("target phi %f, achieved phi %f, num. centers %d" % (target_phi/100.,phi,ncenter))
#microstructure = square(N0/2)
"""
plt.figure()
plt.imshow(microstructure.phasemap, cmap="Greys")
plt.show(block=False)
"""
khh = hierachicHomogenize(N,phasemap,phaseProps)
print(khh)
khom.append(khh)
print("voigt %f" % ((1-phi)*k1+phi*k2))
print("reuss %f" % (1./((1-phi)/k1+phi/k2)))
Khom.append(np.array(khom))
Frac.append(np.array(frac))
savefile=open("boolean_%s_contrast%dmodel%s.txt" % (shape[d],int(k1/k2),model), 'ab')
np.savetxt(savefile,np.array([[f,np.trace(kh)/d] for (f,kh) in zip(frac,khom)]))
savefile.close()
# Analytical estimates for comparison.
# Mori-Tanaka / Hashin-Shtrikman estimate with matrix k1 and inclusions k2.
Kmoritanaka = lambda x,k1,k2,d : ((1-x)*k1+x*k2*d*k1/((d-1)*k1+k2))/((1-x)+x*d*k1/((d-1)*k1+k2))
# Self-consistent scheme: zero of the stationarity condition; the 1e6
# penalty keeps fsolve away from non-physical negative conductivities.
eqKselfconsistent = lambda k,x,k1,k2,d : 1e6 if k <0 else ((1-x)*(k1-k)*k*d/((d-1)*k+k1)+x*(k2-k)*k*d/((d-1)*k+k2))
Kselfconsistent= lambda x,k1,k2,d : fsolve(eqKselfconsistent,(k1+k2)/2,args=(x,k1,k2,d))
x=np.linspace(0,1,101)
plt.plot(x,Kmoritanaka(x,k1,k2,d),label="Hashin-Shtrikman")
plt.plot(x,Kmoritanaka(1-x,k2,k1,d),label="Hashin-Shtrikman")
plt.plot(x,[Kselfconsistent(f2,k1,k2,d) for f2 in x],label="Self-Consistent")
# Overlay the numerical results written by the model loop above.
resA = np.loadtxt("boolean_%s_contrast%dmodel%s.txt" % (shape[d],int(k1/k2),"A"))
plt.plot(resA[:,0],resA[:,1],'k.',label="hierachical num. self-consistent A")
resB = np.loadtxt("boolean_%s_contrast%dmodel%s.txt" % (shape[d],int(k1/k2),"B"))
plt.plot(1-resB[:,0],resB[:,1],'k+',label="hierachical num. self-consistent B")
plt.legend(loc="upper right")
plt.title(r"boolean %s contrast = %d" % (shape[d],int(k1/k2)))
plt.xlabel("volume fraction of insulating phase")
plt.ylabel(r"$K_{hom} / K_{conductive \, phase}$")
plt.savefig("boolean_%s_contrast%d.pdf" % (shape[d],int(k1/k2)))
plt.show()
from dolfin import *
import numpy as np
import matplotlib.pyplot as plt
from Reference import reference
from math import *
from random import uniform
def homog_hierar(tabC,CVoigt):
    """Hierarchical self-consistent homogenization of a 2^p x 2^p grid of
    elasticity tensors (3x3 Voigt notation).

    tabC   : (2^p, 2^p, 3, 3) array of cell elasticity tensors.
    CVoigt : (3, 3) initial guess for the homogenized tensor.

    Returns (C_hom_list, tA_list): per-iteration [reference, estimate]
    pairs and the corresponding localization-tensor tables.
    NOTE(review): requires the precomputed influence-tensor files
    'influence_tensors-nu=...-patch_size=02-max_depth=10.npy' on disk.
    """
#    K0=np.array([[0.5,-0.5, 0. ],
#    [-0.5, 0.5, 0. ],
#    [0. , 0. , 1.]])
#    J0=np.array([[0.5, 0.5, 0. ],
#    [0.5, 0.5, 0. ],
#    [0. , 0. , 0.]])
    pp,qq,rr,ss=tabC.shape
    # p is the hierarchy depth: the grid is 2^p cells wide.
    p=int(log(pp)/log(2))
    # 12x12 influence matrices per level (4 cells x 3 Voigt components),
    # for reference Poisson ratios nu=0 and nu=1/2.
    gamma0=np.array([[[0. for i in range(12)] for j in range(12)] for n in range(p)])
    gamma12=np.array([[[0. for i in range(12)] for j in range(12)] for n in range(p)])
    t0=np.load('influence_tensors-nu=0.0-patch_size=02-max_depth=10.npy')
    t12=np.load('influence_tensors-nu=0.5-patch_size=02-max_depth=10.npy')
    # Repack the 5-index influence tensors into 12x12 matrices per level.
    # NOTE(review): the 9-n index assumes max_depth=10 files — confirm.
    for n in range(p):
        for j in range(4):
            for k in range(3):
                for i in range(4):
                    for l in range(3):
                        gamma0[n,3*i+l,3*j+k]=t0[9-n,i,j,l,k]
                        gamma12[n,3*i+l,3*j+k]=t12[9-n,i,j,l,k]
    # auxiliary_problem computes the local A tensors: its main argument is
    # C_list, the list of the 4 cell elasticity tensors, and it returns the
    # patch equivalent tensor Chom plus E, where E[i] is the average of the
    # localization tensor over cell i, computed by periodic homogenization.
    def auxiliary_problem(C_list, n,gamma,K):
        M=np.array([[0. for i in range(12)] for j in range(12)])
        # Block-diagonal compliance-like matrix inv(C_i - K) per cell.
        for i in range(4):
            S=np.linalg.inv(C_list[i]-K)
            for k in range(3):
                for l in range(3):
                    M[i*3+l,3*i+k]=S[l,k]
        gamma_=gamma[n-1]
        T=np.linalg.inv(M-gamma_)
        Chom=np.array([[1., 0., 0.],
                       [0., 1., 0.],
                       [0., 0., 1.]])
        MT_=np.array([[0., 0., 0.],
                      [0., 0., 0.],
                      [0., 0., 0.]])
        T_=np.array([[0., 0., 0.],
                     [0., 0., 0.],
                     [0., 0., 0.]])
        E=np.array([np.array([[0., 0., 0.],
                              [0., 0., 0.],
                              [0., 0., 0.]]) for i in range(4)])
        MT=np.dot(M,T)
        # Cell averages of T and M.T (0.25 = 1/4 cells).
        for k in range(3):
            for l in range(3):
                for j in range(4):
                    for i in range(4):
                        E[i,l,k]+=MT[3*i+l,3*j+k]
                        T_[l,k]+=0.25*T[3*i+l,3*j+k]
                        MT_[l,k]+=0.25*MT[3*i+l,3*j+k]
        for i in range(4):
            E[i]=np.dot(E[i],np.linalg.inv(MT_))
        Chom=K+np.dot(T_,np.linalg.inv(MT_))
        return Chom,E
    # tC takes the current table of elasticity tensors and returns the
    # coarsened table of tensors for the next level, while accumulating the
    # localization tensors into tA.
    def tC(tC_,tA,n,gamma,K):
        t1=np.array([[np.zeros((3,3)) for i in range(2**(n-1))] for j in range(2**(n-1))])
        for i in range(2**(n-1)):
            for j in range(2**(n-1)):
                # The 4 children of coarse cell (i, j), in the cell order
                # expected by auxiliary_problem.
                C_list=[tC_[2*i+1,2*j],tC_[2*i+1,2*j+1],tC_[2*i,2*j],tC_[2*i,2*j+1]]
                t1[i,j],Aloc=auxiliary_problem(C_list, n,gamma,K)
                # Propagate each child's localization tensor to the block of
                # fine cells it covers.
                for ii in range(2**(p-n)):
                    for jj in range(2**(p-n)):
                        tA[i*2**(p-n+1)+ii+2**(p-n),j*2**(p-n+1)+jj]=np.dot(tA[i*2**(p-n+1)+ii+2**(p-n),j*2**(p-n+1)+jj],Aloc[0])
                for ii in range(2**(p-n)):
                    for jj in range(2**(p-n)):
                        tA[i*2**(p-n+1)+ii+2**(p-n),j*2**(p-n+1)+jj+2**(p-n)]=np.dot(tA[i*2**(p-n+1)+ii+2**(p-n),j*2**(p-n+1)+jj+2**(p-n)],Aloc[1])
                for ii in range(2**(p-n)):
                    for jj in range(2**(p-n)):
                        tA[i*2**(p-n+1)+ii,j*2**(p-n+1)+jj]=np.dot(tA[i*2**(p-n+1)+ii,j*2**(p-n+1)+jj],Aloc[2])
                for ii in range(2**(p-n)):
                    for jj in range(2**(p-n)):
                        tA[i*2**(p-n+1)+ii,j*2**(p-n+1)+jj+2**(p-n)]=np.dot(tA[i*2**(p-n+1)+ii,j*2**(p-n+1)+jj+2**(p-n)],Aloc[3])
        return t1
    # Chom takes the initial table tC0 of elasticity tensors of the size-n
    # grid and calls the functions above to coarsen it level by level,
    # returning the reference tensor, the fully homogenized tensor and tA.
    def Chom(tC0,n,K_):
#        K[2,2]=K[0,0]-K[0,1]
#        mu=np.matrix.trace(np.dot(K,K0))/4
#        km=np.matrix.trace(np.dot(K,J0))/3
#        nu=(3*km-2*mu)/2/(3*km)
        # Extract an isotropic (nu, mu) pair from the symmetrized estimate
        # K_ and rebuild a plane-strain-like isotropic reference K.
        C_11=(K_[0,0]+K_[1,1])/2
        C_12=(K_[0,1]+K_[1,0])/2
        nu=1/(C_11/C_12+1)
        mu=C_11/2*(1-2*nu)/(1-nu)
        C11=2*mu*(1-nu)/(1-2*nu)
        C22=C11
        C12=nu/(1-nu)*C11
        C33=2*mu
        K=np.array([[C11,C12,0.],[C12,C22,0.],[0.,0.,C33]])
        # Interpolate the influence matrices between nu=0 and nu=1/2.
        gamma=-((1-2*nu)*gamma0+nu*gamma12)/(1-nu)/mu
        t=tC0
        tA=np.array([[np.array([[1., 0., 0.],
                                [0., 1., 0.],
                                [0., 0., 1.]]) for i in range(2**p)] for j in range(2**p)])
        for i in range(n):
            t2=tC(t,tA,n-i,gamma,K)
            t=t2
        return(K,t[0,0],tA)
    # Self-consistent fixed point on the homogenized tensor.
    C_hom1=CVoigt
    err=1.0
    it=1
    C_hom_list=[]
    tA_list=[]
    while err > 1e-2:
        print("itération "+str(it)+'\n')
        C_hom=C_hom1
        C0,C_hom1,tA=Chom(tabC,p,C_hom)
        err=abs(C_hom1[0,0]-C_hom[0,0])/C_hom[0,0]
        it+=1
        C_hom_list.append(np.array([C0,C_hom1]))
        tA_list.append(tA)
        print(C0,C_hom1)
    return C_hom_list,tA_list
from dolfin import *
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
def local_project(v, V):
    """Project expression ``v`` onto function space ``V`` with an
    element-local LU-factorized solver (no global system solve)."""
    trial = TrialFunction(V)
    test = TestFunction(V)
    lhs_form = inner(trial, test) * dx
    rhs_form = inner(v, test) * dx
    local_solver = LocalSolver(lhs_form, rhs_form)
    local_solver.factorize()
    result = Function(V)
    local_solver.solve_local_rhs(result)
    return result
# See the 'Contenu des fichiers' (file-contents) notes regarding 'reference'.
def reference(tabC,res1,c,ec,fr):
#res=256
k,ll,mm,nn=tabC.shape
mesh = UnitSquareMesh(res1, res1, "left/right")
class LocalC(UserExpression):
def __init__(self, Cij, mesh):
self.Cij = Cij
self.Nx, self.Ny, a, b = Cij.shape
assert a==3 and b==3, "Wrong shape"
self.mesh = mesh
UserExpression.__init__(self)
def eval_cell(self, value, x, ufc_cell):
p = Cell(mesh, ufc_cell.index).midpoint()
i, j = int(p[0]*(self.Nx)), int(p[1]*(self.Ny))
value[:] = self.Cij[j, i,:,:].flatten()
def value_shape(self):
return (3, 3)
#Ici on crée, à partir du tableau des tenseurs d'élasticité correspondant au domaine dont on veut calculer le Chom, un tableau Cv représentant le même Cn mais adapté à une résolution plus fine 'res'
# Cv = np.array([[np.zeros((3,3)) for i in range(res)] for j in range(res)])
# for i0 in range(k):
# for j0 in range(k):
# for i in range(int(i0*res/k),int((i0+1)*res/k)):
# for j in range (int(j0*res/k),int((j0+1)*res/k)):
# Cv[i,j]=tabC[i0,j0]
# plt.imshow(tabC[:,:,0,0])
# plt.show()
# plt.close()
# plt.imshow(Cv[:,:,0,0])
# plt.show()
# plt.close()
#Cvar est la transformation du tableau Cv en une fonction définie sur 'mesh'
Cvar= LocalC(tabC,mesh)
# class used to define the periodic boundary map
class PeriodicBoundary(SubDomain):
def __init__(self, vertices, tolerance=DOLFIN_EPS):
""" vertices stores the coordinates of the 4 unit cell corners"""
SubDomain.__init__(self, tolerance)
self.tol = tolerance
self.vv = vertices
self.a1 = self.vv[1,:]-self.vv[0,:] # first vector generating periodicity
self.a2 = self.vv[3,:]-self.vv[0,:] # second vector generating periodicity
# check if UC vertices form indeed a parallelogram
assert np.linalg.norm(self.vv[2, :]-self.vv[3, :] - self.a1) <= self.tol
assert np.linalg.norm(self.vv[2, :]-self.vv[1, :] - self.a2) <= self.tol
def inside(self, x, on_boundary):
# return True if on left or bottom boundary AND NOT on one of the
# bottom-right or top-left vertices
return bool((near(x[0], self.vv[0,0] + x[1]*self.a2[0]/self.vv[3,1], self.tol) or
near(x[1], self.vv[0,1] + x[0]*