import time
import math
import numpy as np
import netCDF4 as cdf
import matplotlib.tri as tri
import matplotlib.pyplot as plt
import dynamico.wrap as wrap
from ctypes import c_void_p, c_int, c_double, c_bool

# degrees per radian, used for plotting lat/lon in degrees
radian=180/math.pi

#------------- direct Cython interface to DYNAMICO routines -------------#

cdef extern from "functions.h":
    cdef void dynamico_init_params()
    cpdef void dynamico_setup_xios()
    cpdef void dynamico_xios_set_timestep(double)
    cpdef void dynamico_xios_update_calendar(int)

#------------- import and wrap DYNAMICO routines -------------#

ker=wrap.Struct() # store imported fun X as funs.X

check_args = False # use True instead of False for debugging, probably with some overhead

try:
    kernels = wrap.SharedLib(vars(ker), 'libicosa.so', check_args=check_args)
    setvar, setvars, getvar, getvars = kernels.setvar, kernels.setvars, kernels.getvar, kernels.getvars
except OSError:
    # NB : message must name the library actually loaded above ('libicosa.so')
    print """
Unable to load shared library 'libicosa.so' !
"""
    raise

# providing a full prototype enables type-checking when calling
# if a number n is present in the prototype, the previous type is repeated n times
kernels.import_funs([
                     ['dynamico_setup_xios',None],
                     ['dynamico_xios_set_timestep',c_double],
                     ['dynamico_xios_update_calendar',c_int],
#                     ['init_params',c_double],
                     ['dynamico_init_mesh',c_void_p,13],
                     ['dynamico_init_metric', c_void_p,6],
                     ['dynamico_init_hybrid', c_void_p,3],
                     ['dynamico_caldyn_unstructured', c_double, c_void_p,20],
                     ['dynamico_partition_graph', c_int,2, c_void_p,3, c_int, c_void_p],
                     ])

# set/get global variables
# integer codes for the Fortran-side enumerations
eta_mass,eta_lag=(1,2)
thermo_theta,thermo_entropy,thermo_moist,thermo_boussinesq=(1,2,3,4)

kernels.addvars(
    c_bool,'hydrostatic','debug_hevi_solver','rigid',
    c_int,'llm','nqdyn','primal_num','max_primal_deg',
    'dual_num','max_dual_deg','edge_num','max_trisk_deg',
    'caldyn_thermo','caldyn_eta','nb_threads',
    c_double,'elapsed','g', 'ptop', 'cpp', 'cppv', 'Rd', 'Rv', 'preff', 'Treff',
    'pbot', 'rho_bot', 'Phi_bot')

# accumulated wall-clock time spent in caldyn_unstructured (see Caldyn.bwd_fast_slow)
elapsed=0.
#----------------------------- Base class for dynamics ------------------------

class Caldyn:
    """Base class holding the work arrays shared by all dynamics flavours.

    Work arrays are allocated through the mesh's field_* factory methods;
    subclasses must provide _bwd_fast_slow_ computing tendencies via the
    Fortran kernel (except Caldyn_NH, which overrides bwd_fast_slow itself).
    """
    def __init__(self,mesh):
        self.mesh=mesh
        # field factories : surface-pressure, tracer, mass, vertical, edge, vertex fields
        fps, ftheta, fmass = mesh.field_ps, mesh.field_theta, mesh.field_mass
        fw, fu, fz = mesh.field_w, mesh.field_u, mesh.field_z
        # persistent work arrays passed to the Fortran kernel
        self.ps, self.ms, self.dms = fps(), fps(), fps()
        self.s, self.hs, self.dhs = ftheta(), ftheta(), ftheta()
        self.pk, self.berni, self.geopot, self.hflux = fmass(),fmass(),fw(),fu()
        self.qu, self.qv = fu(),fz()
        # keep the factories : tendency arrays are re-created at each call (see below)
        self.fmass, self.ftheta, self.fu, self.fw = fmass, ftheta, fu, fw
    def bwd_fast_slow(self, flow, tau):
        """Compute fast/slow tendencies of 'flow', timing the call into
        the module-level 'elapsed' accumulator."""
        global elapsed
        time1=time.time()
        flow,fast,slow = self._bwd_fast_slow_(flow,tau)
        time2=time.time()
        elapsed=elapsed+time2-time1
        return flow,fast,slow

# when calling caldyn_unstructured, arrays for tendencies must be re-created each time
# to avoid overwriting in the same memory space when time scheme is multi-stage

#-------------------------- Shallow-water dynamics ---------------------

class Caldyn_RSW(Caldyn):
    """Rotating shallow-water dynamics (Boussinesq thermodynamics, Lagrangian eta)."""
    def __init__(self,mesh):
        Caldyn.__init__(self,mesh)
        setvars(('hydrostatic','caldyn_thermo','caldyn_eta'),
                (True,thermo_boussinesq,eta_lag))
        # NB : overrides the ftheta()-shaped dhs allocated by the base class
        # with a mass-shaped array, as expected by the kernel in RSW mode
        self.dhs = self.fmass()
        dynamico_init_params()
    def _bwd_fast_slow_(self, flow, tau):
        h,u = flow
        # h*s = h => uniform buoyancy s=1 => shallow-water
        # 'buf' reuses self.geopot for the unused kernel slots
        dh, du_slow, du_fast, hs, buf = self.fmass(), self.fu(), self.fu(), h.copy(), self.geopot
        ker.dynamico_caldyn_unstructured(tau, self.ms, h, hs, u, self.geopot, buf, self.s, self.ps, self.pk, self.hflux, self.qv,
                  self.dms, dh, self.dhs, du_fast, du_slow, buf, buf, buf, buf)
        return (h,u), (0.,du_fast), (dh,du_slow)

#----------------------------------- HPE ------------------------------------

class Caldyn_HPE(Caldyn):
    """Hydrostatic primitive-equation dynamics."""
    def __init__(self,caldyn_thermo,caldyn_eta, mesh,metric,thermo,BC,g):
        Caldyn.__init__(self,mesh)
        # push physical constants and mode flags to the Fortran side
        setvars(('hydrostatic','caldyn_thermo','caldyn_eta',
                 'g','ptop','Rd','cpp','preff','Treff'),
                (True,caldyn_thermo,caldyn_eta,
                 g,BC.ptop,thermo.Rd,thermo.Cpd,thermo.p0,thermo.T0))
        dynamico_init_params()
    def _bwd_fast_slow_(self, flow, tau):
        # fresh tendency arrays each call (multi-stage schemes, see note above)
        dm, dS, du_slow, du_fast, buf = self.fmass(), self.ftheta(), self.fu(), self.fu(), self.geopot
        m,S,u = flow
        ker.dynamico_caldyn_unstructured(tau, self.ms, m, S, u, self.geopot, buf, self.s, self.ps, self.pk, self.hflux, self.qv,
                  self.dms, dm, dS, du_fast, du_slow, buf, buf, buf, buf)
        return (m,S,u), (0.,0.,du_fast), (dm,dS,du_slow)

#----------------------------------- NH ------------------------------------

class Caldyn_NH(Caldyn):
    """Non-hydrostatic dynamics : state also carries geopotential Phi and
    vertical momentum W."""
    def __init__(self,caldyn_thermo,caldyn_eta, mesh,metric,thermo,BC,g):
        Caldyn.__init__(self,mesh)
        setvars(('hydrostatic','caldyn_thermo','caldyn_eta',
                 'g','ptop','Rd','cpp','preff','Treff',
                 'pbot','rho_bot'),
                (False,caldyn_thermo,caldyn_eta,
                 g,BC.ptop,thermo.Rd,thermo.Cpd,thermo.p0,thermo.T0,
                 BC.pbot.max(), BC.rho_bot.max()))
        dynamico_init_params()
    def bwd_fast_slow(self, flow, tau):
        # NB : overrides the base-class wrapper directly, so NH calls are
        # NOT accumulated into the module-level 'elapsed' timer
        ftheta, fmass, fu, fw = self.ftheta, self.fmass, self.fu, self.fw
        dm, dS, du_slow, du_fast = fmass(), ftheta(), fu(), fu()
        dPhi_slow, dPhi_fast, dW_slow, dW_fast = fw(), fw(), fw(), fw()
        m,S,u,Phi,W = flow
        ker.dynamico_caldyn_unstructured(tau, self.ms, m, S, u, Phi, W, self.s, self.ps, self.pk, self.hflux, self.qv,
                  self.dms, dm, dS, du_fast, du_slow, dPhi_fast, dPhi_slow, dW_fast, dW_slow)
        return ((m,S,u,Phi,W), (0.,0.,du_fast,dPhi_fast,dW_fast),
                (dm,dS,du_slow,dPhi_slow,dW_slow))

#-------------------------------- Hybrid mass-based coordinate -------------

# compute hybrid coefs from distribution of mass
def compute_hybrid_coefs(mass):
    """From a (nx,llm) mass distribution, return interface coefficients
    (mass_bl, mass_dak, mass_dbk) of a purely mass-based hybrid coordinate.
    mass_dak is all-zero; mass_dbk[i,:] is the normalized mass profile and
    mass_bl its complementary cumulative sum (1 at the surface, 0 at the top)."""
    nx,llm=mass.shape
    mass_dak = np.zeros((nx,llm))
    mass_dbk = np.zeros((nx,llm))
    mass_bl = np.zeros((nx,llm+1))+1.
    for i in range(nx):
        m_i = mass[i,:]
        dbk_i = m_i/sum(m_i)
        mass_dbk[i,:] = dbk_i
        mass_bl[i,1:]= 1.-dbk_i.cumsum()
    return mass_bl, mass_dak, mass_dbk

#----------------------- Cartesian mesh -----------------------

def squeeze(dims):
    """Allocate a zero array with the size-1 dimensions of 'dims' dropped."""
#    return np.zeros(dims)
    return np.zeros([n for n in dims if n>1])

# arrays is a list of arrays
# vals is a list of tuples
# each tuple is stored in each array
def put(ij, deg, arrays, vals):
    """Scatter the tuples of 'vals' into row ij of each array and record
    the stencil degree deg[ij]=len(vals)."""
    k=0
    for vv in vals: # vv is a tuple of values to be stored in arrays
        for array,v in zip(arrays,vv):
            array[ij,k]=v
        k=k+1
    deg[ij]=k

class Cartesian_mesh:
    """Doubly-periodic Cartesian mesh of nx*ny cells and llm levels,
    expressed in the unstructured-mesh format expected by the Fortran kernel."""
    def __init__(self,nx,ny,llm,nqdyn,Lx,Ly,f):
        dx,dy = Lx/nx, Ly/ny
        self.dx, self.dy, self.f = dx,dy,f
        self.nx, self.ny, self.llm, self.nqdyn = nx,ny,llm,nqdyn
        # instance attribute shadows the class-level field_z method below;
        # vertex fields are allocated like mass fields on this mesh
        self.field_z = self.field_mass
        # 1D coordinate arrays
        x=(np.arange(nx)-nx/2.)*Lx/nx
        y=(np.arange(ny)-ny/2.)*Ly/ny
        lev=np.arange(llm)
        levp1=np.arange(llm+1)
        self.x, self.y, self.lev, self.levp1 = x,y,lev,levp1
        # 3D coordinate arrays
        self.xx,self.yy,self.ll = np.meshgrid(x,y,lev, indexing='ij')
        self.xxp1,self.yyp1,self.llp1 = np.meshgrid(x,y,levp1, indexing='ij')
        # beware conventions for indexing
        # Fortran order : llm,nx*ny,nqdyn / indices start at 1
        # Python order : nqdyn,ny,nx,llm / indices start at 0
        # indices below follow Fortran while x,y follow Python/C
        # doubly-periodic 1-based cell index; u/v edges interleaved (odd/even)
        index=lambda x,y : ((x+(nx*(y+2*ny)))%(nx*ny))+1
        indexu=lambda x,y : 2*index(x,y)-1
        indexv=lambda x,y : 2*index(x,y)
        indices = lambda shape : np.zeros(shape,np.int32)

        primal_nb = indices(nx*ny)
        primal_edge = indices((nx*ny,4))
        primal_ne = indices((nx*ny,4))

        dual_nb = indices(nx*ny)
        dual_edge = indices((nx*ny,4))
        dual_ne = indices((nx*ny,4))
        dual_vertex = indices((nx*ny,4))
        Riv2 = np.zeros((nx*ny,4))

        left = indices(2*nx*ny)
        right = indices(2*nx*ny)
        up = indices(2*nx*ny)
        down = indices(2*nx*ny)
        le_de = np.zeros(2*nx*ny)
        trisk_deg = indices(2*nx*ny)
        trisk = indices((2*nx*ny,4)) # 4 TRiSK coefs per edge
        wee = np.zeros((2*nx*ny,4))

        for x in range(nx):
            for y in range(ny):
                # NB : Fortran indices start at 1
                # primal cells
                put(index(x,y)-1,primal_nb,(primal_edge,primal_ne),
                    ((indexu(x,y),1),
                     (indexv(x,y),1),
                     (indexu(x-1,y),-1),
                     (indexv(x,y-1),-1) ))
                # dual cells
                put(index(x,y)-1,dual_nb,(dual_edge,dual_vertex,dual_ne,Riv2),
                    ((indexv(x+1,y),index(x,y),1,.25),
                     (indexu(x,y+1),index(x+1,y),-1,.25),
                     (indexv(x,y),index(x+1,y+1),-1,.25),
                     (indexu(x,y),index(x,y+1),1,.25) ))
                # edges :
                # left and right are adjacent primal cells
                # flux is positive when going from left to right
                # up and down are adjacent dual cells (vertices)
                # circulation is positive when going from down to up
                # u-points
                ij =indexu(x,y)-1
                left[ij]=index(x,y)
                right[ij]=index(x+1,y)
                down[ij]=index(x,y-1)
                up[ij]=index(x,y)
                le_de[ij]=dy/dx
                put(ij,trisk_deg,(trisk,wee),(
                    (indexv(x,y),-.25),
                    (indexv(x+1,y),-.25),
                    (indexv(x,y-1),-.25),
                    (indexv(x+1,y-1),-.25)))
                # v-points
                ij = indexv(x,y)-1
                left[ij]=index(x,y)
                right[ij]=index(x,y+1)
                down[ij]=index(x,y)
                up[ij]=index(x-1,y)
                le_de[ij]=dx/dy
                put(ij,trisk_deg,(trisk,wee),(
                    (indexu(x,y),.25),
                    (indexu(x-1,y),.25),
                    (indexu(x,y+1),.25),
                    (indexu(x-1,y+1),.25)))
        # publish mesh sizes to the Fortran side, then hand over connectivity
        setvars(('llm','nqdyn','edge_num','primal_num','dual_num',
                 'max_trisk_deg','max_primal_deg','max_dual_deg'),
                (llm,nqdyn,2*nx*ny,nx*ny,nx*ny,4,4,4) )
        print('init_mesh ...')
        ker.dynamico_init_mesh(primal_nb,primal_edge,primal_ne,
                               dual_nb,dual_edge,dual_ne,dual_vertex,
                               left,right,down,up,trisk_deg,trisk)
        print ('...done')
        Aiv=np.zeros(nx*ny)+dx*dy # Ai=Av=dx*dy
        print('init_metric ...')
        ker.dynamico_init_metric(Aiv,Aiv,f+0.*Aiv,le_de,Riv2,-wee)
        print ('...done')
    # field factories : allocate zero arrays of the proper shape
    def field_theta(self): return squeeze((self.nqdyn,self.ny,self.nx,self.llm))
    def field_mass(self): return squeeze((self.ny,self.nx,self.llm))
    def field_z(self): return squeeze((self.ny,self.nx,self.llm))
    def field_w(self): return squeeze((self.ny,self.nx,self.llm+1))
    def field_u(self):
        return np.zeros((self.ny,2*self.nx,self.llm))
    def field_ps(self): return squeeze((self.ny,self.nx))
    # extract/store u (even columns) and v (odd columns) from interleaved edge arrays
    def ucomp(self,u): return u[:,range(0,2*self.nx,2),:]
    def set_ucomp(self,uv,u): uv[:,range(0,2*self.nx,2),:]=u
    def vcomp(self,u): return u[:,range(1,2*self.nx,2),:]

#---------------------- MPAS fully unstructured mesh ------------------------

def compute_ne(num,deg,edges,left,right):
    """Return edge orientations ne[cell,iedge] = +1/-1 depending on whether
    'cell' is the left or right neighbour of the edge (1-based edge/cell ids)."""
    ne = np.zeros_like(edges)
    for cell in range(num):
        for iedge in range(deg[cell]):
            edge = edges[cell,iedge]-1
            if left[edge]==cell+1: ne[cell,iedge]+=1
            if right[edge]==cell+1: ne[cell,iedge]-=1
            if ne[cell,iedge]==0 : print 'error at cell,iedge', cell, iedge
    return ne

def plot(tri,data):
    """Filled-contour lat/lon plot of 'data' on triangulation 'tri'
    (NB : parameter shadows the matplotlib.tri module alias)."""
    plt.figure(figsize=(12,4))
    plt.gca().set_aspect('equal')
    plt.tricontourf(tri, data, 20)
    plt.colorbar()
    plt.ylim((-90,90))
    plt.xlim((0,360))

class MPAS_Mesh:
    """Unstructured mesh read from an MPAS netCDF grid file, with weight
    renormalization, passed to the Fortran kernel via dynamico_init_mesh/metric."""
    def __init__(self, gridfile, llm, nqdyn, radius, f):
        self.gridfile, self.llm, self.nqdyn = gridfile,llm,nqdyn
        self.radius, self.f = radius, f
        # open mesh file, get main dimensions
        try:
            nc = cdf.Dataset(gridfile, "r")
        except RuntimeError:
            print """
Unable to open grid file %s, maybe you forgot to download it ?
To do so, go to the 'Python/' dir and execute './get_MPAS_grids.sh'.
""" % gridfile
            raise
        def getdims(*names): return [len(nc.dimensions[name]) for name in names]
        def getvars(*names):
            # read whole netCDF variables into memory, with timing
            for name in names : print "getvar %s ..."%name
            time1=time.time()
            ret=[nc.variables[name][:] for name in names]
            print "... Done in %f seconds"%(time.time()-time1)
            return ret
        primal_num, edge_num, dual_num = getdims('nCells','nEdges','nVertices')
        print 'Number of primal cells, dual cells and edges : %d, %d, %d '%(primal_num,dual_num,edge_num)
        primal_deg, trisk_deg = getvars('nEdgesOnCell','nEdgesOnEdge')
        # dual cells of an MPAS mesh are triangles : degree 3 everywhere
        dual_deg = [3 for i in range(dual_num) ]
        dual_deg = np.ascontiguousarray(dual_deg,dtype=np.int32)
        # NB : Fortran code expects 32-bit ints

        # get indices for stencils
        # primal -> vertices (unused)
        primal_vertex, dual_vertex = getvars('verticesOnCell','cellsOnVertex')
        # primal <-> edges
        primal_edge, left_right = getvars('edgesOnCell','cellsOnEdge')
        left,right = left_right[:,0], left_right[:,1]
        primal_ne = compute_ne(primal_num,primal_deg,primal_edge,left,right)
        # dual <-> edges
        dual_edge, down_up = getvars('edgesOnVertex','verticesOnEdge')
        down,up = down_up[:,0], down_up[:,1]
        dual_ne = compute_ne(dual_num,dual_deg,dual_edge,up,down)
        # primal <-> dual, edges <-> edges (TRiSK)
        # NOTE(review): dual_vertex is re-read here, overwriting the value
        # fetched above together with primal_vertex
        dual_vertex, trisk = getvars('cellsOnVertex','edgesOnEdge')
        # get positions, lengths, surfaces and weights
        le,de,Ai,Av = getvars('dvEdge','dcEdge','areaCell','areaTriangle')
        lat_i,lon_i = getvars('latCell','lonCell')
        lat_v,lon_v = getvars('latVertex','lonVertex')
        lat_e,lon_e,angle_e = getvars('latEdge','lonEdge','angleEdge')
        wee,Riv2 = getvars('weightsOnEdge','kiteAreasOnVertex')
        # fix normalization of wee and Riv2 weights
        for edge1 in range(edge_num):
            for i in range(trisk_deg[edge1]):
                edge2=trisk[edge1,i]-1 # NB Fortran vs C/Python indexing
                wee[edge1,i] = de[edge1]*wee[edge1,i]/le[edge2]
        for ivertex in range(dual_num):
            for j in range(3):
                Riv2[ivertex,j]=Riv2[ivertex,j]/Av[ivertex]
            r=Riv2[ivertex,:]
            r=sum(r)
            # normalized kite areas must sum to 1 around each vertex
            if abs(r-1.)>1e-6 : print 'error with Riv2 at vertex ', ivertex
        max_primal_deg, max_dual_deg, max_trisk_deg = getdims('maxEdges','vertexDegree','maxEdges2')
        # CRITICAL : force arrays left, etc. to be contiguous in memory:
        left,right,up,down = [np.ascontiguousarray(x) for x in (left,right,up,down)]
        trisk,wee,primal_edge = [np.ascontiguousarray(x) for x in (trisk,wee,primal_edge)]
        print ('Max stencil sizes (div,curl,trisk) : %d, %d, %d'
               % (max_primal_deg, max_dual_deg, max_trisk_deg) )
        # rescale metric quantities from unit sphere to sphere of given radius
        r2 = radius**2
        Av = r2*Av
        fv = f(lon_v,lat_v) # Coriolis parameter
        self.Ai, self.Av, self.fv = r2*Ai,Av,fv
        self.le, self.de, self.le_de = radius*le, radius*de, le/de
        self.trisk_deg, self.trisk, self.wee = trisk_deg, trisk, wee
        setvars(('llm','nqdyn','edge_num','primal_num','dual_num',
                 'max_trisk_deg','max_primal_deg','max_dual_deg'),
                (llm, nqdyn, edge_num, primal_num,dual_num,
                 max_trisk_deg, max_primal_deg, max_dual_deg) )
        ker.dynamico_init_mesh(primal_deg,primal_edge,primal_ne,
                               dual_deg,dual_edge,dual_ne,dual_vertex,
                               left,right,down,up,trisk_deg,trisk)
        ker.dynamico_init_metric(self.Ai,self.Av,self.fv,le/de,Riv2,wee)
        # sanity check : all TRiSK edge indices must be valid (1-based)
        for edge in range(edge_num):
            iedge = trisk[edge,0:trisk_deg[edge]]
            if iedge.min()<1 : print 'error'
        self.primal_num, self.edge_num, self.dual_num = primal_num, edge_num, dual_num
        # wrap longitudes into [0,2*pi)
        def period(x) : return (x+2*math.pi)%(2*math.pi)
        lon_i, lon_v, lon_e = map(period, (lon_i,lon_v,lon_e))
        self.lon_i, self.lat_i = lon_i, lat_i
        self.lon_v, self.lat_v = lon_v, lat_v
        self.lon_e, self.lat_e, self.angle_e = lon_e, lat_e, angle_e
        self.primal_deg, self.primal_vertex = primal_deg, primal_vertex
        # triangulations (in degrees) for plotting cell/vertex/edge data
        self.primal = tri.Triangulation(lon_i*180./math.pi, lat_i*180./math.pi)
        self.dual_deg, self.dual_vertex = dual_deg, dual_vertex
        self.dual = tri.Triangulation(lon_v*180./math.pi, lat_v*180./math.pi)
        self.triedge = tri.Triangulation(lon_e*180./math.pi, lat_e*180./math.pi)
        self.dx = de.min()
        self.lon3D_i, self.ll3D = np.meshgrid(lon_i, range(llm))
        self.lat3D_i, self.ll3D = np.meshgrid(lat_i, range(llm))
    # field factories : allocate zero arrays of the proper shape
    def field_theta(self): return squeeze((self.nqdyn,self.primal_num,self.llm))
    def field_mass(self): return squeeze((self.primal_num,self.llm))
    def field_z(self): return squeeze((self.dual_num,self.llm))
    def field_w(self): return squeeze((self.primal_num,self.llm+1))
    def field_u(self): return squeeze((self.edge_num,self.llm))
    def field_ps(self): return squeeze((self.primal_num,))
    def ucov2D(self, ulon, ulat):
        """Covariant edge velocity from zonal/meridional components (2D fields)."""
        return self.de*(ulon*np.cos(self.angle_e)+ulat*np.sin(self.angle_e))
    def ucov3D(self, ulon, ulat):
        """Covariant edge velocity from zonal/meridional components, level by level."""
        ucov = np.zeros((self.edge_num,ulon.shape[1]))
        for edge in range(self.edge_num):
            angle=self.angle_e[edge]
            ucov[edge,:] = self.de[edge]*(ulon[edge,:]*math.cos(angle)+ulat[edge,:]*math.sin(angle))
        return ucov
    # plot data attached to primal cells / dual cells (vertices) / edges
    def plot_i(self,data): plot(self.primal,data)
    def plot_v(self,data): plot(self.dual,data)
    def plot_e(self,data): plot(self.triedge,data)

#-------------------------------------- Mesh partitioning ------------------------------------------#

# Helper functions and interface to ParMETIS
# list_stencil converts an adjacency graph from array format index[num_cells, MAX_EDGES] to compressed format
# loc_stencil returns the start/end indices (vtxdist) expected by ParMETIS
# i.e.
# index[start:end] with start=vtxdist[cell], end=vtxdist[cell+1] lists the edges of cell 'cell'

def list_stencil(degree, stencil, cond=lambda x:True):
    """Yield the entries of 'stencil' row by row (degree[i] entries per row),
    keeping only those satisfying 'cond' (compressed adjacency list)."""
    for i in range(degree.size):
        for j in range(degree[i]):
            s=stencil[i,j]
            if cond(s):
                yield stencil[i,j]

def loc_stencil(degree, stencil):
    """Yield the cumulative offsets (vtxdist-style) of each row of the
    compressed adjacency produced by list_stencil; degree.size+1 values."""
    loc=0
    for i in range(degree.size):
        yield loc
        loc=loc+degree[i]
    yield loc

def partition_mesh(degree, stencil, nparts):
    """Partition the mesh graph into 'nparts' parts via the Fortran/ParMETIS
    wrapper; returns an int32 owner index per local cell."""
    # arguments : PArray1D and PArray2D describing mesh, number of desired partitions
    dim_cell, degree, stencil = degree.dim, degree.data, stencil.data
    comm, vtxdist, idx_start, idx_end = dim_cell.comm, dim_cell.vtxdist, dim_cell.start, dim_cell.end
    mpi_rank, mpi_size = comm.Get_rank(), comm.Get_size()
    # build compressed adjacency (adjncy/xadj) as contiguous int32 arrays
    adjncy_loc, xadj_loc = list_stencil(degree, stencil), loc_stencil(degree, stencil)
    adjncy_loc, xadj_loc = [np.asarray(list(x), dtype=np.int32) for x in (adjncy_loc, xadj_loc)]
    owner = np.zeros(idx_end-idx_start, dtype=np.int32);
    ker.dynamico_partition_graph(mpi_rank, mpi_size, vtxdist, xadj_loc, adjncy_loc, nparts, owner)
    return owner

def partition_from_stencil(owner2, degree, stencil):
    """Derive ownership on dim1 from ownership 'owner2' on dim2 through a
    dim1->dim2 stencil."""
    # given a stencil dim1->dim2 and owner2 on dim2, define owner[i] on dim1 as min(stencil[i,:] if i is even, max if odd
    dim1, dim2= degree.dim, owner2.dim
    degree, stencil, n = degree.data, stencil.data, dim1.end-dim1.start
    cells2 = list_stencil(degree, stencil)
    cells2 = sorted(list(set(list(cells2)))) # list of cells for which we need to know owner2
    get2 = dim2.getter(cells2)
    owner1, owner2, glob2loc = np.zeros(n, dtype=np.int32), get2(owner2), get2.dict
    for i in range(n):
        owners = [ owner2[glob2loc[stencil[i,j]]] for j in range(degree[i]) ]
        # alternate min/max to balance ownership between neighbouring ranks
        if i%2 == 0 : owner1[i] = min(owners)
        else : owner1[i] = max(owners)
    return owner1

def find_my_cells(owner):
    """From the owner array (a PArray1D), exchange cell lists between MPI
    ranks and return the sorted list of global indices owned by this rank."""
    # a PArray1D containing the data returned by partition_mesh
    dim, comm, owner = owner.dim, owner.dim.comm, owner.data
    mpi_rank, mpi_size = comm.Get_rank(), comm.Get_size()
    cells=[set() for i in range(mpi_size)]
    for i in range(len(owner)):
        cells[owner[i]].add(dim.start+i)
    cells = [sorted(list(cells[i])) for i in range(mpi_size)]
    mycells = comm.alltoall(cells)
    mycells = sorted(sum(mycells, [])) # concatenate into a single list
    return mycells

#---------------------------------- Stencil management ---------------------------------------#

# Class Stencil represents an adjacency relationship (e.g. cell=>edges)
# using adjacency information read from PArrays
# It computes a list of "edges" adjacent to a given list of "cells"
# This is used to form the sets E0 -> C0 -> E1 -> V1 -> E2 -> C1
# which are then used to form lists of global indices for V1,C1,E2 such that
# C0, E0, E1 form contiguous subsets of C1, E2 starting from 0

def reindex(vertex_dict, degree, bounds):
    """Remap 'bounds' in place through 'vertex_dict' (global -> local indices).
    NOTE(review): returns None; Stencil.__init__ stores its result as
    neigh_loc -- confirm whether a return value was intended."""
    for i in range(degree.size):
        for j in range(degree[i]):
            bounds[i,j] = vertex_dict[bounds[i,j]]

class Stencil_glob:
    """Factory bundling global degree/neighbour (and optional weight) PArrays;
    calling it restricts the stencil to a list of cells."""
    def __init__(self, degree, neigh, weight=None):
        self.degree, self.neigh, self.weight = degree, neigh, weight
    def __call__(self, cells, neigh_dict=None):
        return Stencil(cells, self.degree, self.neigh, neigh_dict, self.weight)

class Stencil:
    """Restriction of an adjacency relationship to a list of cells,
    optionally filtered by a dict of admissible neighbours."""
    def __init__(self, cells, degree, neigh, neigh_dict, weight=None):
        get = degree.dim.getter(cells)
        mydegree, myneigh = [get(x) for x in (degree, neigh)]
        if not weight is None : myweight = get(weight)
        if neigh_dict is None :
            keep = lambda n : True
        else : # if neigh_dict is present, only neighbours in dict are retained
            keep = lambda n : n in neigh_dict
        neigh_set = list_stencil(mydegree, myneigh, keep)
        self.neigh_set = list(set(list(neigh_set) ))
        rej=0
        for i in range(len(mydegree)): # keep only elements in neigh_dict, in-place
            k=0
            for j in range(mydegree[i]):
                n=myneigh[i,j]
                if keep(n):
                    myneigh[i,k]=n
                    if not weight is None : myweight[i,k]=myweight[i,j]
                    k=k+1
                else:
                    rej=rej+1
            mydegree[i]=k
        if neigh_dict is None:
            neigh_dict = {j:i for i,j in enumerate(self.neigh_set)}
        # reindex remaps myneigh in place to local indices (and returns None,
        # see NOTE on reindex above)
        myneigh_loc = reindex(neigh_dict, mydegree, myneigh)
        self.degree, self.neigh_glob, self.neigh_loc = mydegree, myneigh, myneigh_loc

def progressive_iter(mylist, cell_lists):
    """Yield growing lists : after each step, the new elements of the next
    cell list are appended to 'mylist' (preserving previous ordering)."""
    for thelist in cell_lists:
        mylist = mylist + list(set(thelist)-set(mylist))
        yield mylist

def progressive_list(*cell_lists):
    """Merge increasing lists of global indices so that each input list is a
    prefix of the merged ordering.
    NOTE(review): list() materializes every intermediate snapshot, so this
    returns a list of lists -- confirm against callers whether the last
    snapshot alone was intended."""
    # cell_lists : a tuple of lists of global indices, with each list a subset of the next
    # returns : a list 'mylist' such that for each list 'thelist' in cell_lists, thelist = mylist[0:len(thelist)]
    # example : edge_list = progressive_list(E0,E1,E2) with E0,E1,E2 increasing lists of cell edges
    return list(progressive_iter([], cell_lists))