icb_pp.py in trunk/NEMOGCM/TOOLS/MISCELLANEOUS – NEMO

source: trunk/NEMOGCM/TOOLS/MISCELLANEOUS/icb_pp.py @ 6423

Last change on this file: r6423, checked in by davestorkey, 9 years ago:
  1. Bug fix for icb_combrest.py. Will now cope with a set of iceberg restart files with icebergs on the zeroth processor.
  2. Add nonzero sys.exit codes for icb_combrest.py and icb_pp.py.

See ticket #1708

  • Property svn:executable set to *
File size: 4.9 KB
from netCDF4 import Dataset
from argparse import ArgumentParser
import numpy as np
import sys

#
# Basic iceberg trajectory post-processing python script.
# This script collates iceberg trajectories from the distributed datasets written
# out by each processing region and rearranges the ragged arrays into contiguous
# streams for each unique iceberg. The output arrays are 2D (ntraj, ntimes) arrays.
# Note that some icebergs may only exist for a subset of the possible times. In these
# cases the missing instances are filled with invalid (NaN) values.
#
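# For example (illustrative numbers only, not taken from a real run): if the
# distributed files collectively hold records for icebergs {12, 47} at
# timesteps {100, 200, 300}, each output variable becomes a 2 x 3 array, with
# NaN wherever an iceberg has no record at that timestep.
#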

parser = ArgumentParser(description='produce collated trajectory file from distributed output\
                                     files, e.g. \n python ./icb_pp.py \
                                     -t  trajectory_icebergs_004248_ -n 296 -o trajsout.nc' )

parser.add_argument('-t', dest='froot', help='fileroot_of_distributed_data; root name of \
                     distributed trajectory output (usually completed with XXXX.nc, where \
                     XXXX is the 4 digit processor number)',
                     default='trajectory_icebergs_004248_')

parser.add_argument('-n', dest='fnum', help='number of distributed files to process',
                     type=int, default=None)

parser.add_argument('-o', dest='fout', help='collated_output_file; file name to receive the \
                     collated trajectory data', default='trajsout.nc')

args = parser.parse_args()

default_used = 0
if args.froot is None:
    pathstart = 'trajectory_icebergs_004248_'
    default_used = 1
else:
    pathstart = args.froot

if args.fnum is None:
    procnum = 0
    default_used = 1
else:
    procnum = args.fnum

if args.fout is None:
    pathout = 'trajsout.nc'
    default_used = 1
else:
    pathout = args.fout

if default_used == 1:
    print('At least one default value will be used; command executing is:')
    print('icb_pp.py -t ', pathstart, ' -n ', procnum, ' -o ', pathout)

if procnum < 1:
    print('Need some files to collate! procnum = ', procnum)
    sys.exit(11)
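    # A nonzero exit status (rather than a bare sys.exit()) lets calling
    # scripts detect the failure; see the r6423 commit message and ticket #1708.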

icu = []
times = []
#
# Loop through all distributed datasets to obtain the complete list
# of iceberg identification numbers and timesteps
#
for n in range(procnum):
    nn = '%4.4d' % n
    fw = Dataset(pathstart + nn + '.nc')
    if len(fw.dimensions['n']) > 0:
        print(pathstart + nn + '.nc')
        ic = fw.variables['iceberg_number'][:, 0]
        ts = fw.variables['timestep'][:]
        icv = np.unique(ic)
        ts = np.unique(ts)
        print('Min Max ts: ', ts.min(), ts.max())
        print('Number unique icebergs= ', icv.shape[0])
        icu.append(icv)
        times.append(ts)
    fw.close()
#
# Now flatten the lists and reduce to the unique spanning set
#
icu = np.concatenate(icu)
icu = np.unique(icu)
times = np.concatenate(times)
times = np.unique(times)
ntraj = icu.shape[0]
print(ntraj, ' unique icebergs found across all datasets')
print('Iceberg ids range from: ', icu.min(), 'to: ', icu.max())
print('times range from:       ', times.min(), 'to: ', times.max())
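# (np.unique also sorts its output, so icu and times double as the row and
# column coordinate axes of the collated 2-D arrays declared below.)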
#
# Declare 2-D arrays to receive the data from all files
#
nt = times.shape[0]
lons = np.zeros((ntraj, nt))
lats = np.zeros((ntraj, nt))
tims = np.zeros((ntraj, nt))
xis  = np.zeros((ntraj, nt))
yjs  = np.zeros((ntraj, nt))
#
# initially fill with invalid data
#
lons.fill(np.nan)
lats.fill(np.nan)
xis.fill(np.nan)
yjs.fill(np.nan)
tims.fill(np.nan)
#
# loop through distributed datasets again, this time
# checking indices against icu and times lists and
# inserting data into the correct locations in the
# 2-D collated sets.
#
for n in range(procnum):
    nn = '%4.4d' % n
    fw = Dataset(pathstart + nn + '.nc')
    # Note many distributed datafiles will contain no iceberg data
    # so skip quickly over these
    m = len(fw.dimensions['n'])
    if m > 0:
        inx = np.zeros(m, dtype=int)
        tsx = np.zeros(m, dtype=int)
        print(pathstart + nn + '.nc')
        ic = fw.variables['iceberg_number'][:, 0]
        ts = fw.variables['timestep'][:]
        lns = fw.variables['lon'][:]
        lts = fw.variables['lat'][:]
        xxs = fw.variables['xi'][:]
        yys = fw.variables['yj'][:]
        for k in range(m):
            inxx = np.where(icu == ic[k])
            inx[k] = inxx[0]
        for k in range(m):
            inxx = np.where(times == ts[k])
            tsx[k] = inxx[0]
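        # (Since np.unique returned sorted arrays, the per-record np.where
        # lookups above are equivalent to np.searchsorted(icu, ic) and
        # np.searchsorted(times, ts); noted as an aside, not a change.)
        # Scatter this file's records into the collated arrays: record k
        # lands at row inx[k] (iceberg) and column tsx[k] (timestep).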
        lons[inx[:], tsx[:]] = lns[:]
        lats[inx[:], tsx[:]] = lts[:]
        tims[inx[:], tsx[:]] = ts[:]
        xis[inx[:], tsx[:]] = xxs[:]
        yjs[inx[:], tsx[:]] = yys[:]
    fw.close()

#
# Finally create the output file and write out the collated sets
#
fo = Dataset(pathout, 'w', format='NETCDF4')
ntrj = fo.createDimension('ntraj', ntraj)
nti  = fo.createDimension('ntime', None)
olon = fo.createVariable('lon', 'f4', ('ntraj', 'ntime'))
olat = fo.createVariable('lat', 'f4', ('ntraj', 'ntime'))
otim = fo.createVariable('ttim', 'f4', ('ntraj', 'ntime'))
oxis = fo.createVariable('xis', 'f4', ('ntraj', 'ntime'))
oyjs = fo.createVariable('yjs', 'f4', ('ntraj', 'ntime'))
icbn = fo.createVariable('icbn', 'f4', ('ntraj'))
olon[:, :] = lons
olat[:, :] = lats
otim[:, :] = tims
oxis[:, :] = xis
oyjs[:, :] = yjs
icbn[:] = icu
fo.close()
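
A quick way to sanity-check the collated file is to reopen it and compare the dimension sizes and NaN padding against the counts the script printed; a minimal sketch, assuming the default output name trajsout.nc:

from netCDF4 import Dataset
import numpy as np

fo = Dataset('trajsout.nc')
lon = fo.variables['lon'][:, :]   # collated (ntraj, ntime) longitudes
print(len(fo.dimensions['ntraj']), 'trajectories,',
      len(fo.dimensions['ntime']), 'time levels')
print('valid records:', np.isfinite(lon).sum(), 'of', lon.size)
fo.close()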