workflow_obs.py
"""Workflow to extract obs data."""
import atexit
import logging
import os
import xarray as xr
import numpy as np
from dask import config as dskconf
from dask.distributed import Client
from dask.diagnostics import ProgressBar
import xclim
import xscen as xs
from xscen.config import CONFIG
# Load configuration
xs.load_config(
"paths_obs.yml", "config_obs.yml", verbose=(__name__ == "__main__"), reset=True
)
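# Note: paths_obs.yml and config_obs.yml are project-specific and not shown here.
# As a rough, non-authoritative sketch, this script expects them to provide at least:
#   - CONFIG["tasks"]: the list of tasks to run ("extract", "regrid", "cleanup",
#     "indicators", "climatologies", "plotting", "ensembles")
#   - CONFIG["paths"]["project_catalog"]: location of the ProjectCatalog file
#   - CONFIG["paths"]["task"]: an output path template formatted with fields such as
#     {id}, {xrfreq} and {processing_level}
#   - one section per task (e.g. CONFIG["extract"], CONFIG["regrid"], ...) holding the
#     keyword arguments forwarded to the corresponding xscen calls, plus "dask" and
#     "save" options.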
# get logger
if "logging" in CONFIG:
logger = logging.getLogger("xscen")

if __name__ == "__main__":
# set dask configuration
daskkws = CONFIG["dask"].get("client", {})
dskconf.set(**{k: v for k, v in CONFIG["dask"].items() if k != "client"})
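    # (CONFIG["dask"]["client"] would typically hold dask.distributed.Client keyword
    # arguments such as n_workers, threads_per_worker or memory_limit, while the
    # remaining CONFIG["dask"] entries are forwarded to dask.config.set; the exact
    # contents depend on the unshown YAML configuration.)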
    # relax xclim data validation (log instead of raise) so indicators can be computed on 3H data
xclim.set_options(data_validation="log")
    # append a copy of the config to the log file
    if "logging" in CONFIG and "file" in CONFIG["logging"]["handlers"]:
        with (
            open(CONFIG["logging"]["handlers"]["file"]["filename"], "a+") as f1,
            open("config_obs.yml") as f2,
        ):
            f1.write(f2.read())
# set email config
if "scripting" in CONFIG:
atexit.register(xs.send_mail_on_exit, subject=CONFIG["scripting"]["subject"])
# initialize Project Catalog (only do this once, if the file doesn't already exist)
if not os.path.exists(CONFIG["paths"]["project_catalog"]):
pcat = xs.ProjectCatalog.create(
CONFIG["paths"]["project_catalog"],
project=CONFIG["project"],
)
# load project catalog
pcat = xs.ProjectCatalog(CONFIG["paths"]["project_catalog"])
# set some recurrent variables
    tdd = CONFIG.get("to_dataset_dict", {})
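    # Most tasks below follow the same pattern: search the catalog(s) for inputs, skip
    # anything already registered in the project catalog (pcat.exists_in_cat), open a
    # dedicated dask Client, compute, save the result to zarr, and record the new
    # dataset with pcat.update_from_ds.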

    # --- EXTRACT ---
if "extract" in CONFIG["tasks"]:
# iterate on types to extract (reconstruction, simulation)
for source_type, type_dict in CONFIG["extract"].items():
# filter catalog for data that we want
cat = xs.search_data_catalogs(**type_dict["search_data_catalogs"])
# iterate over ids from the search
for ds_id, dc in cat.items():
# attrs of current iteration that are relevant now
cur = {
"id": ds_id,
"xrfreq": "D",
"processing_level": "extracted",
}
            # check if this step was already done
if not pcat.exists_in_cat(**cur):
with (
Client(**type_dict["dask"], **daskkws),
xs.measure_time(name=f"extract {ds_id}", logger=logger),
):
# create dataset from sub-catalog with right domain and periods
ds_dict = xs.extract_dataset(
catalog=dc,
**type_dict["extract_dataset"],
)
# iterate over the different datasets/frequencies
for key_freq, ds in ds_dict.items():
if type_dict.get("floor", False):
# won't be needing this when data is completely cleaned
ds["time"] = ds.time.dt.floor(type_dict["floor"])
# drop nans and stack lat/lon in 1d loc (makes code faster)
if type_dict.get("stack_drop_nans", False):
ds = xs.utils.stack_drop_nans(
ds,
ds[list(ds.data_vars)[0]]
.isel(time=0, drop=True)
.notnull(),
)
# save to zarr
path = CONFIG["paths"]["task"].format(**cur)
# try:
# xs.save_to_netcdf(ds=ds, filename=path.replace('zarr', 'nc'),
# rechunk=type_dict["save"]["rechunk"],
# # for some reason h5netcdf doesn't write correct files here
# netcdf_kwargs={'engine': 'netcdf4'})
                            # except:
                            #     print(f'Couldn\'t write file: {path.replace("zarr", "nc")}')
xs.save_to_zarr(ds=ds, filename=path, **type_dict["save"])
pcat.update_from_ds(ds=ds, path=path)

    # --- REGRID ---
if "regrid" in CONFIG["tasks"]:
# get input and iter over datasets
input_dict = pcat.search(**CONFIG["regrid"]["inputs"]).to_dataset_dict(**tdd)
for key_input, ds_input in input_dict.items():
cur = {
"id": ds_input.attrs["cat:id"],
"xrfreq": ds_input.attrs["cat:xrfreq"],
"processing_level": "regridded",
}
if not pcat.exists_in_cat(**cur):
with (
Client(**CONFIG["regrid"]["dask"], **daskkws),
xs.measure_time(name=f"regrid {key_input}", logger=logger),
):
# get output grid
ds_grid = pcat.search(**CONFIG["regrid"]["output"]).to_dataset(
**tdd
)
# do regridding
ds_regrid = xs.regrid_dataset(
ds=ds_input,
ds_grid=ds_grid,
)
# save to zarr
path = f"{CONFIG['paths']['task']}".format(**cur)
xs.save_to_zarr(ds=ds_regrid, filename=path, **CONFIG["regrid"]["save"])
pcat.update_from_ds(ds=ds_regrid, path=path, info_dict=cur)

    # --- CLEAN UP ---
if "cleanup" in CONFIG["tasks"]:
# get all datasets to clean up and iter
cu_cats = xs.search_data_catalogs(**CONFIG["cleanup"]["search_data_catalogs"])
for cu_id, cu_cat in cu_cats.items():
cur = {
"id": cu_id,
"xrfreq": "D",
"processing_level": "cleaned",
}
if not pcat.exists_in_cat(**cur):
with (
Client(**CONFIG["cleanup"]["dask"], **daskkws),
xs.measure_time(name=f"clean {cu_id}", logger=logger),
):
# put the individually adjusted variables back together in one ds
freq_dict = xs.extract_dataset(catalog=cu_cat)
# iter over dataset of different frequencies (usually just 'D')
for key_freq, ds in freq_dict.items():
# clean up the dataset
ds_clean = xs.clean_up(
ds=ds,
**CONFIG["cleanup"]["xscen_clean_up"],
)
# save and update
path = f"{CONFIG['paths']['task']}".format(**cur)
xs.save_to_zarr(ds_clean, path, **CONFIG["cleanup"]["save"])
pcat.update_from_ds(ds=ds_clean, path=path)

    # --- INDICATORS ---
if "indicators" in CONFIG["tasks"]:
# get input and iter
dict_input = pcat.search(**CONFIG["indicators"]["inputs"]).to_dataset_dict(
**tdd
)
for key_input, ds_input in dict_input.items():
with (
Client(**CONFIG["indicators"]["dask"], **daskkws),
xs.measure_time(name=f"indicators {key_input}", logger=logger),
):
# compute indicators
dict_indicator = xs.compute_indicators(
ds=ds_input,
indicators=xs.indicators.select_inds_for_avail_vars(ds_input, CONFIG["indicators"]["path_yml"]),
)
# iter over output dictionary (keys are freqs)
for key_freq, ds_ind in dict_indicator.items():
cur = {
"id": ds_input.attrs["cat:id"],
"xrfreq": key_freq,
"processing_level": "indicators",
}
if not pcat.exists_in_cat(**cur):
# save to zarr
path_ind = f"{CONFIG['paths']['task']}".format(**cur)
xs.save_to_zarr(
ds_ind, path_ind, **CONFIG["indicators"]["save"]
)
pcat.update_from_ds(ds=ds_ind, path=path_ind)

    # --- CLIMATOLOGIES ---
if "climatologies" in CONFIG["tasks"]:
# iterate over inputs
ind_dict = pcat.search(**CONFIG["aggregate"]["input"]["obs"]).to_dataset_dict(
**tdd
)
for key_input, ds_input in sorted(ind_dict.items()):
cur = {
"id": ds_input.attrs["cat:id"],
"xrfreq": ds_input.attrs["cat:xrfreq"],
"processing_level": "climatology",
}
# filter for testing
# only do ERA5 for now and crop it!
# if not all(s in key_input for s in ['ERA5', 'MS']):
# continue
# ds_input = ds_input.isel(lat=slice(100, 125), lon=slice(100, 120), )
if not pcat.exists_in_cat(**cur):
                with (
                    Client(**CONFIG["aggregate"]["dask"], **daskkws) as client,
                    xs.measure_time(name=f"climatologies {key_input}", logger=logger),
                ):
# compute climatological mean
all_periods = []
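                    # For each requested period, up to four datasets are appended to
                    # all_periods and merged at the end: the climatological mean, the
                    # interannual standard deviation, a derived total standard deviation,
                    # and the linear trend of the *_mean variables.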
for period in CONFIG["aggregate"]["periods"]:
# # skip all data except bcs for period 1980-1985 ToDo: remove when tests finished
# if int(period[0]) == 1980 and int(period[1]) == 1985 and 'bcs' not in key_input:
# continue
                        # only compute for periods that are fully contained in the data
                        if (
                            ds_input.time.dt.year.min() <= int(period[0])
                            and ds_input.time.dt.year.max() >= int(period[1])
                        ):
logger.info(f"Computing climatology for {key_input} for period {period}")
# Calculate climatological mean --------------------------------
logger.info(f"Computing climatological mean for {key_input} for period {period}")
ds_mean = xs.aggregate.climatological_op(
ds=ds_input,
**CONFIG["aggregate"]["climatological_mean"],
periods=period,
rename_variables=True,
periods_as_dim=True,
)
# ds_mean = ds_mean.assign_coords(period=f'{period[0]}-{period[1]}')
# ds_mean = ds_mean.expand_dims(dim='period')
# ds_mean = ds_mean.drop_vars('horizon')
all_periods.append(ds_mean)
# Calculate interannual standard deviation, skipping intra-[freq] std --------------------
logger.info(f"Computing interannual standard deviation for {key_input} for period {period}")
ds_input_std = ds_input.filter_by_attrs(
description=lambda s: 'standard deviation' not in str(s)
)
ds_std = xs.aggregate.climatological_op(
ds=ds_input_std,
**CONFIG["aggregate"]["climatological_std"],
periods=period,
rename_variables=True,
periods_as_dim=True,
)
# ds_std = ds_std.assign_coords(period=f'{period[0]}-{period[1]}')
# ds_std = ds_std.expand_dims(dim='period')
# ds_std = ds_std.drop_vars('horizon')
all_periods.append(ds_std)
# Calculate climatological standard deviation --------------------
logger.info(
f"Computing climatological standard deviation for {key_input} for period {period}")
# ToDo: This could be generic to work for all terms involved, depending on the input freq
ds_std_clim = xr.Dataset()
with xr.set_options(keep_attrs=True):
                                for v_type in {name.split('_')[0] for name in ds_input.data_vars}:
# pick the interannual standard deviation of the variable
ds_std_varname = [n for n in ds_std.data_vars if v_type in n and 'std' in n]
if len(ds_std_varname) == 1:
ds_std_varname = ds_std_varname[0]
else:
raise ValueError(
f"More than one variable found containing "
f"'{v_type}' and 'std' in {ds_std_varname}"
)
# pick the mean of intra-annual/monthly/seasonal standard deviation of the variable
ds_mean_varname = [n for n in ds_mean.data_vars if v_type in n and 'std' in n]
if len(ds_mean_varname) == 1:
ds_mean_varname = ds_mean_varname[0]
else:
raise ValueError(
f"More than one variable found containing "
f"'{v_type}' and 'std' in {ds_mean_varname}"
)
# calculate the total climatological standard deviation
new_varname = f'{v_type}_std_clim_total'
ds_std_clim[new_varname] = np.sqrt(
np.square(ds_std[ds_std_varname]) +
np.square(ds_mean[ds_mean_varname])
)
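                                    # (Roughly the law of total variance: the interannual std of
                                    # the period means is combined in quadrature with the mean
                                    # intra-period std, assuming the two components are
                                    # approximately independent.)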
# add attributes
ds_std_clim[new_varname].attrs['description'] = \
f"{xclim.core.formatting.default_formatter.format_field(ds_mean.attrs['cat:xrfreq'], 'adj').capitalize()} " \
f"total standard deviation of {' '.join(ds_std[ds_std_varname].attrs['description'].split(' ')[-3:])}"
ds_std_clim[new_varname].attrs['long_name'] = ds_std_clim[new_varname].attrs[
'description']
all_periods.append(ds_std_clim)
# Calculate trends -----------------------------------------------
logger.info(f"Computing climatological linregress for {key_input} for period {period}")
# for testing DJF in seasonal change the period - ToDo: remove!
# if 'QS-DEC' in key_input:
# period = [str(int(year) - 1) for year in period]
ds_input_trend = ds_input[[v for v in ds_input.data_vars if 'mean' in v]]
# .isel(lat=slice(100, 105), lon=slice(100, 105), )
ds_trend = xs.aggregate.climatological_op(
ds=ds_input_trend,
**CONFIG["aggregate"]["climatological_trend"],
periods=period,
min_periods=0.7,
rename_variables=True,
periods_as_dim=True,
)
# ds_trend = ds_trend.assign_coords(period=f'{period[0]}-{period[1]}')
# ds_trend = ds_trend.expand_dims(dim='period')
# ds_trend = ds_trend.drop_vars('horizon')
all_periods.append(ds_trend)
# # remove all dates so that periods can be merged
# new_time = {
# 1: {'year': ['ANN']},
# 4: {'season': ['MAM', 'JJA', 'SON', 'DJF']},
# 12: {'month': list(xr.coding.cftime_offsets._MONTH_ABBREVIATIONS.values())},
# } # calendar.month_abbr[1:]
# all_periods = [ds.rename({'time': list(new_time[ds.time.size].keys())[0]})
# .assign_coords(new_time[ds.time.size]) for ds in all_periods]
logger.info(f"Merging climatology of periods for {key_input}")
#all_periods = client.scatter(all_periods)
ds_clim = xr.merge(all_periods, combine_attrs='override')
# save to zarr
# client.scatter(ds_clim)
path = f"{CONFIG['paths']['task']}".format(**cur)
xs.save_to_zarr(ds_clim, path, **CONFIG["aggregate"]["save"])
pcat.update_from_ds(ds=ds_clim.drop_vars(['time']), path=path)

    # --- PLOTTING ---
if "plotting" in CONFIG["tasks"]:
# get input and iter
dict_input = pcat.search(**CONFIG["plotting"]["input"]["data"]).to_dataset_dict(**tdd)
for key_input, ds_input in sorted(dict_input.items()):
with (
Client(**CONFIG["plotting"]["dask"], **daskkws),
xs.measure_time(name=f"plotting {key_input}", logger=logger),
):
# practice with monthly or seasonal data
# if 'AS-JAN' in key_input: continue
if 'ERA5' not in key_input: continue
import cartopy.crs as ccrs
import figanos.matplotlib as fg
                for period in CONFIG["plotting"]["periods"]:
for ind in ds_input.data_vars.values():
plot_id = f"{ind.attrs['long_name'].lower()} - {ds_input.attrs['source']} ({period})"
# logger.info(f"Doing {plot_id}")
print(f"{plot_id}")
# # use Ouranos style
# fg.utils.set_mpl_style('ouranos')
# # Selecting a time and slicing our starting Dataset
#
# # defining our projection.
# projection = ccrs.LambertConformal()
# ds = ds_input['tg_mean_clim_mean'].isel(period=0, year=0)
                        # fg.gridmap(ds, projection=projection,
# features=['coastline', 'ocean'], frame=True, show_time='lower left')

    # --- ENSEMBLES ---
if "ensembles" in CONFIG["tasks"]:
# one ensemble (file) per level, per experiment, per xrfreq
for processing_level in CONFIG["ensembles"]["processing_levels"]:
ind_df = pcat.search(processing_level=processing_level).df
# iterate through available xrfreq, exp and variables
for experiment in ind_df.experiment.unique():
for xrfreq in ind_df.xrfreq.unique():
# get all datasets that go in the ensemble
ind_dict = pcat.search(
processing_level=processing_level,
experiment=experiment,
xrfreq=xrfreq,
).to_dataset_dict(**tdd)
cur = {
"processing_level": f"ensemble-{processing_level}",
"experiment": experiment,
"xrfreq": xrfreq,
}
if not pcat.exists_in_cat(**cur):
with (
Client(**CONFIG["ensembles"]["dask"], **daskkws),
xs.measure_time(
name=f"ens-{processing_level} {experiment} {xrfreq}",
logger=logger,
),
):
ens_stats = xs.ensemble_stats(
datasets=ind_dict,
to_level=f"ensemble-{processing_level}",
)
# add new id
cur["id"] = ens_stats.attrs["cat:id"]
# save to zarr
path = f"{CONFIG['paths']['task']}".format(**cur)
                            xs.save_to_zarr(
                                ens_stats, path, **CONFIG["ensembles"]["save"]
                            )
pcat.update_from_ds(ds=ens_stats, path=path)
xs.send_mail(
subject="ObsFlow - Message",
msg="Congratulations! All tasks of the workflow were completed!",
)