WPS process for the `rerank.netcdf.wrapper` function from ClimDown.
Quantile Reranking fixes bias introduced by the Climate Analogues (CA) step by re-applying a simple quantile mapping bias correction at each grid box.
from birdy import WPSClient
from netCDF4 import Dataset
from pkg_resources import resource_filename
from wps_tools.testing import get_target_url
from tempfile import NamedTemporaryFile
import requests
import os
# Ensure we are in the working directory with access to the data.
# Walk up the directory tree until the cwd is the "chickadee" repo root;
# fail loudly instead of looping forever if no such ancestor exists
# (at the filesystem root, basename is "" and chdir('../') is a no-op).
while os.path.basename(os.getcwd()) != "chickadee":
    previous = os.getcwd()
    os.chdir('../')
    if os.getcwd() == previous:
        raise RuntimeError("Could not locate the 'chickadee' directory")
# NBVAL_IGNORE_OUTPUT
# Resolve the chickadee WPS endpoint to talk to (wps_tools picks the
# target based on the environment, e.g. a dev proxy vs. a local server).
url = get_target_url("chickadee")
print(f"Using chickadee on {url}")
Using chickadee on https://docker-dev03.pcic.uvic.ca/twitcher/ows/proxy/chickadee/wps
# Connect a birdy WPS client to the chickadee service endpoint.
chickadee = WPSClient(url)
# NBVAL_IGNORE_OUTPUT
# IPython help syntax (notebook cell): show the signature and docstring
# of the `rerank` process. Not valid plain-Python outside a notebook.
chickadee.rerank?
Signature: chickadee.rerank( obs_file, varname=None, qdm_file=None, analogues_object=None, num_cores='4', loglevel='INFO', units_bool=True, n_pr_bool=True, tasmax_units='celsius', tasmin_units='celsius', pr_units='kg m-2 d-1', max_gb=1.0, start_date=datetime.date(1971, 1, 1), end_date=datetime.date(2005, 12, 31), out_file=None, analogues_name='analogues', ) Docstring: Quantile Reranking fixes bias introduced by the Climate Analogues step Parameters ---------- obs_file : ComplexData:mimetype:`application/x-netcdf`, :mimetype:`application/x-ogc-dods` Filename of high-res gridded historical observations varname : string Name of the NetCDF variable to downscale (e.g. 'tasmax') out_file : string Filename to create with the climate imprint outputs num_cores : {'1', '2', '3', '4'}positiveInteger The number of cores to use for parallel execution loglevel : {'CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'}string Logging level qdm_file : ComplexData:mimetype:`application/x-netcdf`, :mimetype:`application/x-ogc-dods` Filename of output from QDM step analogues_object : ComplexData:mimetype:`application/x-gzip` R object containing the analogues produced from the CA step (suffix .rda) analogues_name : string Name of the R object containing the analogues units_bool : boolean Check the input units and convert them to the target output units n_pr_bool : boolean Check for and eliminate negative precipitation values tasmax_units : string Units used for tasmax in output file tasmin_units : string Units used for tasmin in output file pr_units : string Units used for pr in output file max_gb : float Approximately how much RAM to use in the chunk I/O loop. 
It’s best to set this to about 1/3 to 1/4 of what you want the high-water mark to be start_date : date Defines the start of the calibration period end_date : date Defines the end of the calibration period Returns ------- output : ComplexData:mimetype:`application/x-netcdf` Output Netcdf File File: ~/code/birds/chickadee/</tmp/chickadee-venv/lib/python3.8/site-packages/birdy/client/base.py-4> Type: method
# Run the rerank process against the bundled test data. The result is
# written to a throw-away NetCDF file that is removed when this block
# exits, so the output is fetched while the file still exists.
with NamedTemporaryFile(suffix=".nc", prefix="output_", dir="/tmp", delete=True) as out_file:
    output = chickadee.rerank(
        obs_file=resource_filename("tests", "/data/tiny_obs.nc"),
        qdm_file=resource_filename("tests", "/data/QDM_expected_output.nc"),
        analogues_object=resource_filename("tests", "data/analogues.rda"),
        varname="tasmax",
        num_cores=2,
        out_file=out_file.name,
    )
    # First (and only) output of the WPS execution: the NetCDF result.
    output_data = output.get()[0]
Access the output with nc_to_dataset() or auto_construct_outputs() from wps_tools.output_handling
# NBVAL_IGNORE_OUTPUT
# Open the process's NetCDF output reference as a netCDF4 Dataset.
from wps_tools.output_handling import auto_construct_outputs, nc_to_dataset

result_ref = output.get()[0]
output_dataset = nc_to_dataset(result_ref)
# Bare expression so the notebook displays the dataset summary.
output_dataset
<class 'netCDF4._netCDF4.Dataset'> root group (NETCDF3_CLASSIC data model, file format NETCDF3): dimensions(sizes): lon(26), lat(26), time(3651) variables(dimensions): float64 lon(lon), float64 lat(lat), float64 time(time), float32 tasmax(time,lat,lon) groups:
# NBVAL_IGNORE_OUTPUT
# Alternative: let wps_tools build Python objects for every output at once.
wps_outputs = output.get()
auto_construct_outputs(wps_outputs)
[<class 'netCDF4._netCDF4.Dataset'> root group (NETCDF3_CLASSIC data model, file format NETCDF3): dimensions(sizes): lon(26), lat(26), time(3651) variables(dimensions): float64 lon(lon), float64 lat(lat), float64 time(time), float32 tasmax(time,lat,lon) groups: ]
# Compare the process output's dimensions against the expected BCCAQ
# output. A Dimension's repr encodes both its name and its size, so
# string equality checks each dimension matches exactly. The expected
# dataset is closed afterwards so the file handle is not leaked.
expected_data = Dataset(resource_filename("tests", "/data/bccaq_expected_output.nc"))
try:
    for key, value in expected_data.dimensions.items():
        assert str(output_dataset.dimensions[key]) == str(value)
finally:
    expected_data.close()