
Commit

Merge pull request #91 from ArtesiaWater/dev
some small fixes
OnnoEbbens authored Feb 7, 2023
2 parents 37a6cc4 + f3d0a63 commit bdd4288
Showing 14 changed files with 129,678 additions and 352 deletions.
126 changes: 63 additions & 63 deletions examples/03_hydropandas_and_pastas.ipynb

Large diffs are not rendered by default.

486 changes: 226 additions & 260 deletions examples/04_merging_observations.ipynb

Large diffs are not rendered by default.

24 changes: 24 additions & 0 deletions hydropandas/extensions/plots.py
@@ -65,6 +65,16 @@ def interactive_plots(
"tan",
)

# check if observations consist of monitoring wells
if per_monitoring_well:
otype = self._obj._infer_otype()
if isinstance(otype, (list, np.ndarray)):
per_monitoring_well = False
elif otype.__name__ == "GroundwaterObs":
pass
else:
per_monitoring_well = False

if per_monitoring_well:
plot_names = self._obj.groupby("monitoring_well").count().index
else:
@@ -214,10 +224,24 @@ def interactive_map(
if m is None:
m = folium.Map([northing, easting], zoom_start=zoom_start)

# get oc name if no legend name is given
if legend_name is None:
legend_name = self._obj.name

# add the point observations with plots to the map
group_name = '<span style=\\"color: {};\\">{}</span>'.format(color, legend_name)
group = folium.FeatureGroup(name=group_name)

# check if observations consist of monitoring wells
if per_monitoring_well:
otype = self._obj._infer_otype()
if isinstance(otype, (list, np.ndarray)):
per_monitoring_well = False
elif otype.__name__ == "GroundwaterObs":
pass
else:
per_monitoring_well = False

if per_monitoring_well:
plot_names = self._obj.groupby("monitoring_well").count().index
else:
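The block added to both interactive_plots and interactive_map disables per-monitoring-well grouping whenever the collection does not consist purely of GroundwaterObs. A minimal standalone sketch of that fallback logic; the helper name resolve_per_monitoring_well and the placeholder classes below are illustrative only, not part of hydropandas:

import numpy as np


class GroundwaterObs:
    """Placeholder standing in for hydropandas' GroundwaterObs class."""


class OtherObs:
    """Placeholder for any other observation type."""


def resolve_per_monitoring_well(otype, per_monitoring_well=True):
    # mirrors the check added in plots.py: grouping per monitoring well
    # only makes sense when every observation is a GroundwaterObs
    if not per_monitoring_well:
        return False
    if isinstance(otype, (list, np.ndarray)):
        # mixed observation types in the collection
        return False
    return otype.__name__ == "GroundwaterObs"


print(resolve_per_monitoring_well(GroundwaterObs))           # True
print(resolve_per_monitoring_well([GroundwaterObs, OtherObs]))  # False
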
20 changes: 13 additions & 7 deletions hydropandas/io/io_bro.py
@@ -194,11 +194,12 @@ def get_gld_id_from_gmw(bro_id, tube_nr):
if len(tube["gldReferences"]) == 1:
return tube["gldReferences"][0]["broId"]
elif len(tube["gldReferences"]) == 0:
logger.info(f"no groundwater level dossier for {bro_id} and tube number {tube_nr}")
logger.info(
f"no groundwater level dossier for {bro_id} and tube number {tube_nr}"
)
return None
else:
raise RuntimeError("unexpected number of gld references")



def measurements_from_gld(
@@ -305,6 +306,9 @@ def measurements_from_gld(

df = df.sort_index()

# slice to tmin and tmax
df = df.loc[tmin:tmax]

# add metadata from gmw
meta.update(get_metadata_from_gmw(meta["monitoring_well"], meta["tube_nr"]))

@@ -572,15 +576,17 @@ class of the observations, e.g. GroundwaterObs or WaterlvlObs
"gml": "http://www.opengis.net/gml/3.2",
"brocom": "http://www.broservices.nl/xsd/brocommon/3.0",
}
if tree.find(".//brocom:responseType", ns).text == 'rejection':

if tree.find(".//brocom:responseType", ns).text == "rejection":
raise RuntimeError(tree.find(".//brocom:rejectionReason", ns).text)

gmws = tree.findall(".//dsgmw:GMW_C", ns)

if len(gmws) > 1000:
ans = input(f'You requested to download {len(gmws)} observations, this can take a while. Are you sure you want to continue [Y/n]? ')
if ans not in ['Y', 'y','yes','Yes', 'YES']:
ans = input(
f"You requested to download {len(gmws)} observations, this can take a while. Are you sure you want to continue [Y/n]? "
)
if ans not in ["Y", "y", "yes", "Yes", "YES"]:
return []

obs_list = []
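The new df.loc[tmin:tmax] line in measurements_from_gld relies on label-based slicing of a sorted DatetimeIndex, which includes both endpoints. A small sketch with synthetic data (not actual BRO output):

import pandas as pd

# synthetic daily groundwater levels on a DatetimeIndex
df = pd.DataFrame(
    {"values": range(10)},
    index=pd.date_range("2023-01-01", periods=10, freq="D"),
)
df = df.sort_index()

# same pattern as the added line in measurements_from_gld:
# label-based slice, tmin and tmax are both included
tmin, tmax = "2023-01-03", "2023-01-06"
print(df.loc[tmin:tmax])
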
109 changes: 100 additions & 9 deletions hydropandas/io/io_knmi.py
@@ -292,12 +292,10 @@ def _check_latest_measurement_date_RD_debilt(meteo_var, use_api=True):
except (RuntimeError, requests.ConnectionError):
logger.warning("KNMI API failed, switching to non-API method")
knmi_df, _ = get_knmi_daily_rainfall_url(
550, "DE-BILT", "RD", start, end, inseason=False
550, "DE-BILT", "RD", start, end
)
else:
knmi_df, _ = get_knmi_daily_rainfall_url(
550, "DE-BILT", "RD", start, end, inseason=False
)
knmi_df, _ = get_knmi_daily_rainfall_url(550, "DE-BILT", "RD", start, end)
else:
if use_api:
try:
@@ -442,7 +440,7 @@ def download_knmi_data(
elif meteo_var == "RD":
# daily data from rainfall-stations
knmi_df, variables = get_knmi_daily_rainfall_url(
stn, stn_name, meteo_var, start, end, settings["inseason"]
stn, stn_name, meteo_var, start, end
)
else:
# daily data from meteorological stations
@@ -522,7 +520,7 @@ def get_knmi_daily_rainfall_api(

@lru_cache()
def get_knmi_daily_rainfall_url(
stn, stn_name, meteo_var, start=None, end=None, inseason=False, use_cache=True
stn, stn_name, meteo_var="RD", start=None, end=None, use_cache=True
):
"""download and read knmi daily rainfall.
@@ -538,8 +536,6 @@ def get_knmi_daily_rainfall_url(
start time of observations.
end : pd.TimeStamp
end time of observations.
inseason : boolean
flag to obtain inseason data.
Raises
------
Expand Down Expand Up @@ -583,6 +579,31 @@ def get_knmi_daily_rainfall_url(
# unzip file
util.unzip_file(fname_zip, fname_dir, force=True, preserve_datetime=True)

return read_knmi_daily_rainfall_file(fname_txt, meteo_var, start=None, end=None)


def read_knmi_daily_rainfall_file(fname_txt, meteo_var="RD", start=None, end=None):
"""read a knmi file with daily rainfall data.
Parameters
----------
fname_txt : str
file path of a knmi .txt file.
meteo_var : str
must be 'RD'.
start : pd.TimeStamp
start time of observations.
end : pd.TimeStamp
end time of observations.
Returns
-------
pandas DataFrame
measurements.
variables : dictionary
additional information about the variables
"""
with open(fname_txt, "r") as f:
line = f.readline()
# get meteo var
@@ -622,6 +643,12 @@ def get_knmi_daily_rainfall_url(
df, variables = _transform_variables(df, variables)
variables["unit"] = "m"

# add station to variables
if len(df["STN"].unique()) != 1:
raise ValueError("multiple stations in single file")
else:
variables["station"] = df["STN"].iloc[0]

return df.loc[start:end, [meteo_var]], variables
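
A usage sketch of the new read_knmi_daily_rainfall_file, assuming a KNMI precipitation-station .txt file has already been downloaded and unzipped; the file path below is hypothetical:

import pandas as pd

from hydropandas.io.io_knmi import read_knmi_daily_rainfall_file

# hypothetical path to an unzipped KNMI rainfall file for station 550 (De Bilt)
fname_txt = "data/neerslaggeg_DE-BILT_550.txt"

knmi_df, variables = read_knmi_daily_rainfall_file(
    fname_txt,
    meteo_var="RD",
    start=pd.Timestamp("2022-01-01"),
    end=pd.Timestamp("2022-12-31"),
)
print(variables["station"], variables["unit"])  # station number and "m"
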


@@ -907,6 +934,37 @@ def get_knmi_daily_meteo_url(stn, meteo_var, start, end, use_cache=True):
# unzip file
util.unzip_file(fname_zip, fname_dir, force=True, preserve_datetime=True)

return read_knmi_daily_meteo_file(fname_txt, meteo_var, start=None, end=None)


def read_knmi_daily_meteo_file(fname_txt, meteo_var, start=None, end=None):
"""read knmi daily meteo data from a file
Parameters
----------
fname_txt : str
file path.
meteo_var : str
e.g. 'EV24'.
start : pd.TimeStamp
start time of observations.
end : pd.TimeStamp
end time of observations.
Raises
------
ValueError
If the meteo var is not in the file.
Returns
-------
pandas DataFrame
measurements.
variables : dictionary
additional information about the variables
stations : pandas DataFrame
additional data about the measurement station
"""
variables = None
with open(fname_txt, "r") as f:
line = f.readline()
@@ -919,7 +977,7 @@ def get_knmi_daily_meteo_url(stn, meteo_var, start, end, use_cache=True):
line = f.readline()

if variables is None:
raise ValueError(f"could not find {meteo_var} for station {stn}")
raise ValueError(f"could not find {meteo_var} in file {fname_txt}")

# get dataframe
for _ in range(50):
@@ -941,10 +999,18 @@ def get_knmi_daily_meteo_url(stn, meteo_var, start, end, use_cache=True):

# from UT to UT+1 (standard-time in the Netherlands)
df.index = df.index + pd.to_timedelta(1, unit="h")

# add station to variables
if len(df["STN"].unique()) != 1:
raise ValueError("multiple stations in single file")
else:
station = df["STN"].iloc[0]

df = df.loc[start:end, [meteo_var]]
df = df.dropna()
df, variables = _transform_variables(df, variables)
variables["unit"] = ""
variables["station"] = station
break

line = f.readline()
@@ -1118,6 +1184,31 @@ def _get_default_settings(settings):
return settings


def read_knmi_timeseries_file(fname, meteo_var, start, end):

if meteo_var == "RD":
knmi_df, meta = read_knmi_daily_rainfall_file(
fname, meteo_var="RD", start=start, end=end
)
else:
knmi_df, meta, _ = read_knmi_daily_meteo_file(
fname, meteo_var, start=start, end=end
)

# get stations
stations = get_stations(meteo_var=meteo_var)
stn_name = get_station_name(meta["station"], stations)

# set metadata
x = stations.loc[meta["station"], "x"]
y = stations.loc[meta["station"], "y"]
meta.update(
{"x": x, "y": y, "name": f"{meteo_var}_{stn_name}", "source": "KNMI",}
)

return knmi_df, meta


def get_knmi_timeseries_stn(stn, meteo_var, start, end, settings=None):
"""Get a knmi time series and metadata.
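The new read_knmi_timeseries_file wrapper picks the rainfall or meteo reader based on meteo_var and enriches the metadata with station name and coordinates. A sketch assuming a downloaded daily-meteo file containing EV24; the path is hypothetical:

import pandas as pd

from hydropandas.io.io_knmi import read_knmi_timeseries_file

# hypothetical path to a KNMI daily meteo file (e.g. etmgeg_260.txt for De Bilt)
fname = "data/etmgeg_260.txt"

knmi_df, meta = read_knmi_timeseries_file(
    fname,
    meteo_var="EV24",
    start=pd.Timestamp("2022-01-01"),
    end=pd.Timestamp("2022-12-31"),
)
print(meta["name"], meta["x"], meta["y"])  # e.g. "EV24_<station name>" plus RD coordinates
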
6 changes: 3 additions & 3 deletions hydropandas/obs_collection.py
@@ -27,7 +27,7 @@ def read_bro(
tmin=None,
tmax=None,
only_metadata=False,
keep_all_obs=True,
keep_all_obs=False,
epsg=28992,
):
""" get all the observations within an extent or within a
@@ -947,7 +947,7 @@ def from_bro(
only_metadata=False,
keep_all_obs=True,
epsg=28992,
ignore_max_obs=False
ignore_max_obs=False,
):
""" get all the observations within an extent or within a
groundwatermonitoring net.
@@ -997,7 +997,7 @@ def from_bro(
only_metadata=only_metadata,
keep_all_obs=keep_all_obs,
epsg=epsg,
ignore_max_obs=ignore_max_obs
ignore_max_obs=ignore_max_obs,
)
meta = {}
elif bro_id is not None:
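Note that the default of keep_all_obs in the module-level read_bro flips from True to False, so observations without measurements are now dropped unless requested explicitly. A hedged usage sketch, assuming the extent keyword described in the docstring; the extent coordinates are made up:

from hydropandas.obs_collection import read_bro

# made-up extent in the Dutch RD coordinate system (epsg 28992):
# (xmin, xmax, ymin, ymax)
extent = (117850, 118180, 439550, 439900)

# with the new default keep_all_obs=False, wells without measurements are
# dropped; pass keep_all_obs=True to keep the previous behaviour
oc = read_bro(extent=extent)
print(oc)
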