This repository has been archived by the owner on Mar 18, 2024. It is now read-only.

MAINT: update munging #7

Open · wants to merge 5 commits into base: carbon_flux_notebook

Changes from 4 commits
82 changes: 62 additions & 20 deletions examples/Carbon_Flux.ipynb
@@ -43,36 +43,77 @@
"outputs": [],
"source": [
"# %read in FluxNet CSVs and replace NaNs with zeros\n",
"\n",
"import numpy as np\n",
"import datetime\n",
"import os\n",
"sites = [fname.split('_')[1] for fname in os.listdir('./flux_data/dailies/')]\n",
"\n",
"latlon = pd.read_csv('./flux_data/latlon.csv', header=None, names=['site', 'lat', 'lon'])\n",
"sites = [fname.split('_')[1] for fname in os.listdir('./flux_data/dailies/')]\n",
"\n",
"def site_lat_lon(latlon, site):\n",
"def site_lat_lon(site):\n",
" latlon = pd.read_csv('./flux_data/latlon.csv', header=None, names=['site', 'lat', 'lon'])\n",
" location = latlon[latlon['site'] == site]\n",
" if 0 in [len(location['lat']), len(location['lat'])]:\n",
" return None, None\n",
" return float(location['lat']), float(location['lon'])\n",
"\n",
"def _parse_days(integer):\n",
" \"\"\" `integer` date as `20180704` to represent July 4th, 2018\"\"\"\n",
" x = str(integer)\n",
" d = {'year': int(x[:4]), 'month': int(x[4:6]), 'day': int(x[6:])}\n",
" day_of_year = datetime.datetime(d['year'], d['month'], d['day']).timetuple().tm_yday\n",
" return day_of_year\n",
"\n",
"def clean(df, timestamp_col=\"TIMESTAMP\", site='', keep=[], drop=[], predict=''):\n",
" limit = -9990\n",
" for i in range(50):\n",
" df = df.replace(limit - i, np.nan)\n",
" \n",
" assert all([col in df.columns for col in drop]), \"Some columns to drop not in the dataframe.\"\n",
" df.drop(columns=drop, inplace=True)\n",
" for col in keep:\n",
" if col not in df.columns:\n",
" if 'SWC_F' in col or 'TS_F' in col:\n",
" df[col] = 0\n",
" \n",
" df = df.fillna(0)\n",
" df['DOY'] = df['TIMESTAMP'].apply(_parse_days) \n",
" df.pop('TIMESTAMP')\n",
" X = df[keep]\n",
" y = df[predict]\n",
" return X, y\n",
"\n",
"def load_fluxnet_site(site):\n",
" # dataRaw(dataRaw <= -9990) = 0/0;\n",
" # dataRaw(dataRaw <= -9990) = 0/0 (is NaN?)\n",
" # NaN -> zero\n",
" prefix = 'FLX_{site}_FLUXNET'.format(site=site)\n",
" filename = [fname for fname in os.listdir('./flux_data/dailies/') if fname.startswith(prefix)][0]\n",
" filenames = [fname for fname in os.listdir('./flux_data/dailies/')\n",
" if fname.startswith(prefix)]\n",
" assert len(filenames) == 1\n",
" filename = filenames[0]\n",
" \n",
" raw_daily = pd.read_csv('./flux_data/dailies/{filename}'.format(filename=filename)) \n",
" derived_cols = ['YEAR','DOY']\n",
" fluxcols = ['P_ERA','TA_ERA','PA_ERA','SW_IN_ERA','LW_IN_ERA'\n",
" ,'WS_ERA','LE_F_MDS','H_F_MDS','NEE_CUT_USTAR50','NEE_VUT_USTAR50',\n",
" 'SWC_F_MDS_1','SWC_F_MDS_2','SWC_F_MDS_3','TS_F_MDS_1','TS_F_MDS_2'\n",
" 'TS_F_MDS_3','VPD_ERA','GPP_DT_VUT_USTAR50','GPP_DT_CUT_USTAR50']\n",
" available = [col for col in fluxcols if col in raw_daily.columns]\n",
" return raw_daily[['TIMESTAMP']+available]\n",
"\n",
"\n",
"def map_to_days(data):\n",
" return data"
" lat, lon = site_lat_lon(site)\n",
" raw_daily['lat'] = lat\n",
" raw_daily['lon'] = lon\n",
" \n",
" keep = ['P_ERA',\n",
" 'TA_ERA',\n",
" 'PA_ERA',\n",
" 'SW_IN_ERA',\n",
" 'LW_IN_ERA',\n",
" 'WS_ERA',\n",
" 'SWC_F_MDS_1', 'SWC_F_MDS_2', 'SWC_F_MDS_3',\n",
" 'TS_F_MDS_1', 'TS_F_MDS_2', 'TS_F_MDS_3',\n",
" 'VPD_ERA',\n",
" 'DOY', 'lat', 'lon']\n",
" drop = [\"GPP_DT_VUT_USTAR50\",\n",
" \"GPP_DT_CUT_USTAR50\",\n",
" \"LE_F_MDS\",\n",
" \"H_F_MDS\"]\n",
" predict = [\"NEE_CUT_USTAR50\",\n",
" \"NEE_VUT_USTAR50\"]\n",
" X, y = clean(raw_daily, keep=keep, drop=drop, predict=predict[0])\n",
" return X, y"
]
},
{
@@ -81,9 +122,10 @@
"metadata": {},
"outputs": [],
"source": [
"dailies = load_fluxnet_site(sites[0])\n",
"X, y = load_fluxnet_site(sites[0])\n",
"print('Example FLUXNET data for site {site}:'.format(site=sites[0]))\n",
"dailies.head()"
"print('Observations: {}, variables per observation: {}'.format(*X.shape))\n",
"X.describe()"
]
},
{
@@ -158,7 +200,7 @@
"outputs": [],
"source": [
"import scipy.io\n",
"rsif = scipy.io.loadmat('RSIF_2007_2016_05N_01L.mat')"
"rsif = scipy.io.loadmat('./flux_data/RSIF_2007_2016_05N_01L.mat')"
Contributor

I note you've changed where to expect this .mat file. Either option is fine, though this way means overwriting the git lfs 'file' of the same name...

]
},
{