Skip to content

Commit

Permalink
testing rerddap functionality as replacement
Browse files Browse the repository at this point in the history
  • Loading branch information
Brendan Turley committed Jun 6, 2024
1 parent 2d27a47 commit eb01cb9
Showing 1 changed file with 55 additions and 24 deletions.
79 changes: 55 additions & 24 deletions indicator_processing/automated_download/sst.R
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,10 @@
# reset workspace and graphics state ---------------------------------
rm(list = ls())
# dev.off() errors ("cannot shut down device 1") when only the null
# device exists, aborting the script on a fresh session; guard it
if (dev.cur() > 1) {
  dev.off()
}

library(lubridate)
library(maps)
library(plotTimeSeries)
library(rerddap)

# project-wide settings (terminal_year, min/max lon/lat bounding box)
load("indicator_processing/spec_file.RData")

Expand All @@ -17,37 +19,66 @@ load("indicator_processing/spec_file.RData")
# analysis period ---------------------------------------------------
styear <- 1982
enyear <- terminal_year  # from spec_file.RData

# get ERDDAP info ---------------------------------------------------
# use the -180..180 longitude grid so min_lon/max_lon from spec_file
# (negative degrees west) can be passed to griddap() directly.
# NOTE(review): the previous extra info('ncdcOisst21Agg') call was
# immediately overwritten and only cost a wasted metadata request.
sst <- info('ncdcOisst21Agg_LonPM180')

# preallocate monthly results: 12 rows per year, filled in the loop --
dat <- setNames(data.frame(matrix(NA, length(styear:enyear) * 12, 5)),
                c("year", "mon", "mean", "min", "max"))
m <- 1  # first row of the current year's 12-month slice
n <- 0  # last row filled so far

# download by year to avoid timeout errors --------------------------
# Pull daily OISST for the full Caribbean box (download once for the
# whole region because USVI and PR are highly correlated), then
# aggregate to monthly mean/min/max and fill the preallocated frame.
for (yr in styear:enyear) {

  # single request against the -180..180 longitude grid; min_lon /
  # max_lon from spec_file are already in that convention.
  # NOTE(review): the previous first griddap() call used 360 + lon
  # (0-360 convention, out of range for this dataset) and its result
  # was discarded, doubling the download time — removed.
  sst_grab <- griddap(sst, fields = 'sst',
                      time = c(paste0(yr, '-01-01'), paste0(yr, '-12-31')),
                      longitude = c(min_lon, max_lon),
                      latitude = c(min_lat, max_lat))

  # monthly statistics across all grid cells and days; aggregate()
  # returns rows sorted by month (1..12) within the single year, with
  # the three statistics packed into a 12 x 3 matrix column
  sst_agg <- aggregate(sst_grab$data$sst,
                       by = list(year(sst_grab$data$time),
                                 month(sst_grab$data$time)),
                       function(x) c(mean(x, na.rm = TRUE),
                                     min(x, na.rm = TRUE),
                                     max(x, na.rm = TRUE)))

  # fill this year's 12-row slice (year, month, mean, min, max)
  n <- n + 12
  dat[m:n, ] <- data.frame(sst_agg[, 1:2], sst_agg[, 3])
  m <- n + 1
}

# the rerddap path no longer writes st.csv; remove one only if a
# previous run left it behind (unconditional file.remove() warns when
# the file is absent)
if (file.exists("st.csv")) {
  file.remove("st.csv")
}

# add row names and yearmonth column --------------------------------
names(dat) <- c("year", "mon", "mean", "min", "max")
Expand Down

0 comments on commit eb01cb9

Please sign in to comment.