diff --git a/R/api_torch.R b/R/api_torch.R index 7ec3b214..5742cf97 100644 --- a/R/api_torch.R +++ b/R/api_torch.R @@ -368,22 +368,24 @@ .is_torch_model <- function(ml_model) { inherits(ml_model, "torch_model") } + .torch_has_cuda <- function(){ torch::cuda_is_available() } + .torch_has_mps <- function(){ torch::backends_mps_is_available() } .torch_mem_info <- function() { - if (.torch_has_cuda()){ - # Get memory summary + mem_sum <- 0 + + if (.torch_has_cuda()) { + # get current memory info in GB mem_sum <- torch::cuda_memory_stats() - # Return current memory info in GB - mem_sum[["allocated_bytes"]][["all"]][["current"]] / 10^9 - } else { - mem_sum <- 0 + mem_sum <- mem_sum[["allocated_bytes"]][["all"]][["current"]] / 10^9 } + return(mem_sum) } #' @title Verify if torch works on CUDA diff --git a/R/sits_classify.R b/R/sits_classify.R index 8aee78aa..250b1e54 100644 --- a/R/sits_classify.R +++ b/R/sits_classify.R @@ -35,7 +35,7 @@ #' (integer, min = 1, max = 16384). #' @param multicores Number of cores to be used for classification #' (integer, min = 1, max = 2048). -#' @param gpu_memory Memory available in GPU in GB (default = 16) +#' @param gpu_memory Memory available in GPU in GB (default = 4) #' @param n_sam_pol Number of time series per segment to be classified #' (integer, min = 10, max = 50). #' @param output_dir Valid directory for output file. 
diff --git a/R/sits_lighttae.R b/R/sits_lighttae.R index 73c74105..fd81fbef 100644 --- a/R/sits_lighttae.R +++ b/R/sits_lighttae.R @@ -339,10 +339,12 @@ sits_lighttae <- function(samples = NULL, values <- array( data = as.matrix(values), dim = c(n_samples, n_times, n_bands) ) - # Load into GPU - if (.torch_has_cuda()){ + # Get GPU memory + gpu_memory <- sits_env[["gpu_memory"]] + # if CUDA is available and gpu memory is defined, transform values + # to torch dataloader + if (.torch_has_cuda() && .has(gpu_memory)) { # set the batch size according to the GPU memory - gpu_memory <- sits_env[["gpu_memory"]] b_size <- 2^gpu_memory # transfor the input array to a dataset values <- .as_dataset(values) diff --git a/R/sits_mlp.R b/R/sits_mlp.R index eebb72c9..51da192d 100644 --- a/R/sits_mlp.R +++ b/R/sits_mlp.R @@ -289,11 +289,12 @@ sits_mlp <- function(samples = NULL, values <- .pred_normalize(pred = values, stats = ml_stats) # Transform input into matrix values <- as.matrix(values) - # if CUDA is available, transform to torch data set - # Load into GPU - if (.torch_has_cuda()){ + # Get GPU memory + gpu_memory <- sits_env[["gpu_memory"]] + # if CUDA is available and gpu memory is defined, transform values + # to torch dataloader + if (.torch_has_cuda() && .has(gpu_memory)) { # set the batch size according to the GPU memory - gpu_memory <- sits_env[["gpu_memory"]] b_size <- 2^gpu_memory # transfor the input array to a dataset values <- .as_dataset(values) diff --git a/R/sits_tae.R b/R/sits_tae.R index df1fbe2c..50fd2728 100644 --- a/R/sits_tae.R +++ b/R/sits_tae.R @@ -307,13 +307,12 @@ sits_tae <- function(samples = NULL, values <- array( data = as.matrix(values), dim = c(n_samples, n_times, n_bands) ) - # Predict using GPU if available - # If not, use CPU - # if CUDA is available, transform to torch data set - # Load into GPU - if (.torch_has_cuda()){ + # Get GPU memory + gpu_memory <- sits_env[["gpu_memory"]] + # if CUDA is available and gpu memory is defined, transform 
values + # to torch dataloader + if (.torch_has_cuda() && .has(gpu_memory)) { # set the batch size according to the GPU memory - gpu_memory <- sits_env[["gpu_memory"]] b_size <- 2^gpu_memory # transfor the input array to a dataset values <- .as_dataset(values) diff --git a/R/sits_tempcnn.R b/R/sits_tempcnn.R index a34117d8..b22d4e73 100644 --- a/R/sits_tempcnn.R +++ b/R/sits_tempcnn.R @@ -358,15 +358,16 @@ sits_tempcnn <- function(samples = NULL, values <- array( data = as.matrix(values), dim = c(n_samples, n_times, n_bands) ) - # if CUDA is available, transform to torch data set - # Load into GPU - if (.torch_has_cuda()){ + # Get GPU memory + gpu_memory <- sits_env[["gpu_memory"]] + # if CUDA is available and gpu memory is defined, transform values + # to torch dataloader + if (.torch_has_cuda() && .has(gpu_memory)) { # set the batch size according to the GPU memory - gpu_memory <- sits_env[["gpu_memory"]] b_size <- 2^gpu_memory # transfor the input array to a dataset values <- .as_dataset(values) - # To the data set to a torcj transform in a dataloader to use the batch size + # Convert the data set to a torch dataloader to use the batch size values <- torch::dataloader(values, batch_size = b_size) # Do GPU classification with dataloader values <- .try( diff --git a/R/sits_tuning.R b/R/sits_tuning.R index 39bf9ed8..a0fc5279 100644 --- a/R/sits_tuning.R +++ b/R/sits_tuning.R @@ -29,9 +29,9 @@ #' \code{ml_method}. User can use \code{uniform}, \code{choice}, #' \code{randint}, \code{normal}, \code{lognormal}, \code{loguniform}, #' and \code{beta} distribution functions to randomize parameters. -#' @param trials Number of random trials to perform the random search. -#' @param progress Show progress bar? -#' @param multicores Number of cores to process in parallel +#' @param trials Number of random trials to perform the random search. +#' @param progress Show progress bar? +#' @param multicores Number of cores to process in parallel. 
#' #' @return #' A tibble containing all parameters used to train on each trial @@ -87,7 +87,6 @@ sits_tuning <- function(samples, # check validation_split parameter if samples_validation is not passed .check_num_parameter(validation_split, exclusive_min = 0, max = 0.5) } - # check 'ml_functions' parameter ml_function <- substitute(ml_method, env = environment()) if (is.call(ml_function)) diff --git a/man/sits_classify.Rd b/man/sits_classify.Rd index fdb09bc4..a29d80c0 100644 --- a/man/sits_classify.Rd +++ b/man/sits_classify.Rd @@ -91,7 +91,7 @@ sits_classify( \item{impute_fn}{Imputation function to remove NA.} -\item{gpu_memory}{Memory available in GPU in GB (default = 16)} +\item{gpu_memory}{Memory available in GPU in GB (default = 4)} \item{roi}{Region of interest (either an sf object, shapefile, or a numeric vector with named XY values diff --git a/man/sits_tuning.Rd b/man/sits_tuning.Rd index 9057e4fa..bc9becbb 100644 --- a/man/sits_tuning.Rd +++ b/man/sits_tuning.Rd @@ -33,7 +33,7 @@ and \code{beta} distribution functions to randomize parameters.} \item{trials}{Number of random trials to perform the random search.} -\item{multicores}{Number of cores to process in parallel} +\item{multicores}{Number of cores to process in parallel.} \item{progress}{Show progress bar?} }