From 7d0bd3c7a022070cf1b6923851b3c9af7b8f0673 Mon Sep 17 00:00:00 2001 From: Felipe Carlos Date: Mon, 29 Jul 2024 22:30:12 -0300 Subject: [PATCH] fix tuning operation with torch models --- R/api_torch.R | 14 ++++++++------ R/sits_classify.R | 2 +- R/sits_lighttae.R | 8 +++++--- R/sits_mlp.R | 9 +++++---- R/sits_tae.R | 11 +++++------ R/sits_tempcnn.R | 11 ++++++----- R/sits_tuning.R | 7 +++---- man/sits_classify.Rd | 2 +- man/sits_tuning.Rd | 2 +- 9 files changed, 35 insertions(+), 31 deletions(-) diff --git a/R/api_torch.R b/R/api_torch.R index 7ec3b214f..5742cf97c 100644 --- a/R/api_torch.R +++ b/R/api_torch.R @@ -368,22 +368,24 @@ .is_torch_model <- function(ml_model) { inherits(ml_model, "torch_model") } + .torch_has_cuda <- function(){ torch::cuda_is_available() } + .torch_has_mps <- function(){ torch::backends_mps_is_available() } .torch_mem_info <- function() { - if (.torch_has_cuda()){ - # Get memory summary + mem_sum <- 0 + + if (.torch_has_cuda()) { + # get current memory info in GB mem_sum <- torch::cuda_memory_stats() - # Return current memory info in GB - mem_sum[["allocated_bytes"]][["all"]][["current"]] / 10^9 - } else { - mem_sum <- 0 + mem_sum <- mem_sum[["allocated_bytes"]][["all"]][["current"]] / 10^9 } + return(mem_sum) } #' @title Verify if torch works on CUDA diff --git a/R/sits_classify.R b/R/sits_classify.R index 8aee78aa2..250b1e544 100644 --- a/R/sits_classify.R +++ b/R/sits_classify.R @@ -35,7 +35,7 @@ #' (integer, min = 1, max = 16384). #' @param multicores Number of cores to be used for classification #' (integer, min = 1, max = 2048). -#' @param gpu_memory Memory available in GPU in GB (default = 16) +#' @param gpu_memory Memory available in GPU in GB (default = 4) #' @param n_sam_pol Number of time series per segment to be classified #' (integer, min = 10, max = 50). #' @param output_dir Valid directory for output file. 
diff --git a/R/sits_lighttae.R b/R/sits_lighttae.R index 73c74105d..fd81fbeff 100644 --- a/R/sits_lighttae.R +++ b/R/sits_lighttae.R @@ -339,10 +339,12 @@ sits_lighttae <- function(samples = NULL, values <- array( data = as.matrix(values), dim = c(n_samples, n_times, n_bands) ) - # Load into GPU - if (.torch_has_cuda()){ + # Get GPU memory + gpu_memory <- sits_env[["gpu_memory"]] + # if CUDA is available and gpu memory is defined, transform values + # to torch dataloader + if (.torch_has_cuda() && .has(gpu_memory)) { # set the batch size according to the GPU memory - gpu_memory <- sits_env[["gpu_memory"]] b_size <- 2^gpu_memory # transfor the input array to a dataset values <- .as_dataset(values) diff --git a/R/sits_mlp.R b/R/sits_mlp.R index eebb72c92..51da192d9 100644 --- a/R/sits_mlp.R +++ b/R/sits_mlp.R @@ -289,11 +289,12 @@ sits_mlp <- function(samples = NULL, values <- .pred_normalize(pred = values, stats = ml_stats) # Transform input into matrix values <- as.matrix(values) - # if CUDA is available, transform to torch data set - # Load into GPU - if (.torch_has_cuda()){ + # Get GPU memory + gpu_memory <- sits_env[["gpu_memory"]] + # if CUDA is available and gpu memory is defined, transform values + # to torch dataloader + if (.torch_has_cuda() && .has(gpu_memory)) { # set the batch size according to the GPU memory - gpu_memory <- sits_env[["gpu_memory"]] b_size <- 2^gpu_memory # transfor the input array to a dataset values <- .as_dataset(values) diff --git a/R/sits_tae.R b/R/sits_tae.R index df1fbe2c2..50fd27286 100644 --- a/R/sits_tae.R +++ b/R/sits_tae.R @@ -307,13 +307,12 @@ sits_tae <- function(samples = NULL, values <- array( data = as.matrix(values), dim = c(n_samples, n_times, n_bands) ) - # Predict using GPU if available - # If not, use CPU - # if CUDA is available, transform to torch data set - # Load into GPU - if (.torch_has_cuda()){ + # Get GPU memory + gpu_memory <- sits_env[["gpu_memory"]] + # if CUDA is available and gpu memory is defined, 
transform values + # to torch dataloader + if (.torch_has_cuda() && .has(gpu_memory)) { # set the batch size according to the GPU memory - gpu_memory <- sits_env[["gpu_memory"]] b_size <- 2^gpu_memory # transfor the input array to a dataset values <- .as_dataset(values) diff --git a/R/sits_tempcnn.R b/R/sits_tempcnn.R index a34117d89..b22d4e733 100644 --- a/R/sits_tempcnn.R +++ b/R/sits_tempcnn.R @@ -358,15 +358,16 @@ sits_tempcnn <- function(samples = NULL, values <- array( data = as.matrix(values), dim = c(n_samples, n_times, n_bands) ) - # if CUDA is available, transform to torch data set - # Load into GPU - if (.torch_has_cuda()){ + # Get GPU memory + gpu_memory <- sits_env[["gpu_memory"]] + # if CUDA is available and gpu memory is defined, transform values + # to torch dataloader + if (.torch_has_cuda() && .has(gpu_memory)) { # set the batch size according to the GPU memory - gpu_memory <- sits_env[["gpu_memory"]] b_size <- 2^gpu_memory # transfor the input array to a dataset values <- .as_dataset(values) - # To the data set to a torcj transform in a dataloader to use the batch size + # Convert the dataset to a torch dataloader to use the batch size values <- torch::dataloader(values, batch_size = b_size) # Do GPU classification with dataloader values <- .try( diff --git a/R/sits_tuning.R b/R/sits_tuning.R index 39bf9ed87..a0fc5279c 100644 --- a/R/sits_tuning.R +++ b/R/sits_tuning.R @@ -29,9 +29,9 @@ #' \code{ml_method}. User can use \code{uniform}, \code{choice}, #' \code{randint}, \code{normal}, \code{lognormal}, \code{loguniform}, #' and \code{beta} distribution functions to randomize parameters. -#' @param trials Number of random trials to perform the random search. -#' @param progress Show progress bar? -#' @param multicores Number of cores to process in parallel +#' @param trials Number of random trials to perform the random search. +#' @param progress Show progress bar? +#' @param multicores Number of cores to process in parallel. 
#' #' @return #' A tibble containing all parameters used to train on each trial @@ -87,7 +87,6 @@ sits_tuning <- function(samples, # check validation_split parameter if samples_validation is not passed .check_num_parameter(validation_split, exclusive_min = 0, max = 0.5) } - # check 'ml_functions' parameter ml_function <- substitute(ml_method, env = environment()) if (is.call(ml_function)) diff --git a/man/sits_classify.Rd b/man/sits_classify.Rd index fdb09bc44..a29d80c07 100644 --- a/man/sits_classify.Rd +++ b/man/sits_classify.Rd @@ -91,7 +91,7 @@ sits_classify( \item{impute_fn}{Imputation function to remove NA.} -\item{gpu_memory}{Memory available in GPU in GB (default = 16)} +\item{gpu_memory}{Memory available in GPU in GB (default = 4)} \item{roi}{Region of interest (either an sf object, shapefile, or a numeric vector with named XY values diff --git a/man/sits_tuning.Rd b/man/sits_tuning.Rd index 9057e4fa5..bc9becbb2 100644 --- a/man/sits_tuning.Rd +++ b/man/sits_tuning.Rd @@ -33,7 +33,7 @@ and \code{beta} distribution functions to randomize parameters.} \item{trials}{Number of random trials to perform the random search.} -\item{multicores}{Number of cores to process in parallel} +\item{multicores}{Number of cores to process in parallel.} \item{progress}{Show progress bar?} }