Skip to content

Commit

Permalink
fix tuning operation with torch models
Browse files Browse the repository at this point in the history
  • Loading branch information
M3nin0 committed Jul 30, 2024
1 parent 790b947 commit 7d0bd3c
Show file tree
Hide file tree
Showing 9 changed files with 35 additions and 31 deletions.
14 changes: 8 additions & 6 deletions R/api_torch.R
Original file line number Diff line number Diff line change
Expand Up @@ -368,22 +368,24 @@
.is_torch_model <- function(ml_model) {
  # Torch-based models are tagged with the "torch_model" S3 class
  inherits(ml_model, what = "torch_model")
}

#' @title Check if CUDA is available for torch
#' @noRd
.torch_has_cuda <- function() {
  # Delegate to torch's own CUDA runtime probe
  torch::cuda_is_available()
}

#' @title Check if Apple MPS backend is available for torch
#' @noRd
.torch_has_mps <- function() {
  # Delegate to torch's MPS (Metal Performance Shaders) backend probe
  torch::backends_mps_is_available()
}

#' @title Get current GPU memory usage
#' @description
#' Returns the memory currently allocated on the CUDA device, in gigabytes.
#' When no CUDA device is available, returns 0 so callers can rely on a
#' numeric result in every environment (CPU-only, MPS, or CUDA).
#' @return Numeric scalar: allocated GPU memory in GB (0 without CUDA).
#' @noRd
.torch_mem_info <- function() {
  # Default: no CUDA device means no GPU memory in use
  mem_sum <- 0
  if (.torch_has_cuda()) {
    # Full CUDA allocator statistics from torch
    mem_stats <- torch::cuda_memory_stats()
    # Extract currently allocated bytes and convert to GB
    mem_sum <- mem_stats[["allocated_bytes"]][["all"]][["current"]] / 10^9
  }
  return(mem_sum)
}
#' @title Verify if torch works on CUDA
Expand Down
2 changes: 1 addition & 1 deletion R/sits_classify.R
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
#' (integer, min = 1, max = 16384).
#' @param multicores Number of cores to be used for classification
#' (integer, min = 1, max = 2048).
#' @param gpu_memory Memory available in GPU in GB (default = 16)
#' @param gpu_memory Memory available in GPU in GB (default = 4)
#' @param n_sam_pol Number of time series per segment to be classified
#' (integer, min = 10, max = 50).
#' @param output_dir Valid directory for output file.
Expand Down
8 changes: 5 additions & 3 deletions R/sits_lighttae.R
Original file line number Diff line number Diff line change
Expand Up @@ -339,10 +339,12 @@ sits_lighttae <- function(samples = NULL,
values <- array(
data = as.matrix(values), dim = c(n_samples, n_times, n_bands)
)
# Load into GPU
if (.torch_has_cuda()){
# Get GPU memory
gpu_memory <- sits_env[["gpu_memory"]]
# if CUDA is available and gpu memory is defined, transform values
# to torch dataloader
if (.torch_has_cuda() && .has(gpu_memory)) {
# set the batch size according to the GPU memory
gpu_memory <- sits_env[["gpu_memory"]]
b_size <- 2^gpu_memory
# transform the input array to a dataset
values <- .as_dataset(values)
Expand Down
9 changes: 5 additions & 4 deletions R/sits_mlp.R
Original file line number Diff line number Diff line change
Expand Up @@ -289,11 +289,12 @@ sits_mlp <- function(samples = NULL,
values <- .pred_normalize(pred = values, stats = ml_stats)
# Transform input into matrix
values <- as.matrix(values)
# if CUDA is available, transform to torch data set
# Load into GPU
if (.torch_has_cuda()){
# Get GPU memory
gpu_memory <- sits_env[["gpu_memory"]]
# if CUDA is available and gpu memory is defined, transform values
# to torch dataloader
if (.torch_has_cuda() && .has(gpu_memory)) {
# set the batch size according to the GPU memory
gpu_memory <- sits_env[["gpu_memory"]]
b_size <- 2^gpu_memory
# transform the input array to a dataset
values <- .as_dataset(values)
Expand Down
11 changes: 5 additions & 6 deletions R/sits_tae.R
Original file line number Diff line number Diff line change
Expand Up @@ -307,13 +307,12 @@ sits_tae <- function(samples = NULL,
values <- array(
data = as.matrix(values), dim = c(n_samples, n_times, n_bands)
)
# Predict using GPU if available
# If not, use CPU
# if CUDA is available, transform to torch data set
# Load into GPU
if (.torch_has_cuda()){
# Get GPU memory
gpu_memory <- sits_env[["gpu_memory"]]
# if CUDA is available and gpu memory is defined, transform values
# to torch dataloader
if (.torch_has_cuda() && .has(gpu_memory)) {
# set the batch size according to the GPU memory
gpu_memory <- sits_env[["gpu_memory"]]
b_size <- 2^gpu_memory
# transform the input array to a dataset
values <- .as_dataset(values)
Expand Down
11 changes: 6 additions & 5 deletions R/sits_tempcnn.R
Original file line number Diff line number Diff line change
Expand Up @@ -358,15 +358,16 @@ sits_tempcnn <- function(samples = NULL,
values <- array(
data = as.matrix(values), dim = c(n_samples, n_times, n_bands)
)
# if CUDA is available, transform to torch data set
# Load into GPU
if (.torch_has_cuda()){
# Get GPU memory
gpu_memory <- sits_env[["gpu_memory"]]
# if CUDA is available and gpu memory is defined, transform values
# to torch dataloader
if (.torch_has_cuda() && .has(gpu_memory)) {
# set the batch size according to the GPU memory
gpu_memory <- sits_env[["gpu_memory"]]
b_size <- 2^gpu_memory
# transform the input array to a dataset
values <- .as_dataset(values)
# To the data set to a torcj transform in a dataloader to use the batch size
# Wrap the dataset in a torch dataloader so classification uses the batch size
values <- torch::dataloader(values, batch_size = b_size)
# Do GPU classification with dataloader
values <- .try(
Expand Down
7 changes: 3 additions & 4 deletions R/sits_tuning.R
Original file line number Diff line number Diff line change
Expand Up @@ -29,9 +29,9 @@
#' \code{ml_method}. User can use \code{uniform}, \code{choice},
#' \code{randint}, \code{normal}, \code{lognormal}, \code{loguniform},
#' and \code{beta} distribution functions to randomize parameters.
#' @param trials Number of random trials to perform the random search.
#' @param progress Show progress bar?
#' @param multicores Number of cores to process in parallel
#' @param trials Number of random trials to perform the random search.
#' @param progress Show progress bar?
#' @param multicores Number of cores to process in parallel.
#'
#' @return
#' A tibble containing all parameters used to train on each trial
Expand Down Expand Up @@ -87,7 +87,6 @@ sits_tuning <- function(samples,
# check validation_split parameter if samples_validation is not passed
.check_num_parameter(validation_split, exclusive_min = 0, max = 0.5)
}

# check 'ml_functions' parameter
ml_function <- substitute(ml_method, env = environment())
if (is.call(ml_function))
Expand Down
2 changes: 1 addition & 1 deletion man/sits_classify.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion man/sits_tuning.Rd

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

0 comments on commit 7d0bd3c

Please sign in to comment.