Commit 1a18dc2: Updates

Cydral authored Sep 27, 2024
1 parent 684c56c commit 1a18dc2
Showing 3 changed files with 10 additions and 10 deletions.
dlib/cuda/cpu_dlib.cpp: 8 changes (4 additions, 4 deletions)
@@ -1625,7 +1625,7 @@ namespace dlib
const long num_channels,
tensor& dest,
const tensor& src,
- size_t mode
+ size_t mode = 0
)
{
DLIB_ASSERT(num_channels * num_locations == src.nr() * src.nc() * src.k());
@@ -1701,7 +1701,7 @@ namespace dlib
tensor& grad,
const tensor& dest,
const tensor& gradient_input,
- size_t mode
+ size_t mode = 0
)
{
DLIB_ASSERT(num_channels * num_locations == grad.nr() * grad.nc() * grad.k());
@@ -1802,7 +1802,7 @@ namespace dlib
)
{
DLIB_CASSERT(have_same_dimensions(dest,src));
- ttimpl::softmax(1, src.nr()*src.nc()*src.k(), dest, src, 0);
+ ttimpl::softmax(1, src.nr()*src.nc()*src.k(), dest, src);
}

void softmax_all_gradient (
@@ -1813,7 +1813,7 @@ namespace dlib
{
DLIB_CASSERT(have_same_dimensions(grad,dest));
DLIB_CASSERT(have_same_dimensions(grad,gradient_input));
- ttimpl::softmax_gradient(1, grad.nr()*grad.nc()*grad.k(), grad, dest, gradient_input, 0);
+ ttimpl::softmax_gradient(1, grad.nr()*grad.nc()*grad.k(), grad, dest, gradient_input);
}

// ------------------------------------------------------------------------------------
dlib/cuda/cpu_dlib.h: 8 changes (4 additions, 4 deletions)
@@ -294,14 +294,14 @@ namespace dlib
void softmax (
tensor& dest,
const tensor& src,
- size_t mode
+ size_t mode = 0
);

void softmax_gradient (
tensor& grad,
const tensor& dest,
const tensor& gradient_input,
- size_t mode
+ size_t mode = 0
);

// ------------------------------------------------------------------------------------
@@ -768,7 +768,7 @@ namespace dlib
double& loss
) const
{
- softmax(grad, output_tensor, 0);
+ softmax(grad, output_tensor);
// The loss we output is the average loss over the mini-batch, and also over each element of the matrix output.
const double scale = 1.0 / (output_tensor.num_samples() * output_tensor.nr() * output_tensor.nc());
loss = 0;
@@ -833,7 +833,7 @@ namespace dlib
double& loss
) const
{
- softmax(grad, output_tensor, 0);
+ softmax(grad, output_tensor);
// The loss we output is the weighted average loss over the mini-batch, and also over each element of the matrix output.
const double scale = 1.0 / (output_tensor.num_samples() * output_tensor.nr() * output_tensor.nc());
loss = 0;
dlib/cuda/cudnn_dlibapi.h: 4 changes (2 additions, 2 deletions)
@@ -353,14 +353,14 @@ namespace dlib
void softmax (
tensor& dest,
const tensor& src,
- size_t mode
+ size_t mode = 0
);

void softmax_gradient (
tensor& grad,
const tensor& dest,
const tensor& gradient_input,
- size_t mode
+ size_t mode = 0
);

// ------------------------------------------------------------------------------------
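Taken together, the three files give the mode argument of softmax and softmax_gradient a default value of 0 and drop the now-redundant explicit 0 at each call site. A minimal standalone C++ sketch of that default-argument pattern, using hypothetical stand-in names rather than the real dlib signatures:

#include <cstddef>
#include <iostream>

// Hypothetical stand-in for the updated signatures: the trailing mode
// parameter defaults to 0, so call sites that used to pass 0 explicitly
// can simply omit it without changing behavior.
void softmax_like(int value, std::size_t mode = 0)
{
    std::cout << "value=" << value << ", mode=" << mode << '\n';
}

int main()
{
    softmax_like(42);     // uses the default, equivalent to the old call with 0
    softmax_like(42, 0);  // passing the mode explicitly is still allowed
    return 0;
}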
