Compile nntrainer on windows #2904

Open. Wants to merge 1 commit into base: main.
5 changes: 5 additions & 0 deletions meson.build
@@ -134,6 +134,11 @@ if get_option('enable-fp16')
   endif
 endif
 
+if get_option('enable-mmap')
+  message ('MMAP enabled')
+  extra_defines += '-DUSE_MMAP=1'
+endif
+
 if get_option('enable-opencl')
   message ('OpenCL build is enabled. Will work only if OpenCL supported GPU is available.')
   extra_defines += '-DENABLE_OPENCL=1'
1 change: 1 addition & 0 deletions meson_options.txt
@@ -17,6 +17,7 @@ option('enable-memory-swap', type: 'boolean', value: false)
 option('memory-swap-path', type: 'string', value: '')
 option('test-timeout', type: 'integer', value: 60)
 option('opencl-kernel-path', type: 'string', value: 'nntrainer_opencl_kernels')
+option('enable-mmap', type: 'boolean', value: true)
 
 # dependency conflict resolution
 option('capi-ml-inference-actual', type: 'string', value: 'capi-ml-inference',
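The option defaults to `true`, so existing builds keep mmap support unchanged; a platform where mmap is unavailable or misbehaves can disable it at configure time (e.g. `meson setup build -Denable-mmap=false`), which simply omits `-DUSE_MMAP=1` from the compiler defines.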
8 changes: 0 additions & 8 deletions nntrainer/app_context.cpp
@@ -222,14 +222,6 @@ const std::string getFullPath(const std::string &path,
 
 std::mutex factory_mutex;
 
-/**
- * @brief finalize global context
- *
- */
-static void fini_global_context_nntrainer(void) __attribute__((destructor));
-
-static void fini_global_context_nntrainer(void) {}
-
Comment on lines -225 to -232

Collaborator: Does this cause a compile error for Windows target?

@gkisalapl (Contributor, author), Feb 5, 2025: Yes, `__attribute__((destructor))` is a GCC extension.
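Since the finalizer body was empty, deleting it outright is the cleanest fix. If it ever needs to do real work, a portable stand-in for `__attribute__((destructor))` is the destructor of an object with static storage duration, which standard C++ runs at normal program exit on every compiler, MSVC included. A minimal sketch with illustrative names, not part of nntrainer:

```cpp
#include <cstdio>

namespace {

// Portable stand-in for __attribute__((destructor)): the destructor of a
// static-storage-duration object runs at normal program exit (MSVC too).
struct GlobalContextFinalizer {
  ~GlobalContextFinalizer() { std::puts("finalizing global context"); }
};

GlobalContextFinalizer fini_guard; // constructed at startup, destroyed at exit

} // anonymous namespace

int main() { return 0; } // the message prints after main returns
```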

 std::once_flag global_app_context_init_flag;
 
 static void add_default_object(AppContext &ac) {
1 change: 1 addition & 0 deletions nntrainer/compiler/remap_realizer.h
@@ -14,6 +14,7 @@
 
 #include <functional>
 #include <memory>
+#include <string>
 #include <vector>
 
 #include <realizer.h>
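This one-line include, and the similar `<numeric>` additions in databuffer.cpp, dynamic_training_optimization.cpp, float_tensor.cpp, and tensor.cpp below, most likely share one cause: MSVC's standard library headers include fewer other headers transitively than libstdc++'s do, so a header whose contents a file uses must be included directly rather than relied on to arrive by accident.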
5 changes: 2 additions & 3 deletions nntrainer/dataset/databuffer.cpp
@@ -33,6 +33,7 @@
 #include <nntrainer_error.h>
 #include <nntrainer_log.h>
 #include <node_exporter.h>
+#include <numeric>
 #include <sstream>
 #include <stdexcept>
 #include <stdio.h>
@@ -61,9 +62,7 @@ class PropsBufferSize : public nntrainer::PositiveIntegerProperty {
 constexpr char USER_DATA[] = "user_data";
 
 DataBuffer::DataBuffer(std::unique_ptr<DataProducer> &&producer_) :
-  producer(std::move(producer_)),
-  db_props(new Props()),
-  user_data(nullptr) {
+  producer(std::move(producer_)), db_props(new Props()), user_data(nullptr) {
   rng.seed(0);
 }
2 changes: 1 addition & 1 deletion nntrainer/dataset/dir_data_producers.cpp
@@ -104,7 +104,7 @@ DirDataProducer::finalize(const std::vector<TensorDim> &input_dims,
   const auto &dir_path = std::get<props::DirPath>(*dir_data_props).get();
 
   for (const auto &entry : std::filesystem::directory_iterator(dir_path))
-    class_names.push_back(entry.path());
+    class_names.push_back(entry.path().string());
 
   num_class = class_names.size();
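The explicit `.string()` matters on Windows: `std::filesystem::path::value_type` is `wchar_t` there, so a `path` converts implicitly only to `std::wstring`, and pushing a `path` into a `std::vector<std::string>` no longer compiles; `.string()` performs the conversion explicitly and works on every platform.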
5 changes: 5 additions & 0 deletions nntrainer/layers/acti_func.h
@@ -19,6 +19,11 @@
 #include <blas_interface.h>
 #include <common_properties.h>
 
+#if defined(_WIN32)
+#define _USE_MATH_DEFINES
+#include <math.h>
+#endif
+
 namespace nntrainer {
 
 class Tensor;
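For background: Microsoft's CRT hides the classic math constants (`M_PI`, `M_E`, ...) unless `_USE_MATH_DEFINES` is defined before `<math.h>` (or `<cmath>`) is first included in the translation unit, while glibc typically exposes them by default. That ordering requirement is also why defining it inside a header is fragile: if an earlier header already pulled in `<cmath>`, the define has no effect. A minimal sketch:

```cpp
// On MSVC, M_PI is invisible unless _USE_MATH_DEFINES precedes the first
// inclusion of <math.h> (or <cmath>) in this translation unit.
#if defined(_WIN32)
#define _USE_MATH_DEFINES
#endif
#include <math.h>

#include <cstdio>

int main() {
  std::printf("pi = %.17g\n", M_PI); // fails to compile on MSVC without the define
  return 0;
}
```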
4 changes: 2 additions & 2 deletions nntrainer/layers/dropout.h
@@ -29,8 +29,8 @@ class DropOutLayer : public Layer {
   /**
    * @brief Constructor of DropOut Layer
    */
-  DropOutLayer(float dropout = 0.0) :
-    Layer(), dropout_rate(props::DropOutRate(dropout)), epsilon(1e-3) {}
+  DropOutLayer(float dropout = 0.0f) :
+    Layer(), dropout_rate(props::DropOutRate(dropout)), epsilon(1e-3f) {}
 
   /**
    * @brief Destructor of DropOut Layer
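The `f` suffixes here, and in rnn.cpp, rnncell.cpp, tensor.h, tensor_base.h, and weight.h below, all address the same MSVC diagnostic: an unsuffixed `1e-3` is a `double` literal, and initializing a `float` from one triggers a truncation warning (C4305) that a warnings-as-errors build turns into a hard error. A minimal illustration:

```cpp
#include <cstdio>

int main() {
  float a = 1e-3;  // MSVC: warning C4305, truncation from 'double' to 'float'
  float b = 1e-3f; // float literal: the types match, no diagnostic
  std::printf("%g %g\n", a, b);
  return 0;
}
```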
2 changes: 1 addition & 1 deletion nntrainer/layers/rnn.cpp
@@ -44,7 +44,7 @@ RNNLayer::RNNLayer() :
   props::Unit(), props::HiddenStateActivation() = ActivationType::ACT_TANH,
   props::ReturnSequences(), props::DropOutRate(), props::IntegrateBias()),
   acti_func(ActivationType::ACT_NONE, true),
-  epsilon(1e-3) {
+  epsilon(1e-3f) {
   wt_idx.fill(std::numeric_limits<unsigned>::max());
 }
2 changes: 1 addition & 1 deletion nntrainer/layers/rnncell.cpp
@@ -45,7 +45,7 @@ RNNCellLayer::RNNCellLayer() :
   props::HiddenStateActivation() = ActivationType::ACT_TANH,
   props::DropOutRate()),
   acti_func(ActivationType::ACT_NONE, true),
-  epsilon(1e-3) {
+  epsilon(1e-3f) {
   wt_idx.fill(std::numeric_limits<unsigned>::max());
 }
1 change: 1 addition & 0 deletions nntrainer/models/dynamic_training_optimization.cpp
@@ -11,6 +11,7 @@
  *
  */
 
+#include <numeric>
 #include <random>
 #include <vector>
16 changes: 8 additions & 8 deletions nntrainer/nntrainer_log.h
@@ -56,27 +56,27 @@
 #include <nntrainer_logger.h>
 
 #if !defined(ml_logi)
-#define ml_logi(format, args...) \
+#define ml_logi(format, ...) \
   __nntrainer_log_print(NNTRAINER_LOG_INFO, "(%s:%s:%d) " format, __FILE__, \
-                        __func__, __LINE__, ##args)
+                        __func__, __LINE__, ##__VA_ARGS__)
 #endif
 
 #if !defined(ml_logw)
-#define ml_logw(format, args...) \
+#define ml_logw(format, ...) \
   __nntrainer_log_print(NNTRAINER_LOG_WARN, "(%s:%s:%d) " format, __FILE__, \
-                        __func__, __LINE__, ##args)
+                        __func__, __LINE__, ##__VA_ARGS__)
 #endif
 
 #if !defined(ml_loge)
-#define ml_loge(format, args...) \
+#define ml_loge(format, ...) \
   __nntrainer_log_print(NNTRAINER_LOG_ERROR, "(%s:%s:%d) " format, __FILE__, \
-                        __func__, __LINE__, ##args)
+                        __func__, __LINE__, ##__VA_ARGS__)
 #endif
 
 #if !defined(ml_logd)
-#define ml_logd(format, args...) \
+#define ml_logd(format, ...) \
   __nntrainer_log_print(NNTRAINER_LOG_DEBUG, "(%s:%s:%d) " format, __FILE__, \
-                        __func__, __LINE__, ##args)
+                        __func__, __LINE__, ##__VA_ARGS__)
 #endif
 
 #endif
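Context for this hunk: naming the variadic parameter, as in `args...`, is a GNU extension; the standard C++11 spelling is a bare `...` expanded with `__VA_ARGS__`. The comma-swallowing `##__VA_ARGS__` is itself an extension, but GCC and Clang support it and MSVC copes as well (its traditional preprocessor elides the dangling comma on its own). A minimal sketch of the portable pattern, with an illustrative macro name:

```cpp
#include <cstdio>

// Portable variadic logging macro: '...' + __VA_ARGS__ instead of GNU's
// named 'args...'. The '##' drops the trailing comma when the macro is
// invoked with no variadic arguments.
#define log_info(format, ...) \
  std::printf("(%s:%d) " format "\n", __FILE__, __LINE__, ##__VA_ARGS__)

int main() {
  log_info("plain message");  // no variadic args: the ',' is swallowed
  log_info("value = %d", 42); // with variadic args
  return 0;
}
```

In C++20 the fully standard spelling would be `__VA_OPT__(,) __VA_ARGS__`.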
5 changes: 3 additions & 2 deletions nntrainer/nntrainer_logger.cpp
@@ -30,7 +30,6 @@
 #include <sstream>
 #include <stdarg.h>
 #include <stdexcept>
-#include <unistd.h>
 #include <util_func.h>
 
 namespace nntrainer {
@@ -78,7 +77,9 @@ Logger::Logger() : ts_type(NNTRAINER_LOG_TIMESTAMP_SEC) {
      << std::setw(2) << now.tm_sec << ".out";
   outputstream.open(ss.str(), std::ios_base::app);
   if (!outputstream.good()) {
-    char buf[256] = {0,};
+    char buf[256] = {
+      0,
+    };
     std::string cur_path = std::string(buf);
     std::string err_msg =
       "Unable to initialize the Logger on path(" + cur_path + ")";
5 changes: 5 additions & 0 deletions nntrainer/optimizers/lr_scheduler_cosine.cpp
@@ -11,7 +11,12 @@
  *
  */
 
+#if defined(_WIN32)
+#define _USE_MATH_DEFINES
+#include <math.h>
+#else
 #include <cmath>
+#endif
 
 #include <common_properties.h>
 #include <lr_scheduler_cosine.h>
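Same `_USE_MATH_DEFINES` pattern as in acti_func.h above, presumably because the cosine-annealing schedule uses `M_PI`; placing the block at the very top of the .cpp file avoids the header-ordering pitfall, since nothing can include `<cmath>` earlier in this translation unit.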
1 change: 1 addition & 0 deletions nntrainer/tensor/float_tensor.cpp
@@ -11,6 +11,7 @@
 
 #include <iomanip>
 #include <iostream>
+#include <numeric>
 
 #include <blas_interface.h>
 #include <float_tensor.h>
10 changes: 8 additions & 2 deletions nntrainer/tensor/manager.cpp
@@ -24,11 +24,14 @@
 #include <functional>
 #include <limits>
 #include <stdexcept>
-#include <sys/mman.h>
 #include <sys/stat.h>
-#include <unistd.h>
 #include <vector>
 
+#if !defined(_WIN32)
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
 #include <activation_layer.h>
 #include <basic_planner.h>
 #include <bn_layer.h>
@@ -51,6 +54,8 @@
 #include <var_grad.h>
 
 namespace nntrainer {
+
+#if !defined(_WIN32)
 MMapedMemory::MMapedMemory(size_t size, bool allocate_fd_) :
   fd(-1), buf(nullptr), buf_size(0), allocate_fd(allocate_fd_) {
 
@@ -136,6 +141,7 @@ MMapedMemory::~MMapedMemory() noexcept {
   buf_size = 0;
   ml_logd("[MMapedMemory] buf released");
 }
+#endif
 
 void Manager::reinitialize() {
   inputs_v2.clear();
3 changes: 3 additions & 0 deletions nntrainer/tensor/manager.h
@@ -38,6 +38,8 @@
 
 namespace nntrainer {
 using ExecutionMode = ml::train::ExecutionMode;
+
+#if !defined(_WIN32)

Collaborator (on the added guard): Is there any reason to remove this code for Windows?

 /**
  * @class MMappedMemory
  * @brief Memory Handler, that has mmaped memory with a file descriptor
@@ -104,6 +106,7 @@ class MMapedMemory {
   size_t buf_size;  /**< buffer size */
   bool allocate_fd; /**< option to choose to allocate an fd */
 };
+#endif
 
 /**
  * @class Manager
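On the reviewer's question: `MMapedMemory` is written directly against POSIX `mmap`/`munmap` and raw file descriptors, which have no Windows equivalents, so the PR compiles the class out rather than porting it. For orientation only, a hedged sketch of what the anonymous-mapping half could look like on Win32; the helper names are illustrative and not part of this PR:

```cpp
#if defined(_WIN32)
#include <windows.h>
#include <cstddef>

// Roughly what MMapedMemory does with mmap(MAP_ANONYMOUS) on POSIX:
// an anonymous read/write mapping backed by the system paging file.
void *win32_map_anonymous(std::size_t size, HANDLE *out_mapping) {
  const unsigned long long sz = size; // split into high/low DWORDs
  HANDLE mapping =
    CreateFileMappingA(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE,
                       static_cast<DWORD>(sz >> 32), static_cast<DWORD>(sz),
                       nullptr);
  if (mapping == nullptr)
    return nullptr;
  void *buf = MapViewOfFile(mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
  if (buf == nullptr) {
    CloseHandle(mapping);
    return nullptr;
  }
  *out_mapping = mapping;
  return buf;
}

void win32_unmap(void *buf, HANDLE mapping) {
  UnmapViewOfFile(buf); // counterpart of munmap()
  CloseHandle(mapping); // counterpart of close() on the fd
}
#endif
```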
7 changes: 2 additions & 5 deletions nntrainer/tensor/swap_device.cpp
@@ -16,9 +16,7 @@
 #include <malloc.h>
 #include <profiler.h>
 #include <stdlib.h>
-#include <sys/mman.h>
 #include <sys/types.h>
-#include <unistd.h>
 
 #include <nntrainer_error.h>
 #include <nntrainer_log.h>
@@ -30,8 +28,7 @@ void SwapDevice::start(size_t size) {
   if (fd > 0)
     return;
 
-  fd =
-    open(dev_path.c_str(), O_RDWR | O_CREAT | O_TRUNC | O_SYNC, (mode_t)0666);
+  fd = open(dev_path.c_str(), O_RDWR | O_CREAT | O_TRUNC | O_SYNC, 0666UL);
   NNTR_THROW_IF(fd < 0, std::runtime_error)
     << "SwapDevice: open file: " << dev_path;
 
@@ -153,7 +150,7 @@ void SwapDevice::putBuffer(void *ptr, bool dealloc_only) {
   free(ptr);
   allocated.erase(ptr);
 
-#ifndef __ANDROID__
+#if !defined(__ANDROID__) && !defined(_WIN32)
   malloc_trim(0);
 #endif
 
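Two portability details in this file: `malloc_trim()` is a glibc extension, so the guard that already excluded Android's bionic now excludes Microsoft's CRT too, and the switch from `(mode_t)0666` to `0666UL` drops `mode_t`, a POSIX typedef that the Windows headers do not define.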
21 changes: 13 additions & 8 deletions nntrainer/tensor/swap_device.h
@@ -18,15 +18,22 @@
 #include <map>
 #include <memory>
 #include <string>
-#include <sys/mman.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <system_error>
-#include <unistd.h>
 #include <utility>
 
-/* Uncomment this to use mmap for swap data */
-#define USE_MMAP
+#if defined(_WIN32)
+#include <io.h>
+#define O_SYNC 0UL
+#else
+#include <sys/mman.h>
+#include <unistd.h>
+#endif
+
+#if defined(_WIN32)
+using ssize_t = std::make_signed_t<size_t>;
+#endif
 
 namespace nntrainer {
@@ -47,16 +54,14 @@ class SwapDevice {
    *
    */
   explicit SwapDevice(const std::string &name) :
-    dev_path(swap_device_default_path + name),
-    fd(-1) {}
+    dev_path(swap_device_default_path + name), fd(-1) {}
 
   /**
    * @brief SwapDevice default constructor
    *
    */
   explicit SwapDevice(const std::string &path, const std::string &name) :
-    dev_path(path + "/" + name),
-    fd(-1) {}
+    dev_path(path + "/" + name), fd(-1) {}
 
   /**
    * @brief SwapDevice destructor
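Three shims here are worth spelling out. `O_SYNC` has no Windows counterpart, so it is defined to `0UL` and synchronous writes are simply not requested there. `ssize_t` is a POSIX type rather than standard C++, so it is recreated as the signed counterpart of `size_t` (strictly, `std::make_signed_t` lives in `<type_traits>`, which presumably arrives transitively here). And the hard-coded `#define USE_MMAP` disappears because the new `enable-mmap` meson option injects `-DUSE_MMAP=1` from the build system instead.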
1 change: 0 additions & 1 deletion nntrainer/tensor/task_executor.h
@@ -22,7 +22,6 @@
 #include <memory>
 #include <mutex>
 #include <thread>
-#include <unistd.h>
 
 #include <task.h>
2 changes: 2 additions & 0 deletions nntrainer/tensor/tensor.cpp
@@ -9,6 +9,8 @@
  * @bug No known bugs except for NYI items
  */
 
+#include <numeric>
+
 #include <char_tensor.h>
 #include <float_tensor.h>
 #include <lazy_tensor.h>
2 changes: 1 addition & 1 deletion nntrainer/tensor/tensor.h
@@ -1745,7 +1745,7 @@ class Tensor {
    */
   bool isValid() const { return itensor->isValid(); };
 
-  static constexpr float epsilon = 1e-5;
+  static constexpr float epsilon = 1e-5f;
 
 private:
   std::shared_ptr<TensorBase> itensor;
2 changes: 1 addition & 1 deletion nntrainer/tensor/tensor_base.h
@@ -704,7 +704,7 @@ class TensorBase {
    */
   virtual bool isValid() const = 0;
 
-  static constexpr float epsilon = 1e-5;
+  static constexpr float epsilon = 1e-5f;
 
 protected:
   TensorDim dim;
4 changes: 2 additions & 2 deletions nntrainer/tensor/weight.h
@@ -364,9 +364,9 @@ class Weight : public Var_Grad {
   const float getLossScale() { return loss_scale; };
 
 private:
-  static constexpr float epsilon = 1e-6; /**< epsilon for zero comparison */
+  static constexpr float epsilon = 1e-6f; /**< epsilon for zero comparison */
   static constexpr float epsilon_decay =
-    1e-8; /**< epsilon for zero comparison */
+    1e-8f; /**< epsilon for zero comparison */
 
   WeightRegularizer regularizer; /**< regularizer for this variable */
   float regularizer_constant; /**< constant factor for regularization */