diff --git a/CMakeLists.txt b/CMakeLists.txt
index 760774b6b..62edeb331 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -41,6 +41,7 @@ caffe_option(USE_LEVELDB "Build with levelDB" ON)
 caffe_option(USE_LMDB "Build with lmdb" ON)
 caffe_option(ALLOW_LMDB_NOLOCK "Allow MDB_NOLOCK when reading LMDB files (only if necessary)" OFF)
 caffe_option(USE_OPENMP "Link with OpenMP (when your BLAS wants OpenMP and you get linker errors)" OFF)
+caffe_option(HAVE_BINDING "Build the caffe binding library" ON)
 
 # ---[ Dependencies
 include(cmake/Dependencies.cmake)
@@ -50,6 +51,8 @@ if(UNIX OR APPLE)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC -Wall")
 endif()
 
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -D_MWAITXINTRIN_H_INCLUDED")
+
 caffe_set_caffe_link()
 
 if(USE_libstdcpp)
@@ -107,6 +110,10 @@ add_subdirectory(python)
 add_subdirectory(matlab)
 add_subdirectory(docs)
 
+if (HAVE_BINDING)
+  add_subdirectory(windows/caffe.binding)
+endif()
+
 # ---[ Linter target
 add_custom_target(lint COMMAND ${CMAKE_COMMAND} -P ${PROJECT_SOURCE_DIR}/cmake/lint.cmake)
diff --git a/README.md b/README.md
index 7a7cbff50..22981f416 100644
--- a/README.md
+++ b/README.md
@@ -64,6 +64,12 @@ After you have built solution with Matlab support, in order to use it you have t
 
 ### Build
 Now, you should be able to build `.\windows\Caffe.sln`
+
+## Ubuntu 16.04 CMake build
+`cmake . -DCUDA_NVCC_FLAGS="-D_FORCE_INLINES"`
+
+`make`
+
 ## License and Citation
 
 Caffe is released under the [BSD 2-Clause license](https://github.com/BVLC/caffe/blob/master/LICENSE).
diff --git a/python/caffe/_caffe.cpp b/python/caffe/_caffe.cpp
index 8473d64e1..a1479f387 100644
--- a/python/caffe/_caffe.cpp
+++ b/python/caffe/_caffe.cpp
@@ -188,7 +188,7 @@ namespace caffe {
       bp::object labels_obj) {
     // check that this network has an input MemoryDataLayer
     shared_ptr<MemoryDataLayer<Dtype> > md_layer =
-      boost::dynamic_pointer_cast<MemoryDataLayer<Dtype>>(net->layers()[0]);
+      boost::dynamic_pointer_cast<MemoryDataLayer<Dtype> >(net->layers()[0]);
     if (!md_layer) {
       throw std::runtime_error("set_input_arrays may only be called if the"
           " first layer is a MemoryDataLayer");
@@ -588,4 +588,4 @@ namespace caffe {
     import_array1();
   }
 
-}  // namespace caffe
\ No newline at end of file
+}  // namespace caffe
diff --git a/src/caffe/common.cpp b/src/caffe/common.cpp
index 06d3637ac..36109657b 100644
--- a/src/caffe/common.cpp
+++ b/src/caffe/common.cpp
@@ -139,10 +139,11 @@ void* Caffe::RNG::generator() {
 #else  // Normal GPU + CPU Caffe.
 
 Caffe::Caffe()
-    : cublas_handle_(NULL), curand_generator_(NULL), random_generator_(),
+    : cublas_handle_(NULL), curand_generator_(NULL),
 #ifdef USE_CUDNN
-    cudnn_handle_(NULL),
+      cudnn_handle_(NULL),
 #endif
+      random_generator_(),
     mode_(Caffe::CPU),
     solver_count_(1), solver_rank_(0), multiprocess_(false) {
   // Try to create a cublas handler, and report an error if failed (but we will
diff --git a/src/caffe/layers/batch_contrastive_loss_layer.cpp b/src/caffe/layers/batch_contrastive_loss_layer.cpp
index 912abe5af..0ce70d243 100644
--- a/src/caffe/layers/batch_contrastive_loss_layer.cpp
+++ b/src/caffe/layers/batch_contrastive_loss_layer.cpp
@@ -26,7 +26,13 @@ void BatchContrastiveLossLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& botto
   CHECK_EQ(bottom[0]->num(), bottom[0]->channels());
   if (top.size() >= 2) {
     // positive distance, negative distance.
+#if __cplusplus < 201103L
+    int arr[] = { 2 };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    top[1]->Reshape(shape);
+#else
     top[1]->Reshape({ 2 });
+#endif
   }
 }
 
@@ -34,7 +40,7 @@ template <typename Dtype>
 void BatchContrastiveLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     const vector<Blob<Dtype>*>& top) {
   const Dtype* bottom_data = bottom[0]->cpu_data();
-  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
+//  Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
   const Dtype* label = bottom[1]->cpu_data();
   int num = bottom[0]->num();
   Dtype positive_distance = Dtype(0);
diff --git a/src/caffe/layers/general_constrastive_loss_layer.cpp b/src/caffe/layers/general_constrastive_loss_layer.cpp
index f84250714..8bcfbdefd 100644
--- a/src/caffe/layers/general_constrastive_loss_layer.cpp
+++ b/src/caffe/layers/general_constrastive_loss_layer.cpp
@@ -36,15 +36,33 @@ void GeneralContrastiveLossLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bot
   if (top.size() >= 2) {
     if (add_intra_mae_) {
       // positive distance, negative distance, intra_mae.
+#if __cplusplus < 201103L
+      int arr[] = { 3 };
+      vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+      top[1]->Reshape(shape);
+#else
       top[1]->Reshape({ 3 });
+#endif
     }
     else {
       // positive distance, negative distance.
+#if __cplusplus < 201103L
+      int arr[] = { 2 };
+      vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+      top[1]->Reshape(shape);
+#else
       top[1]->Reshape({ 2 });
+#endif
     }
   }
   if (max_negative_only_) {
+#if __cplusplus < 201103L
+    int arr[] = { bottom[0]->num() };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    max_negative_index_.Reshape(shape);
+#else
     max_negative_index_.Reshape({ bottom[0]->num() });
+#endif
   }
 }
 
@@ -59,7 +77,7 @@ void GeneralContrastiveLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>&
   int num = bottom[0]->num();
   int count = bottom[0]->count();
   int dim = count / num;
-  Dtype weighted_count = num * (abs(positive_weight_) + (dim - 1)*abs(negative_weight_));
+//  Dtype weighted_count = num * (abs(positive_weight_) + (dim - 1)*abs(negative_weight_));
   Dtype positive_distance = Dtype(0);
   Dtype negative_distance = Dtype(0);
   max_positive_index_ = 0;
diff --git a/src/caffe/layers/general_triplet_loss_layer.cpp b/src/caffe/layers/general_triplet_loss_layer.cpp
index 46333d541..5a17afbc8 100644
--- a/src/caffe/layers/general_triplet_loss_layer.cpp
+++ b/src/caffe/layers/general_triplet_loss_layer.cpp
@@ -27,10 +27,22 @@ void GeneralTripletLossLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
   LossLayer<Dtype>::Reshape(bottom, top);
   if (top.size() >= 2) {
     // positive distance, negative distance.
+#if __cplusplus < 201103L
+    int arr[] = { 2 };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    top[1]->Reshape(shape);
+#else
     top[1]->Reshape({ 2 });
+#endif
   }
   if (hardest_only_) {
+#if __cplusplus < 201103L
+    int arr[] = { bottom[0]->num() };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    hardest_index_.Reshape(shape);
+#else
     hardest_index_.Reshape({ bottom[0]->num() });
+#endif
   }
 }
diff --git a/src/caffe/layers/hotspot_layer.cpp b/src/caffe/layers/hotspot_layer.cpp
index 6c0ab7363..c5e7641e5 100644
--- a/src/caffe/layers/hotspot_layer.cpp
+++ b/src/caffe/layers/hotspot_layer.cpp
@@ -38,7 +38,13 @@ void HotspotLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
     height_ = bottom[1]->height();
     width_ = bottom[1]->width();
   }
+#if __cplusplus < 201103L
+  int arr[] = { bottom[0]->num(), num_point, height_, width_ };
+  vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+  top[0]->Reshape(shape);
+#else
   top[0]->Reshape({ bottom[0]->num(), num_point, height_, width_ });
+#endif
 }
 
 template <typename Dtype>
diff --git a/src/caffe/layers/image_data_layer.cpp b/src/caffe/layers/image_data_layer.cpp
index 00df2bb43..92e5f4741 100644
--- a/src/caffe/layers/image_data_layer.cpp
+++ b/src/caffe/layers/image_data_layer.cpp
@@ -55,9 +55,16 @@ void ImageDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
   if (top.size() == 3) {
     num_samples_ = vector<int>(max_label + 1);
     class_weights_ = vector<Dtype>(max_label + 1);
+#if __cplusplus < 201103L
+    for (vector<std::pair<std::string, int> >::iterator l = lines_.begin(); l != lines_.end(); ++l) {
+      num_samples_[l->second]++;
+    }
+#else
     for (auto l : lines_) {
       num_samples_[l.second]++;
     }
+#endif
+
     Dtype mean_sample_num = (Dtype)lines_.size() / (Dtype)(max_label + 1);
     Dtype min_weight = 9999, max_weight = 0;
     for (int i = 0; i < num_samples_.size(); i++) {
@@ -76,10 +83,17 @@ void ImageDataLayer<Dtype>::DataLayerSetUp(const vector<Blob<Dtype>*>& bottom,
   if (balance_) {
     num_samples_ = vector<int>(max_label + 1);
     filename_by_class_ = vector<vector<std::pair<std::string, int> > >(max_label + 1);
+#if __cplusplus < 201103L
+    for (vector<std::pair<std::string, int> >::iterator l = lines_.begin(); l != lines_.end(); ++l) {
+      num_samples_[l->second]++;
+      filename_by_class_[l->second].push_back(std::make_pair(l->first, 0));
+    }
+#else
     for (auto l : lines_) {
       num_samples_[l.second]++;
       filename_by_class_[l.second].push_back(std::make_pair(l.first, 0));
     }
+#endif
     class_id_ = 0;
   }
 
@@ -194,6 +208,20 @@ void ImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
 
     if (balance_) {
       int pick_index = (caffe_rng_rand() % num_samples_[class_id_]) + 1;
+#if __cplusplus < 201103L
+      vector<std::pair<std::string, int> >* samples = &filename_by_class_[class_id_];
+      for (vector<std::pair<std::string, int> >::iterator sample = samples->begin(); sample != samples->end(); ++sample) {
+        if (sample->second == 0) {
+          pick_index--;
+          if (pick_index == 0) {
+            this_line = std::make_pair(sample->first, class_id_);
+            sample->second = 1;
+            num_samples_[class_id_]--;
+            break;
+          }
+        }
+      }
+#else
       for (auto& sample : filename_by_class_[class_id_]) {
         if (sample.second == 0) {
           pick_index--;
@@ -205,12 +233,20 @@ void ImageDataLayer<Dtype>::load_batch(Batch<Dtype>* batch) {
           }
         }
       }
+#endif
       CHECK_GT(this_line.first.size(), 0);
       if (num_samples_[class_id_] == 0) {
         num_samples_[class_id_] = filename_by_class_[class_id_].size();
+#if __cplusplus < 201103L
+        vector<std::pair<std::string, int> >* samples = &filename_by_class_[class_id_];
+        for (vector<std::pair<std::string, int> >::iterator sample = samples->begin(); sample != samples->end(); ++sample) {
+          sample->second = 0;
+        }
+#else
         for (auto& sample : filename_by_class_[class_id_]) {
           sample.second = 0;
         }
+#endif
       }
     }
     else {
diff --git a/src/caffe/layers/insanity_layer.cpp b/src/caffe/layers/insanity_layer.cpp
index f0d25b285..93615891a 100644
--- a/src/caffe/layers/insanity_layer.cpp
+++ b/src/caffe/layers/insanity_layer.cpp
@@ -13,7 +13,7 @@ void InsanityLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
   CHECK_GE(bottom[0]->num_axes(), 2)
       << "Number of axes of bottom blob must be >=2.";
   InsanityParameter insanity_param_ = this->layer_param().insanity_param();
-  int channels = bottom[0]->channels();
+  // int channels = bottom[0]->channels();
   lb_ = insanity_param_.lb();
   ub_ = insanity_param_.ub();
   CHECK_GT(ub_, lb_) << "upper bound must > lower bound.";
@@ -40,8 +40,8 @@ void InsanityLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
   const Dtype* bottom_data = bottom[0]->cpu_data();
   Dtype* top_data = top[0]->mutable_cpu_data();
   const int count = bottom[0]->count();
-  const int dim = bottom[0]->count(2);
-  const int channels = bottom[0]->channels();
+//  const int dim = bottom[0]->count(2);
+//  const int channels = bottom[0]->channels();
   Dtype* slope_data = alpha.mutable_cpu_data();
 
   // For in-place computation
@@ -73,8 +73,8 @@ void InsanityLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
   const Dtype* slope_data = alpha.cpu_data();
   const Dtype* top_diff = top[0]->cpu_diff();
   const int count = bottom[0]->count();
-  const int dim = bottom[0]->count(2);
-  const int channels = bottom[0]->channels();
+//  const int dim = bottom[0]->count(2);
+//  const int channels = bottom[0]->channels();
 
   // For in-place computation
   if (top[0] == bottom[0] && lb_ < 0) {
diff --git a/src/caffe/layers/label_specific_rescale.cpp b/src/caffe/layers/label_specific_rescale.cpp
index 2ebe87030..32de73ee4 100644
--- a/src/caffe/layers/label_specific_rescale.cpp
+++ b/src/caffe/layers/label_specific_rescale.cpp
@@ -55,7 +55,7 @@ void LabelSpecificRescaleLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>&
     const vector<bool>& propagate_down,
     const vector<Blob<Dtype>*>& bottom) {
   if (propagate_down[0]) {
-    const Dtype* bottom_data = bottom[0]->cpu_data();
+//    const Dtype* bottom_data = bottom[0]->cpu_data();
     const Dtype* label_data = bottom[1]->cpu_data();
     const Dtype* top_diff = top[0]->cpu_diff();
     Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
diff --git a/src/caffe/layers/multi_label_image_data_layer.cpp b/src/caffe/layers/multi_label_image_data_layer.cpp
index a419c8693..e404b9768 100644
--- a/src/caffe/layers/multi_label_image_data_layer.cpp
+++ b/src/caffe/layers/multi_label_image_data_layer.cpp
@@ -8,7 +8,11 @@
 #include
 #include
 #include
+#if __cplusplus >= 201103L
 #include <random>
+#else
+#include <boost/random.hpp>
+#endif
 
 #include "caffe/layers/multi_label_image_data_layer.hpp"
 #include "caffe/util/benchmark.hpp"
@@ -23,7 +27,11 @@ namespace caffe {
     this->StopInternalThread();
   }
 
+#if __cplusplus >= 201103L
   typedef std::mt19937 RANDOM_ENGINE;
+#else
+  typedef boost::mt19937 RANDOM_ENGINE;
+#endif
 
   template <typename Dtype>
   void extract_face(cv::Mat& input_image, Dtype* points, int point_count,
@@ -40,14 +48,27 @@ namespace caffe {
     double face_scale = 2 * sqrt((face_center.x - mouth_center.x) * (face_center.x - mouth_center.x)
       + (face_center.y - mouth_center.y) * (face_center.y - mouth_center.y));
     RANDOM_ENGINE prnd(time(NULL));
+
+#if __cplusplus >= 201103L
     face_center.x += std::uniform_int_distribution<int>(-max_random_shift, max_random_shift)(prnd);
     face_center.y += std::uniform_int_distribution<int>(-max_random_shift, max_random_shift)(prnd);
     std::uniform_real_distribution<float> rand_uniform(0, 1);
+#else
+    face_center.x += boost::random::uniform_int_distribution<int>(-max_random_shift, max_random_shift)(prnd);
+    face_center.y += boost::random::uniform_int_distribution<int>(-max_random_shift, max_random_shift)(prnd);
+    boost::random::uniform_real_distribution<float> rand_uniform(0, 1);
+#endif
+
     // shear
     float s = rand_uniform(prnd) * max_shear_ratio * 2 - max_shear_ratio;
     // rotate
+#if __cplusplus >= 201103L
     int angle = std::uniform_int_distribution<int>(
       -max_rotate_angle, max_rotate_angle)(prnd);
+#else
+    int angle = boost::random::uniform_int_distribution<int>(
+      -max_rotate_angle, max_rotate_angle)(prnd);
+#endif
     float a = cos(angle / 180.0 * CV_PI);
     float b = sin(angle / 180.0 * CV_PI);
     // scale
@@ -61,7 +82,11 @@ namespace caffe {
     float ws = ratio * hs;
     int flip = 1;
     if (face_mirror) {
+#if __cplusplus >= 201103L
       flip = std::uniform_int_distribution<int>(0, 1)(prnd)* 2 - 1;
+#else
+      flip = boost::random::uniform_int_distribution<int>(0, 1)(prnd)* 2 - 1;
+#endif
     }
     hs *= flip;
@@ -149,10 +174,17 @@ namespace caffe {
     if (balance_) {
       num_samples_ = vector<int>(max_label + 1);
       filename_by_class_ = vector<vector<std::pair<std::string, shared_ptr<vector<Dtype> > > > >(max_label + 1);
+#if __cplusplus >= 201103L
      for (auto& l : lines_) {
        num_samples_[(*l.second)[balance_by_]]++;
        filename_by_class_[(*l.second)[balance_by_]].push_back(l);
      }
+#else
+      for (typename vector<std::pair<std::string, shared_ptr<vector<Dtype> > > >::iterator l = lines_.begin(); l != lines_.end(); ++l) {
+        num_samples_[(*l->second)[balance_by_]]++;
+        filename_by_class_[(*l->second)[balance_by_]].push_back(*l);
+      }
+#endif
       class_id_ = 0;
     }
 
@@ -194,7 +226,12 @@ namespace caffe {
       << top[0]->channels() << "," << top[0]->height() << ","
       << top[0]->width();
     // label
+#if __cplusplus >= 201103L
     vector<int> label_shape = { batch_size - label_cut_start_ - label_cut_end_, label_count };
+#else
+    int arr[] = { batch_size - label_cut_start_ - label_cut_end_, label_count };
+    vector<int> label_shape(arr,arr+sizeof(arr)/sizeof(int));
+#endif
     top[1]->Reshape(label_shape);
     for (int i = 0; i < this->prefetch_.size(); ++i) {
       this->prefetch_[i]->label_.Reshape(label_shape);
diff --git a/src/caffe/layers/nca_loss_layer.cpp b/src/caffe/layers/nca_loss_layer.cpp
index 7d0226092..6c98dafb3 100644
--- a/src/caffe/layers/nca_loss_layer.cpp
+++ b/src/caffe/layers/nca_loss_layer.cpp
@@ -19,10 +19,22 @@ void NCALossLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
   LossLayer<Dtype>::Reshape(bottom, top);
   if (top.size() == 2) {
     // positive distance, negative distance.
+#if __cplusplus < 201103L
+    int arr[] = { 2 };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    top[1]->Reshape(shape);
+#else
     top[1]->Reshape({ 2 });
+#endif
   }
   if (min_negative_only_) {
+#if __cplusplus < 201103L
+    int arr[] = { bottom[0]->num() };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    min_negative_index_.Reshape(shape);
+#else
     min_negative_index_.Reshape({ bottom[0]->num() });
+#endif
   }
 }
 
@@ -85,7 +97,7 @@ void NCALossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
   int count = bottom[0]->count();
   int dim = count / num;
 
-  Dtype negative_sum = Dtype(0);
+//  Dtype negative_sum = Dtype(0);
 
   for (int i = 0; i < num; ++i) {
     if (min_negative_only_) {
diff --git a/src/caffe/layers/normalize_layer.cpp b/src/caffe/layers/normalize_layer.cpp
index 8a1c9f341..ffbf1a401 100644
--- a/src/caffe/layers/normalize_layer.cpp
+++ b/src/caffe/layers/normalize_layer.cpp
@@ -131,4 +131,4 @@ STUB_GPU(NormalizeLayer);
 INSTANTIATE_CLASS(NormalizeLayer);
 REGISTER_LAYER_CLASS(Normalize);
 
-}  // namespace caffe
\ No newline at end of file
+}  // namespace caffe
diff --git a/src/caffe/layers/pairwise_layer.cpp b/src/caffe/layers/pairwise_layer.cpp
index 55ba5e6f0..161e70881 100644
--- a/src/caffe/layers/pairwise_layer.cpp
+++ b/src/caffe/layers/pairwise_layer.cpp
@@ -33,7 +33,13 @@ void PairwiseLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
   M_ = bottom[0]->num();
   N_ = bottom[1]->num();
   K_ = bottom[0]->channels();
+#if __cplusplus < 201103L
+  int arr[] = { M_, N_, K_ };
+  vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+  top[0]->Reshape(shape);
+#else
   top[0]->Reshape({ M_, N_, K_ });
+#endif
   // If max operation, we will initialize the vector index part.
   if (this->layer_param_.pairwise_param().operation() ==
       PairwiseParameter_PairwiseOp_MAX && top.size() == 1) {
@@ -96,8 +102,8 @@ template <typename Dtype>
 void PairwiseLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
   const int* mask = NULL;
-  const int count = top[0]->count();
-  const Dtype* top_data = top[0]->cpu_data();
+  // const int count = top[0]->count();
+//  const Dtype* top_data = top[0]->cpu_data();
   const Dtype* top_diff = top[0]->cpu_diff();
   const Dtype* bottom_data_a = bottom[0]->cpu_data();
   const Dtype* bottom_data_b = bottom[1]->cpu_data();
diff --git a/src/caffe/layers/pooling_layer.cu b/src/caffe/layers/pooling_layer.cu
index 59d624f07..ae23915b4 100644
--- a/src/caffe/layers/pooling_layer.cu
+++ b/src/caffe/layers/pooling_layer.cu
@@ -378,10 +378,10 @@ if ((n==0)&&( top_data[n*channels_+ch] > 10000))
     int phS = ph * stride_;
 //    for (int pw = 0; pw < pooled_width_; ++pw) {
       int pwS = pw * stride_;
-      int defval = phS * width_ + pwS;
+//      int defval = phS * width_ + pwS;
 //      LOG(INFO) << ph <<","<< pw;
-      int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS];
-      int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS];
+//      int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS];
+//      int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS];
       Dtype hdif = phS - Ih[(n*channels_+ch)*N_+phS * width_ + pwS];
       Dtype vdif = pwS - Iv[(n*channels_+ch)*N_+phS * width_ + pwS];
 //      LOG(INFO) << "fp data[" << defval << "]:" << data_pointer[defval] <<","<< Mdt[defval];
@@ -495,7 +495,7 @@ LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.4";
     int phS = ph * stride_;
     for (int pw = 0; pw < pooled_width_; ++pw) {
       int pwS = pw * stride_;
-      int defval = phS * width_ + pwS;
+//      int defval = phS * width_ + pwS;
       int ihval = Ih[(n*channels_+ch)*N_+phS * width_ + pwS];
       int ivval = Iv[(n*channels_+ch)*N_+phS * width_ + pwS];
       Dtype hdif = phS - Ih[(n*channels_+ch)*N_+phS * width_ + pwS];
@@ -568,8 +568,8 @@ LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.4";
   for (int n = 0; n < bottom[0]->num(); ++n) {
     int N2 = pooled_height_*pooled_width_;
 
-    int center = kernel_size_/2;
-    int def_center = center+center*kernel_size_;
+//    int center = kernel_size_/2;
+//    int def_center = center+center*kernel_size_;
 
     for (int ch = 0; ch < channels_; ++ch) {
 
@@ -617,9 +617,9 @@ LOG(INFO) << "Forward_gpu PoolingParameter_PoolMethod_DEF 2.4";
       if (phS > height_-kernel_size_) phS = height_ - kernel_size_;
 
       for (int pw = 0; pw < pooled_width_; ++pw) {
-        int pwS = pw * stride_;
-        int defval = phS * width_ + pwS;
-        int defwidx;
+//        int pwS = pw * stride_;
+//        int defval = phS * width_ + pwS;
+//        int defwidx;
         int maxIdx;
 
         int hstart = ph * stride_h_ - pad_h_;
@@ -944,7 +944,7 @@ void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   int* defp;
   int N2;
   int Nparam = kernel_size_*kernel_size_;
-  const Dtype* bottom_data_p;
+//  const Dtype* bottom_data_p;
   const Dtype* top_mask = NULL;
 
   switch (this->layer_param_.pooling_param().pool()) {
@@ -1196,25 +1196,25 @@ void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
   case PoolingParameter_PoolMethod_DEF_ALL4:
     //LOG(INFO) << "Backward_gpu PoolingParameter_PoolMethod_DEF_ALL3";
     bottom_diff = bottom[0]->mutable_cpu_diff();
-    top_diff = top[0]->cpu_diff();
-    Ih = Ih_.data();
-    Iv = Iv_.data();
-    defp = defp_.data();
-    d_defw = this->blobs_[0]->mutable_cpu_diff();
-    memset(d_defw, 0, channels_*kernel_size_*kernel_size_* sizeof(Dtype));
-    dh = dh_.mutable_cpu_data();
-    dv = dv_.mutable_cpu_data();
+    top_diff = top[0]->cpu_diff();
+    Ih = Ih_.data();
+    Iv = Iv_.data();
+    defp = defp_.data();
+    d_defw = this->blobs_[0]->mutable_cpu_diff();
+    memset(d_defw, 0, channels_*kernel_size_*kernel_size_* sizeof(Dtype));
+    dh = dh_.mutable_cpu_data();
+    dv = dv_.mutable_cpu_data();
     N2 = pooled_width_*pooled_height_;
-    for (int n = 0; n < top[0]->num(); ++n) {
-      for (int ch = 0; ch < channels_; ++ch) {
+    for (int n = 0; n < top[0]->num(); ++n) {
+      for (int ch = 0; ch < channels_; ++ch) {
 /*        int vstart = Iv[(n*channels_+ch)*N_+defp[ch]];
         int hstart = Ih[(n*channels_+ch)*N_+defp[ch]];
         int vend = min(vstart + kernel_size_, width_);
         int hend = min(hstart + kernel_size_, height_);*/
         for (int ph = 0; ph < pooled_height_; ++ph) {
-          int phS = ph * stride_;
+          //int phS = ph * stride_;
           for (int pw = 0; pw < pooled_width_; ++pw) {
-            int pwS = pw * stride_;
+//            int pwS = pw * stride_;
             int idxInv = Ih[(n*channels_+ch)*N2+ph * pooled_width_ + pw];
             Dtype t_dif = top_diff[ph * pooled_width_ + pw];
             bottom_diff[idxInv] += t_dif;
diff --git a/src/caffe/layers/predict_box_layer.cpp b/src/caffe/layers/predict_box_layer.cpp
index d8150f11f..33daffdda 100644
--- a/src/caffe/layers/predict_box_layer.cpp
+++ b/src/caffe/layers/predict_box_layer.cpp
@@ -47,12 +47,25 @@ void PredictBoxLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
     CHECK_EQ(bottom[0]->height(), bottom[2]->height());
     CHECK_EQ(bottom[0]->width(), bottom[2]->width());
   }
-
+#if __cplusplus < 201103L
+  int arr[] = { bottom[0]->num(), 5, bottom[0]->height(), bottom[0]->width() };
+  vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+  top[0]->Reshape(shape);
+  if (output_vector_) {
+    int arr_output[] = { bottom[0]->num(), 1, 5 };
+    vector<int> shape_output(arr_output,arr_output+sizeof(arr_output)/sizeof(int));
+    top[1]->Reshape(shape_output);//will be modified on the fly.
+  }
+  int arr_counter[] = { bottom[0]->num(),1,bottom[0]->height(),bottom[0]->width() };
+  vector<int> shape_counter(arr_counter,arr_counter+sizeof(arr_counter)/sizeof(int));
+  counter_.Reshape(shape_counter);
+#else
   top[0]->Reshape({ bottom[0]->num(), 5, bottom[0]->height(), bottom[0]->width() });
   if (output_vector_) {
     top[1]->Reshape({ bottom[0]->num(), 1, 5 });//will be modified on the fly.
   }
   counter_.Reshape({ bottom[0]->num(),1,bottom[0]->height(),bottom[0]->width() });
+#endif
 }
 
 template <typename Dtype>
@@ -111,7 +124,13 @@ void PredictBoxLayer<Dtype>::Forward_cpu(
 
   if (output_vector_) {
     if (num == 1 && count > 0) {
+#if __cplusplus < 201103L
+      int arr[] = { bottom[0]->num(), count, 5 };
+      vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+      top[1]->Reshape(shape);
+#else
       top[1]->Reshape({ bottom[0]->num(), count, 5 });
+#endif
       int i = 0;
       for (int x = 0; x < output_width; x++) {
         for (int y = 0; y < output_height; y++) {
@@ -127,7 +146,13 @@ void PredictBoxLayer<Dtype>::Forward_cpu(
       }
     }
     else {
+#if __cplusplus < 201103L
+      int arr[] = { bottom[0]->num(), 1, 5 };
+      vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+      top[1]->Reshape(shape);
+#else
       top[1]->Reshape({ bottom[0]->num(), 1, 5 });
+#endif
       caffe_set(top[1]->count(), 0, top[1]->mutable_cpu_data());
     }
   }
diff --git a/src/caffe/layers/predict_box_layer.cu b/src/caffe/layers/predict_box_layer.cu
index e28f123ad..87d74bd4e 100644
--- a/src/caffe/layers/predict_box_layer.cu
+++ b/src/caffe/layers/predict_box_layer.cu
@@ -139,7 +139,13 @@ void PredictBoxLayer<Dtype>::Forward_gpu(
     const Dtype* score_data_cpu = bottom[0]->cpu_data();
     const Dtype* bb_data_cpu = top[0]->cpu_data();
     if (num == 1 && count > 0) {
+#if __cplusplus < 201103L
+      int arr[] = { bottom[0]->num(), (int)count, 5 };
+      vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+      top[1]->Reshape(shape);
+#else
       top[1]->Reshape({ bottom[0]->num(), (int)count, 5 });
+#endif
       int i = 0;
       for (int x = 0; x < output_width; x++) {
         for (int y = 0; y < output_height; y++) {
@@ -155,7 +161,13 @@ void PredictBoxLayer<Dtype>::Forward_gpu(
       }
     }
     else {
+#if __cplusplus < 201103L
+      int arr[] = { bottom[0]->num(), 1, 5 };
+      vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+      top[1]->Reshape(shape);
+#else
       top[1]->Reshape({ bottom[0]->num(), 1, 5 });
+#endif
       caffe_gpu_set(top[1]->count(), 0, top[1]->mutable_gpu_data());
     }
   }
diff --git a/src/caffe/layers/soft_contrastive_loss_layer.cpp b/src/caffe/layers/soft_contrastive_loss_layer.cpp
index 43688544e..f96bc8c03 100644
--- a/src/caffe/layers/soft_contrastive_loss_layer.cpp
+++ b/src/caffe/layers/soft_contrastive_loss_layer.cpp
@@ -27,9 +27,19 @@ void SoftContrastiveLossLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom
   LossLayer<Dtype>::Reshape(bottom, top);
   if (top.size() >= 2) {
     // positive distance, negative distance.
+#if __cplusplus < 201103L
+    int arr[] = { 2 };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    top[1]->Reshape(shape);
+  }
+  int arr_sum_exp[] = { bottom[0]->num(), 1 };
+  vector<int> shape_sum_exp(arr_sum_exp,arr_sum_exp+sizeof(arr_sum_exp)/sizeof(int));
+  sum_exp_.Reshape(shape_sum_exp);
+#else
     top[1]->Reshape({ 2 });
   }
   sum_exp_.Reshape({ bottom[0]->num(), 1 });
+#endif
 }
 
 template <typename Dtype>
@@ -42,7 +52,7 @@ void SoftContrastiveLossLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bo
   int num = bottom[0]->num();
   int count = bottom[0]->count();
   int dim = count / num;
-  Dtype weighted_count = num * (abs(positive_weight_) + (dim - 1)*abs(negative_weight_));
+//  Dtype weighted_count = num * (abs(positive_weight_) + (dim - 1)*abs(negative_weight_));
   Dtype positive_distance = Dtype(0);
   Dtype negative_distance = Dtype(0);
   Dtype* loss = top[0]->mutable_cpu_data();
@@ -110,11 +120,11 @@ void SoftContrastiveLossLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& t
   int count = bottom[0]->count();
   int dim = count / num;
 
-  Dtype negative_sum = Dtype(0);
+  // Dtype negative_sum = Dtype(0);
 
   for (int i = 0; i < num; ++i) {
-    Dtype min_negative_distance = FLT_MAX;
-    int min_negative_index = 0;
+//    Dtype min_negative_distance = FLT_MAX;
+//    int min_negative_index = 0;
     for (int j = 0; j < dim; ++j) {
       if (j == static_cast<int>(label[i])) {
         if (bottom_data[i * dim + j] > positive_margin_ && bottom_data[i * dim + j] < positive_outlier_thresh_) {
diff --git a/src/caffe/layers/softmax_loss_layer.cpp b/src/caffe/layers/softmax_loss_layer.cpp
index 21576170b..581613152 100644
--- a/src/caffe/layers/softmax_loss_layer.cpp
+++ b/src/caffe/layers/softmax_loss_layer.cpp
@@ -64,7 +64,13 @@ void SoftmaxWithLossLayer<Dtype>::LayerSetUp(
   softmax_axis_ = bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis());
   if (has_class_weight_) {
+#if __cplusplus < 201103L
+    int arr[] = { bottom[0]->shape(softmax_axis_) };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    class_weight_.Reshape(shape);
+#else
     class_weight_.Reshape({ bottom[0]->shape(softmax_axis_) });
+#endif
     CHECK_EQ(this->layer_param_.softmax_param().class_weight().size(), bottom[0]->shape(softmax_axis_));
     for (int i = 0; i < bottom[0]->shape(softmax_axis_); i++) {
       class_weight_.mutable_cpu_data()[i] = (Dtype)this->layer_param_.softmax_param().class_weight(i);
@@ -72,7 +78,13 @@ void SoftmaxWithLossLayer<Dtype>::LayerSetUp(
   }
   else {
     if (bottom.size() == 3) {
+#if __cplusplus < 201103L
+      int arr[] = { bottom[0]->shape(softmax_axis_) };
+      vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+      class_weight_.Reshape(shape);
+#else
       class_weight_.Reshape({ bottom[0]->shape(softmax_axis_) });
+#endif
       for (int i = 0; i < bottom[0]->shape(softmax_axis_); i++) {
         class_weight_.mutable_cpu_data()[i] = (Dtype)1.0;
       }
@@ -97,8 +109,15 @@ void SoftmaxWithLossLayer<Dtype>::Reshape(
       bottom[0]->CanonicalAxisIndex(this->layer_param_.softmax_param().axis());
   outer_num_ = bottom[0]->count(0, softmax_axis_);
   inner_num_ = bottom[0]->count(softmax_axis_ + 1);
+#if __cplusplus < 201103L
+  int arr[] = { outer_num_, inner_num_ };
+  vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+  counts_.Reshape(shape);
+  loss_.Reshape(shape);
+#else
   counts_.Reshape({ outer_num_, inner_num_ });
   loss_.Reshape({ outer_num_, inner_num_ });
+#endif
   CHECK_EQ(outer_num_ * inner_num_, bottom[1]->count())
       << "Number of labels must match number of predictions; "
       << "e.g., if softmax axis == 1 and prediction shape is (N, C, H, W), "
diff --git a/src/caffe/layers/subregion_layer.cpp b/src/caffe/layers/subregion_layer.cpp
index 299079a47..0d01ffbd2 100644
--- a/src/caffe/layers/subregion_layer.cpp
+++ b/src/caffe/layers/subregion_layer.cpp
@@ -28,12 +28,25 @@ void SubRegionLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom,
     << "corresponding to (num, channels, height, width)";
   CHECK_EQ(bottom[1]->channels() % 2, 0) << "The coordinate blob's size must be able to divided by 2!";
   int num_point = bottom[1]->channels() / 2;
+#if __cplusplus < 201103L
+  if (as_dim_ == 0) {
+    int arr[] = { bottom[0]->num() * num_point, bottom[0]->channels(), height_, width_ };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    top[0]->Reshape(shape);
+  }
+  else {
+    int arr[] = { bottom[0]->num(), bottom[0]->channels() * num_point, height_, width_ };
+    vector<int> shape(arr,arr+sizeof(arr)/sizeof(int));
+    top[0]->Reshape(shape);
+  }
+#else
   if (as_dim_ == 0) {
     top[0]->Reshape({ bottom[0]->num() * num_point, bottom[0]->channels(), height_, width_ });
   }
   else {
     top[0]->Reshape({ bottom[0]->num(), bottom[0]->channels() * num_point, height_, width_ });
   }
+#endif
   if (top.size() == 3) {
     top[1]->ReshapeLike(*bottom[1]);
     top[2]->ReshapeLike(*bottom[1]);
diff --git a/src/caffe/layers/transformer_layer.cpp b/src/caffe/layers/transformer_layer.cpp
index 1ba9b78b7..034ba1e5b 100644
--- a/src/caffe/layers/transformer_layer.cpp
+++ b/src/caffe/layers/transformer_layer.cpp
@@ -34,7 +34,7 @@ namespace caffe {
   template <typename Dtype>
   void TransformerLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
     const vector<Blob<Dtype>*>& top) {
-    const Dtype* bottom_data = bottom[0]->cpu_data();
+//    const Dtype* bottom_data = bottom[0]->cpu_data();
     Dtype* top_data = top[0]->mutable_cpu_data();
     const Dtype* theta_data = bottom[1]->cpu_data();
     const Dtype* CoordinateTarget_data = CoordinateTarget.cpu_data();
@@ -94,7 +94,7 @@ namespace caffe {
     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
     const Dtype* top_diff = top[0]->cpu_diff();
-    const Dtype* top_data = top[0]->cpu_data();
+    // const Dtype* top_data = top[0]->cpu_data();
     Dtype* data_diff = bottom[0]->mutable_cpu_diff();
     Dtype* theta_diff = bottom[1]->mutable_cpu_diff();
     int num = bottom[0]->shape(0);
diff --git a/windows/caffe.binding/CMakeLists.txt b/windows/caffe.binding/CMakeLists.txt
new file mode 100644
index 000000000..086d18afd
--- /dev/null
+++ b/windows/caffe.binding/CMakeLists.txt
@@ -0,0 +1,28 @@
+if(NOT HAVE_BINDING)
+  message(STATUS "caffe binding interface is disabled. Building without it...")
+  return()
+endif()
+
+# ---[ caffe_binding project
+project(caffe_binding C CXX)
+
+file(GLOB_RECURSE caffe_binding_srcs ${PROJECT_SOURCE_DIR}/*.cpp)
+
+SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
+
+add_library(caffe_binding SHARED ${caffe_binding_srcs})
+caffe_default_properties(caffe_binding)
+set_target_properties(caffe_binding PROPERTIES PREFIX "" OUTPUT_NAME "caffe_binding")
+target_link_libraries(caffe_binding PUBLIC ${Caffe_LINK})
+
+if(UNIX OR APPLE)
+  set(__linkname "${PROJECT_SOURCE_DIR}/caffe_binding.so")
+  add_custom_command(TARGET caffe_binding POST_BUILD
+                     COMMAND ln -sf $<TARGET_LINKER_FILE:caffe_binding> "${__linkname}"
+                     COMMENT "Creating symlink ${__linkname} -> ${PROJECT_BINARY_DIR}/lib/caffe_binding${Caffe_POSTFIX}.so")
+endif()
+
+# ---[ Install
+# caffe_binding.so
+install(TARGETS caffe_binding DESTINATION caffe_binding)
+
diff --git a/windows/caffe.binding/CaffeBinding.cpp b/windows/caffe.binding/CaffeBinding.cpp
index 496d72613..b7bea1072 100644
--- a/windows/caffe.binding/CaffeBinding.cpp
+++ b/windows/caffe.binding/CaffeBinding.cpp
@@ -1,8 +1,12 @@
 #include "CaffeBinding.h"
-#include
-#include
+#include
+#include
 #include
 
+#if __cplusplus < 201402L
+#include <boost/make_unique.hpp>
+#endif
+
 using namespace caffe;
 using namespace std;
 
@@ -26,8 +30,13 @@ int CaffeBinding::AddNet(string model_definition, string weights, int gpu_id) {
 
 std::unordered_map<std::string, DataBlob> CaffeBinding::Forward(int net_id) {
   if (!(*predictors_[net_id]).get()) {
+#if __cplusplus >= 201402L
     auto predictor =
       std::make_unique<Net<float>>(prototxts[net_id], Phase::TEST);
+#else
+    auto predictor =
+      boost::make_unique<Net<float>>(prototxts[net_id], Phase::TEST);
+#endif
     predictor->ShareTrainedLayersWith(nets_[net_id]);
     (*predictors_[net_id]).reset(predictor.release());
   }
@@ -48,8 +57,13 @@ std::unordered_map<std::string, DataBlob> CaffeBinding::Forward(std::vector<cv::Mat>&& input_image, int net_id) {
   if (!(*predictors_[net_id]).get()) {
+#if __cplusplus >= 201402L
     auto predictor =
       std::make_unique<Net<float>>(prototxts[net_id], Phase::TEST);
+#else
+    auto predictor =
+      boost::make_unique<Net<float>>(prototxts[net_id], Phase::TEST);
+#endif
     predictor->ShareTrainedLayersWith(nets_[net_id]);
     (*predictors_[net_id]).reset(predictor.release());
   }
@@ -62,8 +76,13 @@ void CaffeBinding::SetMemoryDataLayer(std::string layer_name, std::vector<cv::Mat
 
 void CaffeBinding::SetBlobData(std::string blob_name, std::vector<int> blob_shape, float* data, int net_id) {
   if (!(*predictors_[net_id]).get()) {
+#if __cplusplus >= 201402L
     auto predictor =
       std::make_unique<Net<float>>(prototxts[net_id], Phase::TEST);
+#else
+    auto predictor =
+      boost::make_unique<Net<float>>(prototxts[net_id], Phase::TEST);
+#endif
     predictor->ShareTrainedLayersWith(nets_[net_id]);
     (*predictors_[net_id]).reset(predictor.release());
   }
@@ -74,8 +93,13 @@ void CaffeBinding::SetBlobData(std::string blob_name, std::vector<int> blob_shap
 
 DataBlob CaffeBinding::GetBlobData(std::string blob_name, int net_id) {
   if (!(*predictors_[net_id]).get()) {
+#if __cplusplus >= 201402L
     auto predictor =
-      std::make_unique<Net<float>>(prototxts[net_id], Phase::TEST);
+      std::make_unique<Net<float> >(prototxts[net_id], Phase::TEST);
+#else
+    auto predictor =
+      boost::make_unique<Net<float> >(prototxts[net_id], Phase::TEST);
+#endif
     predictor->ShareTrainedLayersWith(nets_[net_id]);
     (*predictors_[net_id]).reset(predictor.release());
   }
diff --git a/windows/caffe.binding/CaffeBinding.h b/windows/caffe.binding/CaffeBinding.h
index 10d5a8e81..8338c367b 100644
--- a/windows/caffe.binding/CaffeBinding.h
+++ b/windows/caffe.binding/CaffeBinding.h
@@ -1,12 +1,16 @@
 #pragma once
 
+#ifdef _MSC_VER
 #ifdef CAFFEBINDING_EXPORTS
 #define CAFFE_DLL __declspec(dllexport)
 #else
 #define CAFFE_DLL __declspec(dllimport)
 #endif
+#else
+#define CAFFE_DLL
+#endif
 
-#include
+#include
 #include
 #include
 #include
 
@@ -35,4 +39,4 @@ namespace caffe {
     void SetDevice(int gpu_id);
     ~CaffeBinding();
   };
-}
\ No newline at end of file
+}
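
Note for reviewers: the bulk of this patch is a single repeated C++03 fallback — every brace-initialized `Reshape({ ... })` gains a pre-C++11 branch that fills a `vector<int>` from a stack array instead. The standalone sketch below shows the equivalence of the two branches; the free `Reshape` function and the dimension values are hypothetical stand-ins for `Blob<Dtype>::Reshape`, not code from the patch.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Stand-in for Blob<Dtype>::Reshape: just prints the requested shape.
static void Reshape(const std::vector<int>& shape) {
  for (std::size_t i = 0; i < shape.size(); ++i)
    std::cout << shape[i] << (i + 1 < shape.size() ? " x " : "\n");
}

int main() {
#if __cplusplus < 201103L
  // C++03 branch: no initializer lists, so copy a stack array into the vector.
  int arr[] = { 32, 5, 28, 28 };
  std::vector<int> shape(arr, arr + sizeof(arr) / sizeof(int));
  Reshape(shape);
#else
  // C++11 and later: brace-initialize the vector argument directly.
  Reshape({ 32, 5, 28, 28 });
#endif
  return 0;
}
```

Both branches build the same `vector<int>`, so layer behavior is unchanged; only the construction syntax differs between standards.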
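The CaffeBinding changes apply the same scheme one standard later: `std::make_unique` only exists from C++14, so each call is guarded by `__cplusplus >= 201402L`, with `boost::make_unique` (Boost 1.56+, returning the same `std::unique_ptr`) as the C++11 fallback. A self-contained sketch of that pattern, assuming a stand-in `Net` class and an illustrative prototxt name rather than the real `caffe::Net<float>`:

```cpp
#include <iostream>
#include <memory>
#include <string>
#if __cplusplus < 201402L
#include <boost/make_unique.hpp>
#endif

// Stand-in for caffe::Net<float>: only mimics the constructor shape.
struct Net {
  explicit Net(const std::string& proto) { std::cout << "loaded " << proto << "\n"; }
};

int main() {
#if __cplusplus >= 201402L
  std::unique_ptr<Net> net = std::make_unique<Net>("deploy.prototxt");
#else
  // C++11 without make_unique: Boost's drop-in returns the same unique_ptr.
  std::unique_ptr<Net> net = boost::make_unique<Net>("deploy.prototxt");
#endif
  return 0;  // net is released automatically when it goes out of scope.
}
```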