Skip to content

Commit

Permalink
[Code] Fix include issues for build (#13)
Browse files Browse the repository at this point in the history
  • Loading branch information
EgorOrachyov committed Feb 17, 2022
1 parent 08c542f commit 5a6e647
Show file tree
Hide file tree
Showing 3 changed files with 63 additions and 56 deletions.
111 changes: 57 additions & 54 deletions deps/nsparse/include/nsparse/unified_allocator.h
Original file line number Diff line number Diff line change
Expand Up @@ -3,79 +3,82 @@
#include <thrust/detail/config.h>
#include <thrust/device_ptr.h>
#include <thrust/mr/allocator.h>
#include <thrust/memory/detail/device_system_resource.h>

#include <limits>
#include <stdexcept>
#include <iostream>

namespace nsparse {

/**
 * Allocates CUDA unified (managed) memory and prefetches it to the current device.
 *
 * Matches the allocation-function signature expected by
 * thrust::system::cuda::detail::cuda_memory_resource so it can back a memory resource.
 *
 * @param ptr   Out: receives the allocated pointer; set to nullptr on failure.
 * @param bytes Number of bytes to allocate.
 * @return cudaSuccess on success, otherwise the first failing CUDA status.
 *         On any failure after the allocation succeeded, the allocation is
 *         released before returning so the caller does not leak it.
 */
inline cudaError_t cudaMallocManagedPrefetch(void** ptr, std::size_t bytes) {
  auto status = thrust::system::cuda::detail::cudaMallocManaged(ptr, bytes);
  if (status != cudaSuccess) {
    return status;
  }

  int device = -1;
  status = cudaGetDevice(&device);
  if (status != cudaSuccess) {
    // Allocation succeeded but we cannot prefetch: free it so the error
    // path does not leak managed memory.
    cudaFree(*ptr);
    *ptr = nullptr;
    return status;
  }

  // Prefetch to the current device so first-touch page faults are avoided.
  // NOTE(review): on platforms without concurrentManagedAccess this call can
  // fail even though the allocation is usable — confirm the desired policy.
  status = cudaMemPrefetchAsync(*ptr, bytes, device, nullptr);
  if (status != cudaSuccess) {
    cudaFree(*ptr);
    *ptr = nullptr;
  }
  return status;
}

// Memory resource whose allocations are managed memory prefetched to the
// current device; plugs into thrust's device_ptr-based allocators.
using universal_prefetched_memory_resource =
    thrust::system::cuda::detail::cuda_memory_resource<cudaMallocManagedPrefetch, cudaFree,
                                                       thrust::cuda::pointer<void>>;
}  // namespace nsparse

namespace thrust {

/*! An allocator with the interface of \p thrust::device_allocator that draws
 *  its storage from \p nsparse::universal_prefetched_memory_resource, i.e.
 *  unified (managed) memory prefetched to the current device.
 */
template <typename T>
class device_unified_allocator
    : public thrust::mr::stateless_resource_allocator<
          T, device_ptr_memory_resource<nsparse::universal_prefetched_memory_resource>> {
  typedef thrust::mr::stateless_resource_allocator<
      T, device_ptr_memory_resource<nsparse::universal_prefetched_memory_resource>>
      base;

public:
  /*! The \p rebind metafunction provides the type of a \p device_allocator
   * instantiated with another type.
   *
   * \tparam U the other type to use for instantiation.
   */
  template <typename U>
  struct rebind {
    /*! The typedef \p other gives the type of the rebound \p device_allocator.
     */
    typedef device_unified_allocator<U> other;
  };

  /*! Default constructor has no effect. */
  __host__ device_unified_allocator() {
  }

  /*! Copy constructor has no effect. */
  __host__ device_unified_allocator(const device_unified_allocator& other) : base(other) {
  }

  /*! Constructor from other \p device_allocator has no effect. */
  template <typename U>
  __host__ device_unified_allocator(const device_unified_allocator<U>& other) : base(other) {
  }

  /*! Destructor has no effect. */
  __host__ ~device_unified_allocator() {
  }
};

}  // namespace thrust

namespace nsparse {

// Convenience alias: an allocator handing out prefetched unified memory,
// suitable as the allocator argument of thrust containers.
template <typename T>
using managed = thrust::device_unified_allocator<T>;

//template <typename T>
//using managed_vector = thrust::device_vector<T, managed<T>>;
Expand Down
2 changes: 1 addition & 1 deletion docs/tutorial.md
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ The following terminal commands allow to download the latest **pyspbla** package
python3 -m pip install pyspbla
```

## First pyspbla program

Run python interpreter and execute following commands:

Expand Down
6 changes: 5 additions & 1 deletion spbla/sources/core/config.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,15 @@
#define SPBLA_CONFIG_HPP

#include <spbla/spbla.h>
#include <cstddef>

namespace spbla {
    // Index type used throughout the library (mirrors the C API typedef).
    using index = spbla_Index;
    // Bit-flag hints forwarded to API calls (mirrors the C API typedef).
    using hints = spbla_Hints;
    // (row, column) coordinates of a single stored matrix entry.
    struct Pair {
        spbla_Index i;
        spbla_Index j;
    };
}

#endif //SPBLA_CONFIG_HPP

0 comments on commit 5a6e647

Please sign in to comment.