Global structures should work with single MPI rank (#104)
* Global structures should work with single MPI rank

* Allow NULL, to enable MPI without a communication pattern

* PM should be initialized with NULL
ntrost57 authored Jul 21, 2022
1 parent f610c52 · commit ce712e0
Showing 11 changed files with 245 additions and 166 deletions.
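
Taken together, these changes let a global operator be assembled and used on a single MPI rank, without a communication pattern. A minimal sketch of the setup this enables (illustrative only: the include path, the init_rocalution()/stop_rocalution() entry points, and the placeholder size 100 are assumptions; the ParallelManager and GlobalMatrix calls are the ones exercised in the diffs below):

    #include <mpi.h>
    #include <rocalution/rocalution.hpp>

    using namespace rocalution;

    int main(int argc, char* argv[])
    {
        MPI_Init(&argc, &argv);
        MPI_Comm comm = MPI_COMM_WORLD; // run with: mpirun -np 1 ./app

        int rank;
        MPI_Comm_rank(comm, &rank);

        init_rocalution(rank);

        // On one rank, local and global dimensions coincide and no
        // boundary/ghost exchange is needed.
        ParallelManager pm;
        pm.SetMPICommunicator(&comm);
        pm.SetGlobalNrow(100); // placeholder dimensions
        pm.SetGlobalNcol(100);
        pm.SetLocalNrow(100);
        pm.SetLocalNcol(100);

        GlobalMatrix<double> mat;
        mat.SetParallelManager(pm);
        // ... hand a local CSR matrix over, e.g. via SetLocalDataPtrCSR()
        // as done in clients/include/common.hpp below ...

        stop_rocalution();
        MPI_Finalize();
        return 0;
    }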
CMakeLists.txt (2 changes: 1 addition & 1 deletion)
@@ -112,7 +112,7 @@ endif()
 
 
 # Setup version
-set(VERSION_STRING "2.1.1")
+set(VERSION_STRING "2.1.2")
 rocm_setup_version(VERSION ${VERSION_STRING})
 set(rocalution_SOVERSION 0.1)
 
clients/include/common.hpp (17 changes: 17 additions & 0 deletions)
@@ -40,13 +40,30 @@ void distribute_matrix(const MPI_Comm* comm,
     MPI_Comm_size(*comm, &num_procs);
 
     size_t global_nrow = lmat->GetM();
+    size_t global_ncol = lmat->GetN();
     size_t global_nnz  = lmat->GetNnz();
 
     int*       global_row_offset = NULL;
     int*       global_col        = NULL;
     ValueType* global_val        = NULL;
 
     lmat->LeaveDataPtrCSR(&global_row_offset, &global_col, &global_val);
 
+    // If we have only a single MPI rank, we are done
+    if(num_procs == 1)
+    {
+        pm->SetMPICommunicator(comm);
+        pm->SetGlobalNrow(global_nrow);
+        pm->SetGlobalNcol(global_ncol);
+        pm->SetLocalNrow(global_nrow);
+        pm->SetLocalNcol(global_ncol);
+
+        gmat->SetParallelManager(*pm);
+        gmat->SetLocalDataPtrCSR(&global_row_offset, &global_col, &global_val, "mat", global_nnz);
+
+        return;
+    }
+
     // Compute local matrix sizes
     std::vector<int> local_size(num_procs);
 
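For context, a hedged sketch of how a client drives this helper; the signature distribute_matrix(comm, lmat, gmat, pm) is assumed from the parameter names used in the hunk above:

    // Sketch only: the root rank reads the matrix, then the helper splits
    // it across ranks; with num_procs == 1 it now takes the early-return
    // path above and simply moves the CSR pointers into the global matrix.
    MPI_Comm comm = MPI_COMM_WORLD;

    int rank;
    MPI_Comm_rank(comm, &rank);

    LocalMatrix<double>  lmat;
    GlobalMatrix<double> gmat;
    ParallelManager      pm;

    if(rank == 0)
    {
        lmat.ReadFileMTX("matrix.mtx"); // hypothetical input file
    }

    distribute_matrix(&comm, &lmat, &gmat, &pm);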
clients/include/testing_local_vector.hpp (13 changes: 0 additions & 13 deletions)
@@ -79,12 +79,6 @@ void testing_local_vector_bad_args(void)
                      ".*Assertion.*index != (NULL|__null)*");
     }
 
-    // GetIndexValues
-    {
-        T* null_T = nullptr;
-        ASSERT_DEATH(vec.GetIndexValues(null_T), ".*Assertion.*values != (NULL|__null)*");
-    }
-
     // SetIndexValues
     {
         T* null_T = nullptr;
@@ -98,13 +92,6 @@
                      ".*Assertion.*values != (NULL|__null)*");
     }
 
-    // SetContinuousValues
-    {
-        T* null_T = nullptr;
-        ASSERT_DEATH(vec.SetContinuousValues(0, 0, null_T),
-                     ".*Assertion.*values != (NULL|__null)*");
-    }
-
     // ExtractCoarseMapping
     {
         int* null_int = nullptr;
clients/samples/cg-amg_mpi.cpp (7 changes: 0 additions & 7 deletions)
@@ -43,13 +43,6 @@ int main(int argc, char* argv[])
     MPI_Comm_rank(comm, &rank);
     MPI_Comm_size(comm, &num_procs);
 
-    // Check command line parameters
-    if(num_procs < 2)
-    {
-        std::cerr << "Expecting at least 2 MPI processes" << std::endl;
-        return -1;
-    }
-
     if(argc < 2)
     {
         std::cerr << argv[0] << " <global_matrix>" << std::endl;
clients/samples/cg_mpi.cpp (7 changes: 0 additions & 7 deletions)
@@ -43,13 +43,6 @@ int main(int argc, char* argv[])
     MPI_Comm_rank(comm, &rank);
     MPI_Comm_size(comm, &num_procs);
 
-    // Check command line parameters
-    if(num_procs < 2)
-    {
-        std::cerr << "Expecting at least 2 MPI processes" << std::endl;
-        return -1;
-    }
-
     if(argc < 2)
     {
         std::cerr << argv[0] << " <global_matrix>" << std::endl;
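With the two-process check gone, both samples also run as a single process; assuming the built sample binary names, for example:

    mpirun -np 1 ./cg_mpi <global_matrix>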
src/base/global_matrix.cpp (61 changes: 46 additions & 15 deletions)
@@ -53,6 +53,8 @@ namespace rocalution
         FATAL_ERROR(__FILE__, __LINE__);
 #endif
 
+        this->pm_ = NULL;
+
         this->object_name_ = "";
 
         this->nnz_ = 0;
@@ -1518,14 +1520,20 @@
                   rGsize,
                   ordering);
 
-        // TODO asserts
-
-        LocalMatrix<ValueType> tmp;
-        tmp.CloneFrom(this->matrix_ghost_);
-        tmp.ConvertToCSR();
+        if(this->pm_ == NULL || this->pm_->num_procs_ == 1)
+        {
+            this->matrix_interior_.InitialPairwiseAggregation(
+                beta, nc, G, Gsize, rG, rGsize, ordering);
+        }
+        else
+        {
+            LocalMatrix<ValueType> tmp;
+            tmp.CloneFrom(this->matrix_ghost_);
+            tmp.ConvertToCSR();
 
-        this->matrix_interior_.InitialPairwiseAggregation(
-            tmp, beta, nc, G, Gsize, rG, rGsize, ordering);
+            this->matrix_interior_.InitialPairwiseAggregation(
+                tmp, beta, nc, G, Gsize, rG, rGsize, ordering);
+        }
     }
 
     template <typename ValueType>
@@ -1547,14 +1555,20 @@
                   rGsize,
                   ordering);
 
-        // TODO asserts
-
-        LocalMatrix<ValueType> tmp;
-        tmp.CloneFrom(this->matrix_ghost_);
-        tmp.ConvertToCSR();
+        if(this->pm_ == NULL || this->pm_->num_procs_ == 1)
+        {
+            this->matrix_interior_.FurtherPairwiseAggregation(
+                beta, nc, G, Gsize, rG, rGsize, ordering);
+        }
+        else
+        {
+            LocalMatrix<ValueType> tmp;
+            tmp.CloneFrom(this->matrix_ghost_);
+            tmp.ConvertToCSR();
 
-        this->matrix_interior_.FurtherPairwiseAggregation(
-            tmp, beta, nc, G, Gsize, rG, rGsize, ordering);
+            this->matrix_interior_.FurtherPairwiseAggregation(
+                tmp, beta, nc, G, Gsize, rG, rGsize, ordering);
+        }
     }
 
     template <typename ValueType>
@@ -1582,7 +1596,24 @@
         assert(pm != NULL);
         assert(rG != NULL);
 
-        // TODO asserts
+        if(this->pm_ == NULL || this->pm_->num_procs_ == 1)
+        {
+            this->matrix_interior_.CoarsenOperator(
+                &Ac->matrix_interior_, pm, nrow, ncol, G, Gsize, rG, rGsize);
+
+            pm->Clear();
+            pm->SetMPICommunicator(this->pm_->comm_);
+
+            pm->SetGlobalNrow(Ac->matrix_interior_.GetM());
+            pm->SetGlobalNcol(Ac->matrix_interior_.GetN());
+
+            pm->SetLocalNrow(Ac->matrix_interior_.GetM());
+            pm->SetLocalNcol(Ac->matrix_interior_.GetN());
+
+            Ac->SetParallelManager(*pm);
+
+            return;
+        }
 
 #ifdef SUPPORT_MULTINODE
         // MPI Requests for sync
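All three methods now dispatch on the same guard, and the constructor change above (pm_ = NULL) is what makes that guard well-defined before any ParallelManager is attached. Schematically (member names as in the hunks above; bodies elided, not a verbatim copy):

    if(this->pm_ == NULL || this->pm_->num_procs_ == 1)
    {
        // Single rank or no communication pattern: the ghost part is
        // empty, so operate on matrix_interior_ alone.
    }
    else
    {
        // Multi-rank path: clone matrix_ghost_, convert it to CSR, and
        // pass it to the interior kernel as well.
    }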
src/base/global_vector.cpp (2 changes: 2 additions & 0 deletions)
@@ -51,6 +51,8 @@ namespace rocalution
         FATAL_ERROR(__FILE__, __LINE__);
 #endif
 
+        this->pm_ = NULL;
+
         this->object_name_ = "";
     }
 