Skip to content
This repository has been archived by the owner on May 16, 2024. It is now read-only.

Commit

Permalink
Merge pull request #81 from Cerebellum-Network/fix/cluster-api
Browse files Browse the repository at this point in the history
Fix the cluster listing endpoint; segregate vNodes by node within a cluster
  • Loading branch information
yahortsaryk authored Aug 11, 2023
2 parents 2cc609b + 6fdea4f commit 99a3f57
Show file tree
Hide file tree
Showing 7 changed files with 207 additions and 60 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/build-and-push.yml
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@ jobs:
rustup target add wasm32-unknown-unknown --toolchain nightly-2023-02-07
rustup component add rust-src --toolchain nightly-2023-02-07-unknown-linux-gnu
sudo apt-get install binaryen
cargo install cargo-dylint
cargo install dylint-link
cargo install cargo-dylint --version 2.1.11 --force --locked
cargo install dylint-link --version 2.1.11 --locked
cargo install cargo-contract --version 1.5.0 --force --locked
- name: Run tests
run: |
Expand Down
9 changes: 8 additions & 1 deletion bucket/ddc_bucket/cluster/entity.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,14 @@ impl ink_storage::traits::PackedAllocate for Cluster {
pub struct ClusterInfo {
pub cluster_id: ClusterId,
pub cluster: Cluster,
pub cluster_v_nodes: Vec<VNodeToken>,
pub cluster_v_nodes: Vec<NodeVNodesInfo>,
}

/// Groups the virtual nodes (vNodes) assigned to one physical node of a
/// cluster; `ClusterInfo.cluster_v_nodes` carries one entry per node.
#[derive(Clone, PartialEq, Encode, Decode)]
#[cfg_attr(feature = "std", derive(Debug, scale_info::TypeInfo))]
pub struct NodeVNodesInfo {
    // Key identifying the physical node the vNodes belong to.
    pub node_key: NodeKey,
    // vNode tokens assigned to that node (from the topology store).
    pub v_nodes: Vec<VNodeToken>,
}

pub const CLUSTER_PARAMS_MAX_LEN: usize = 100_000;
Expand Down
31 changes: 24 additions & 7 deletions bucket/ddc_bucket/cluster/messages.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use ink_prelude::vec::Vec;
use crate::ddc_bucket::bucket::entity::BucketId;
use crate::ddc_bucket::cash::{Cash, Payable};
use crate::ddc_bucket::cdn_node::entity::{CdnNode, CdnNodeKey};
use crate::ddc_bucket::cluster::entity::{ClusterInfo, KB_PER_GB};
use crate::ddc_bucket::cluster::entity::{ClusterInfo, NodeVNodesInfo, KB_PER_GB};
use crate::ddc_bucket::node::entity::{Node, NodeKey, Resource};
use crate::ddc_bucket::perm::entity::Permission;
use crate::ddc_bucket::topology::store::VNodeToken;
Expand Down Expand Up @@ -412,7 +412,13 @@ impl DdcBucket {

pub fn message_cluster_get(&self, cluster_id: ClusterId) -> Result<ClusterInfo> {
let cluster = self.clusters.get(cluster_id)?;
let cluster_v_nodes = self.topology.get_v_nodes_by_cluster(cluster_id);

let mut cluster_v_nodes: Vec<NodeVNodesInfo> = Vec::new();
for node_key in cluster.nodes_keys.clone() {
let v_nodes = self.topology.get_v_nodes_by_node(node_key.clone());
let v_nodes_info = NodeVNodesInfo { node_key, v_nodes };
cluster_v_nodes.push(v_nodes_info)
}

Ok(ClusterInfo {
cluster_id,
Expand All @@ -428,19 +434,27 @@ impl DdcBucket {
filter_manager_id: Option<AccountId>,
) -> (Vec<ClusterInfo>, u32) {
let mut clusters = Vec::with_capacity(limit as usize);
for cluster_id in offset..offset + limit {
let cluster = match self.clusters.clusters.get(cluster_id) {
for idx in offset..offset + limit {
let cluster_id = match self.clusters.clusters_ids.get(idx as usize) {
None => break, // No more items, stop.
Some(cluster) => cluster,
Some(cluster_id) => *cluster_id,
};

let cluster = self.clusters.clusters.get(cluster_id).unwrap();

// Apply the filter if given.
if let Some(manager_id) = filter_manager_id {
if manager_id != cluster.manager_id {
continue; // Skip non-matches.
}
}

let cluster_v_nodes = self.topology.get_v_nodes_by_cluster(cluster_id);
let mut cluster_v_nodes: Vec<NodeVNodesInfo> = Vec::new();
for node_key in cluster.nodes_keys.clone() {
let v_nodes = self.topology.get_v_nodes_by_node(node_key.clone());
let v_nodes_info = NodeVNodesInfo { node_key, v_nodes };
cluster_v_nodes.push(v_nodes_info)
}

// Include the complete status of matched items.
let cluster_info = ClusterInfo {
Expand All @@ -451,7 +465,10 @@ impl DdcBucket {

clusters.push(cluster_info);
}
(clusters, self.clusters.next_cluster_id)
(
clusters,
self.clusters.clusters_ids.len().try_into().unwrap(),
)
}

pub fn message_cluster_distribute_revenues(&mut self, cluster_id: ClusterId) -> Result<()> {
Expand Down
16 changes: 15 additions & 1 deletion bucket/ddc_bucket/cluster/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,29 +2,40 @@

use super::entity::{Cluster, ClusterId, ClusterParams};
use crate::ddc_bucket::{AccountId, Error::*, Resource, Result};
use ink_prelude::vec::Vec;
use ink_storage::traits::{SpreadAllocate, SpreadLayout};
use ink_storage::Mapping;

#[derive(SpreadAllocate, SpreadLayout, Default)]
#[cfg_attr(feature = "std", derive(ink_storage::traits::StorageLayout, Debug))]
pub struct ClusterStore {
pub next_cluster_id: u32,
pub next_cluster_id: ClusterId,
pub clusters: Mapping<ClusterId, Cluster>,
pub clusters_ids: Vec<ClusterId>,
}

// https://use.ink/datastructures/storage-layout#packed-vs-non-packed-layout
// There is a buffer with only limited capacity (around 16KB in the default configuration) available.
pub const MAX_CLUSTERS_LEN_IN_VEC: usize = 3900;

impl ClusterStore {
/// Creates a new `Cluster` managed by `manager_id`, stores it under a fresh
/// id, and registers that id in the `clusters_ids` index vector.
///
/// # Errors
/// Returns `NodesSizeExceedsLimit` when the bounded id vector is already at
/// `MAX_CLUSTERS_LEN_IN_VEC`, and propagates any error from `Cluster::new`.
pub fn create(
    &mut self,
    manager_id: AccountId,
    cluster_params: ClusterParams,
    resource_per_v_node: Resource,
) -> Result<ClusterId> {
    // The id vector lives in a single storage cell, so its length is capped
    // to stay within the contract's storage buffer (~16KB, see note above).
    if self.clusters_ids.len() >= MAX_CLUSTERS_LEN_IN_VEC {
        // NOTE(review): this variant name refers to nodes, not clusters; a
        // dedicated `ClustersSizeExceedsLimit` variant would read better —
        // kept as-is so callers matching on the error are unaffected.
        return Err(NodesSizeExceedsLimit);
    }

    let cluster_id = self.next_cluster_id;
    self.next_cluster_id += 1;

    let cluster = Cluster::new(manager_id, cluster_params, resource_per_v_node)?;

    // Keep the id index in sync with the clusters mapping.
    self.clusters.insert(&cluster_id, &cluster);
    self.clusters_ids.push(cluster_id);
    Ok(cluster_id)
}

Expand All @@ -43,5 +54,8 @@ impl ClusterStore {

/// Deletes the cluster from the mapping and drops its id from the
/// `clusters_ids` index vector.
pub fn remove(&mut self, cluster_id: ClusterId) {
    self.clusters.remove(cluster_id);
    // Ids are pushed exactly once in `create`, so they are unique: retaining
    // everything else removes at most one entry and preserves the order of
    // the remaining ids — same effect as the position-then-remove scan.
    self.clusters_ids.retain(|id| *id != cluster_id);
}
}
Loading

0 comments on commit 99a3f57

Please sign in to comment.