From 295d49de551d9b85dbff721f23c60f54bdf72cb5 Mon Sep 17 00:00:00 2001
From: salmantoor
Date: Mon, 6 May 2024 21:30:04 +0000
Subject: [PATCH] Removing incompatible files.

---
 Power-consumption-keras/Dockerfile           |  16 --
 Power-consumption-keras/bin/build.sh         |   8 -
 Power-consumption-keras/bin/get_data         |  21 --
 Power-consumption-keras/bin/init_venv.sh     |   9 -
 .../bin/init_venv_macos.sh                   |  11 -
 Power-consumption-keras/bin/split_data       |  42 ----
 Power-consumption-keras/client/entrypoint    | 130 ----------
 Power-consumption-keras/client/fedn.yaml     |   5 -
 .../requirements-osx-m1.txt                  |   4 -
 Power-consumption-keras/requirements.txt     |   3 -
 Power-consumption-pytorch/Dockerfile         |  16 --
 Power-consumption-pytorch/bin/build.sh       |   8 -
 Power-consumption-pytorch/bin/get_data       |  21 --
 Power-consumption-pytorch/bin/init_venv.sh   |  10 -
 .../bin/init_venv_macos.sh                   |  12 -
 Power-consumption-pytorch/bin/split_data     |  42 ----
 Power-consumption-pytorch/client/entrypoint  | 237 ------------------
 Power-consumption-pytorch/client/fedn.yaml   |   5 -
 .../requirements-osx-m1.txt                  |   4 -
 Power-consumption-pytorch/requirements.txt   |   4 -
 20 files changed, 608 deletions(-)
 delete mode 100644 Power-consumption-keras/Dockerfile
 delete mode 100755 Power-consumption-keras/bin/build.sh
 delete mode 100755 Power-consumption-keras/bin/get_data
 delete mode 100755 Power-consumption-keras/bin/init_venv.sh
 delete mode 100755 Power-consumption-keras/bin/init_venv_macos.sh
 delete mode 100755 Power-consumption-keras/bin/split_data
 delete mode 100755 Power-consumption-keras/client/entrypoint
 delete mode 100644 Power-consumption-keras/client/fedn.yaml
 delete mode 100644 Power-consumption-keras/requirements-osx-m1.txt
 delete mode 100644 Power-consumption-keras/requirements.txt
 delete mode 100644 Power-consumption-pytorch/Dockerfile
 delete mode 100755 Power-consumption-pytorch/bin/build.sh
 delete mode 100755 Power-consumption-pytorch/bin/get_data
 delete mode 100755 Power-consumption-pytorch/bin/init_venv.sh
 delete mode 100755 Power-consumption-pytorch/bin/init_venv_macos.sh
 delete mode 100755 Power-consumption-pytorch/bin/split_data
 delete mode 100755 Power-consumption-pytorch/client/entrypoint
 delete mode 100644 Power-consumption-pytorch/client/fedn.yaml
 delete mode 100644 Power-consumption-pytorch/requirements-osx-m1.txt
 delete mode 100644 Power-consumption-pytorch/requirements.txt

diff --git a/Power-consumption-keras/Dockerfile b/Power-consumption-keras/Dockerfile
deleted file mode 100644
index e953af6..0000000
--- a/Power-consumption-keras/Dockerfile
+++ /dev/null
@@ -1,16 +0,0 @@
-FROM python:3.10.6-slim as base
-LABEL maintainer="salman@scaleoutsystems.com"
-WORKDIR /app
-COPY requirements.txt .
-RUN apt-get update \
-  && apt-get install --no-install-recommends -y git \
-  && apt-get clean \
-  && rm -rf /var/lib/apt/lists/* \
-  && pip install git+https://github.com/scaleoutsystems/fedn.git@master#egg=fedn\&subdirectory=fedn \
-  && pip install --no-cache-dir -r requirements.txt
-
-
-FROM python:3.10.6-slim as build
-COPY --from=base /usr/local/lib/python3.10/site-packages/ /usr/local/lib/python3.10/site-packages/
-COPY --from=base /usr/local/bin/fedn /usr/local/bin/
-WORKDIR /app
diff --git a/Power-consumption-keras/bin/build.sh b/Power-consumption-keras/bin/build.sh
deleted file mode 100755
index 44eda61..0000000
--- a/Power-consumption-keras/bin/build.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-set -e
-
-# Init seed
-client/entrypoint init_seed
-
-# Make compute package
-tar -czvf package.tgz client
diff --git a/Power-consumption-keras/bin/get_data b/Power-consumption-keras/bin/get_data
deleted file mode 100755
index 4c449d0..0000000
--- a/Power-consumption-keras/bin/get_data
+++ /dev/null
@@ -1,21 +0,0 @@
-#!./.mnist-keras/bin/python
-import os
-
-import fire
-import numpy as np
-import tensorflow as tf
-
-
-def get_data(out_dir='data'):
-    # Make dir if necessary
-    if not os.path.exists(out_dir):
-        os.mkdir(out_dir)
-
-    # Download data
-    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
-    np.savez(f'{out_dir}/mnist.npz', x_train=x_train,
-             y_train=y_train, x_test=x_test, y_test=y_test)
-
-
-if __name__ == '__main__':
-    fire.Fire(get_data)
diff --git a/Power-consumption-keras/bin/init_venv.sh b/Power-consumption-keras/bin/init_venv.sh
deleted file mode 100755
index 926aaab..0000000
--- a/Power-consumption-keras/bin/init_venv.sh
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-set -e
-
-# Init venv
-python -m venv .power-consumption-keras
-
-# Pip deps
-.power-consumption-keras/bin/pip install --upgrade pip
-.power-consumption-keras/bin/pip install -r requirements.txt
diff --git a/Power-consumption-keras/bin/init_venv_macos.sh b/Power-consumption-keras/bin/init_venv_macos.sh
deleted file mode 100755
index be71b6d..0000000
--- a/Power-consumption-keras/bin/init_venv_macos.sh
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-# Install virtualenv
-python3 -m pip install virtualenv
-
-# Init venv
-python3 -m virtualenv .power-consumption-keras
-
-# Pip deps
-.power-consumption-keras/bin/pip install --upgrade pip
-.power-consumption-keras/bin/pip install -r requirements-osx-m1.txt
diff --git a/Power-consumption-keras/bin/split_data b/Power-consumption-keras/bin/split_data
deleted file mode 100755
index bb583b6..0000000
--- a/Power-consumption-keras/bin/split_data
+++ /dev/null
@@ -1,42 +0,0 @@
-#!./.mnist-keras/bin/python
-import os
-from math import floor
-
-import fire
-import numpy as np
-
-
-def splitset(dataset, parts):
-    n = dataset.shape[0]
-    local_n = floor(n/parts)
-    result = []
-    for i in range(parts):
-        result.append(dataset[i*local_n: (i+1)*local_n])
-    return np.array(result)
-
-
-def split(dataset='data/mnist.npz', outdir='data', n_splits=2):
-    # Load and convert to dict
-    package = np.load(dataset)
-    data = {}
-    for key, val in package.items():
-        data[key] = splitset(val, n_splits)
-
-    # Make dir if necessary
-    if not os.path.exists(f'{outdir}/clients'):
-        os.mkdir(f'{outdir}/clients')
-
-    # Make splits
-    for i in range(n_splits):
-        subdir = f'{outdir}/clients/{str(i+1)}'
-        if not os.path.exists(subdir):
-            os.mkdir(subdir)
-        np.savez(f'{subdir}/mnist.npz',
-                 x_train=data['x_train'][i],
-                 y_train=data['y_train'][i],
-                 x_test=data['x_test'][i],
-                 y_test=data['y_test'][i])
-
-
-if __name__ == '__main__':
-    fire.Fire(split)
diff --git a/Power-consumption-keras/client/entrypoint b/Power-consumption-keras/client/entrypoint
deleted file mode 100755
index ed9663c..0000000
--- a/Power-consumption-keras/client/entrypoint
+++ /dev/null
@@ -1,130 +0,0 @@
-#!./.power-consumption-keras/bin/python
-
-import json
-import os
-
-import docker
-import fire
-import numpy as np
-import tensorflow as tf
-
-from fedn.utils.kerashelper import KerasHelper
-
-
-
-def _get_data_path():
-    # Figure out FEDn client number from container name
-    client = docker.from_env()
-    container = client.containers.get(os.environ['HOSTNAME'])
-    number = container.name[-1]
-
-    # Return data path
-    return f"/var/data/clients/{number}/power.npz"
-
-
-def _compile_model(img_rows=28, img_cols=28):
-    # Set input shape
-    #input_shape = (img_rows, img_cols, 1)
-
-    # Define model
-    opt = tf.keras.optimizers.SGD(lr=0.0001)
-    model = tf.keras.models.Sequential()
-    model.add(tf.keras.layers.Dense(64, input_dim=4, activation="relu"))
-    model.add(tf.keras.layers.Dense(32, activation="relu"))
-    model.add(tf.keras.layers.Dense(1, activation="linear"))
-    #model.summary()
-    model.compile(loss = "mse", optimizer = opt,metrics=['mae'])
-
-    return model
-
-
-def _load_data(data_path, is_train=True):
-    # Load data
-    if data_path is None:
-        data = np.load(_get_data_path())
-    else:
-        data = np.load(data_path)
-
-    if is_train:
-        X = data['x_train']
-        y = data['y_train']
-    else:
-        X = data['x_test']
-        y = data['y_test']
-
-    return X, y
-
-
-def init_seed(out_path='seed.npz'):
-    weights = _compile_model().get_weights()
-    helper = KerasHelper()
-    helper.save_model(weights, out_path)
-
-
-def train(in_model_path, out_model_path, data_path=None, batch_size=500, epochs=1):
-    # Load data
-    x_train, y_train = _load_data(data_path)
-
-    # Load model
-    model = _compile_model()
-    helper = KerasHelper()
-    weights = helper.load_model(in_model_path)
-    model.set_weights(weights)
-
-    # Train
-    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs)
-
-    # Save
-    weights = model.get_weights()
-    helper.save_model(weights, out_model_path)
-
-
-def validate(in_model_path, out_json_path, data_path=None):
-    # Load data
-    x_train, y_train = _load_data(data_path)
-    x_test, y_test = _load_data(data_path, is_train=False)
-
-    # Load model
-    model = _compile_model()
-    helper = KerasHelper()
-    weights = helper.load_model(in_model_path)
-    model.set_weights(weights)
-
-    # Evaluate
-    #model_score = model.evaluate(x_train, y_train)
-    #model_score_test = model.evaluate(x_test, y_test)
-    #y_pred = model.predict(x_test)
-    #y_pred = np.argmax(y_pred, axis=1)
-
-    # Evaluate
-
-    y_pred = model.predict(x_test)
-    mae_loss = tf.keras.losses.mean_absolute_error(y_test, y_pred).numpy()
-
-    #print('mae_loss: ', np.mean(mae_loss))
-    test_mae_loss = np.mean(mae_loss)
-
-    y_pred = model.predict(x_test)
-    mse_loss = tf.keras.losses.mean_squared_error(y_test, y_pred).numpy()
-
-    #print('mse_loss: ', np.mean(mse_loss))
-    test_mse_loss = np.mean(mse_loss)
-
-    # JSON schema
-    report = {
-        "test_mae": str(test_mae_loss),
-        "test_mse": str(test_mse_loss),
-    }
-
-    # Save JSON
-    with open(out_json_path, "w") as fh:
-        fh.write(json.dumps(report))
-
-
-if __name__ == '__main__':
-    fire.Fire({
-        'init_seed': init_seed,
-        'train': train,
-        'validate': validate,
-        '_get_data_path': _get_data_path, # for testing
-    })
diff --git a/Power-consumption-keras/client/fedn.yaml b/Power-consumption-keras/client/fedn.yaml
deleted file mode 100644
index f2e014a..0000000
--- a/Power-consumption-keras/client/fedn.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-entry_points:
-  train:
-    command: python entrypoint train $ENTRYPOINT_OPTS
-  validate:
-    command: python entrypoint validate $ENTRYPOINT_OPTS
diff --git a/Power-consumption-keras/requirements-osx-m1.txt b/Power-consumption-keras/requirements-osx-m1.txt
deleted file mode 100644
index ae6990f..0000000
--- a/Power-consumption-keras/requirements-osx-m1.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-tensorflow-macos==2.9.2
-tensorflow-metal==0.5.1
-fire==0.3.1
-docker==5.0.2
diff --git a/Power-consumption-keras/requirements.txt b/Power-consumption-keras/requirements.txt
deleted file mode 100644
index 5a08a31..0000000
--- a/Power-consumption-keras/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-tensorflow==2.8.0
-fire==0.3.1
-docker==5.0.2
\ No newline at end of file
diff --git a/Power-consumption-pytorch/Dockerfile b/Power-consumption-pytorch/Dockerfile
deleted file mode 100644
index e953af6..0000000
--- a/Power-consumption-pytorch/Dockerfile
+++ /dev/null
@@ -1,16 +0,0 @@
-FROM python:3.10.6-slim as base
-LABEL maintainer="salman@scaleoutsystems.com"
-WORKDIR /app
-COPY requirements.txt .
-RUN apt-get update \
-  && apt-get install --no-install-recommends -y git \
-  && apt-get clean \
-  && rm -rf /var/lib/apt/lists/* \
-  && pip install git+https://github.com/scaleoutsystems/fedn.git@master#egg=fedn\&subdirectory=fedn \
-  && pip install --no-cache-dir -r requirements.txt
-
-
-FROM python:3.10.6-slim as build
-COPY --from=base /usr/local/lib/python3.10/site-packages/ /usr/local/lib/python3.10/site-packages/
-COPY --from=base /usr/local/bin/fedn /usr/local/bin/
-WORKDIR /app
diff --git a/Power-consumption-pytorch/bin/build.sh b/Power-consumption-pytorch/bin/build.sh
deleted file mode 100755
index 44eda61..0000000
--- a/Power-consumption-pytorch/bin/build.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-set -e
-
-# Init seed
-client/entrypoint init_seed
-
-# Make compute package
-tar -czvf package.tgz client
diff --git a/Power-consumption-pytorch/bin/get_data b/Power-consumption-pytorch/bin/get_data
deleted file mode 100755
index 4c449d0..0000000
--- a/Power-consumption-pytorch/bin/get_data
+++ /dev/null
@@ -1,21 +0,0 @@
-#!./.mnist-keras/bin/python
-import os
-
-import fire
-import numpy as np
-import tensorflow as tf
-
-
-def get_data(out_dir='data'):
-    # Make dir if necessary
-    if not os.path.exists(out_dir):
-        os.mkdir(out_dir)
-
-    # Download data
-    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
-    np.savez(f'{out_dir}/mnist.npz', x_train=x_train,
-             y_train=y_train, x_test=x_test, y_test=y_test)
-
-
-if __name__ == '__main__':
-    fire.Fire(get_data)
diff --git a/Power-consumption-pytorch/bin/init_venv.sh b/Power-consumption-pytorch/bin/init_venv.sh
deleted file mode 100755
index ad4155c..0000000
--- a/Power-consumption-pytorch/bin/init_venv.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-set -e
-
-# Init venv
-python -m venv .power-consumption-pytorch
-
-# Pip deps
-.power-consumption-pytorch/bin/pip install --upgrade pip
-.power-consumption-pytorch/bin/pip install -e /home/ubuntu/fedn/fedn
-.power-consumption-pytorch/bin/pip install -r requirements.txt --no-cache-dir
diff --git a/Power-consumption-pytorch/bin/init_venv_macos.sh b/Power-consumption-pytorch/bin/init_venv_macos.sh
deleted file mode 100755
index 91eed61..0000000
--- a/Power-consumption-pytorch/bin/init_venv_macos.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-# Install virtualenv
-python3 -m pip install virtualenv
-
-# Init venv
-python3 -m virtualenv .power-consumption-pytorch
-
-# Pip deps
-.power-consumption-pytorch/bin/pip install --upgrade pip
-.power-consumption-pytorch/bin/pip install -e /home/ubuntu/fedn/fedn
-.power-consumption-pytorch/bin/pip install -r requirements-osx-m1.txt
diff --git a/Power-consumption-pytorch/bin/split_data b/Power-consumption-pytorch/bin/split_data
deleted file mode 100755
index bb583b6..0000000
--- a/Power-consumption-pytorch/bin/split_data
+++ /dev/null
@@ -1,42 +0,0 @@
-#!./.mnist-keras/bin/python
-import os
-from math import floor
-
-import fire
-import numpy as np
-
-
-def splitset(dataset, parts):
-    n = dataset.shape[0]
-    local_n = floor(n/parts)
-    result = []
-    for i in range(parts):
-        result.append(dataset[i*local_n: (i+1)*local_n])
-    return np.array(result)
-
-
-def split(dataset='data/mnist.npz', outdir='data', n_splits=2):
-    # Load and convert to dict
-    package = np.load(dataset)
-    data = {}
-    for key, val in package.items():
-        data[key] = splitset(val, n_splits)
-
-    # Make dir if necessary
-    if not os.path.exists(f'{outdir}/clients'):
-        os.mkdir(f'{outdir}/clients')
-
-    # Make splits
-    for i in range(n_splits):
-        subdir = f'{outdir}/clients/{str(i+1)}'
-        if not os.path.exists(subdir):
-            os.mkdir(subdir)
-        np.savez(f'{subdir}/mnist.npz',
-                 x_train=data['x_train'][i],
-                 y_train=data['y_train'][i],
-                 x_test=data['x_test'][i],
-                 y_test=data['y_test'][i])
-
-
-if __name__ == '__main__':
-    fire.Fire(split)
diff --git a/Power-consumption-pytorch/client/entrypoint b/Power-consumption-pytorch/client/entrypoint
deleted file mode 100755
index c5bdb76..0000000
--- a/Power-consumption-pytorch/client/entrypoint
+++ /dev/null
@@ -1,237 +0,0 @@
-#!./.power-consumption-pytorch/bin/python
-
-import collections
-import json
-import math
-import os
-from random import randint
-from time import sleep
-
-import docker
-import fire
-import numpy as np
-import torch
-#from fedn.utils.pytorchhelper import PytorchHelper
-from fedn.utils.helpers import get_helper, save_metadata, save_metrics
-
-from torch.nn import Linear
-from torch.nn import ReLU
-from torch.nn import Sigmoid
-from torch.nn import Module
-from torch.optim import SGD
-
-from torch.nn.init import kaiming_uniform_
-from torch.nn.init import xavier_uniform_
-
-def _get_data_path():
-    # Figure out FEDn client number from container name
-    client = docker.from_env()
-    container = client.containers.get(os.environ['HOSTNAME'])
-    number = container.name[-1]
-
-    # Return data path
-    return f"/var/data/clients/{number}/power.npz"
-
-def _compile_model():
-
-
-    class Net(torch.nn.Module):
-        def __init__(self):
-            super(Net, self).__init__()
-
-            self.hidden1 = torch.nn.Linear(4, 64)
-            kaiming_uniform_(self.hidden1.weight, nonlinearity='relu')
-            self.act1 = ReLU()
-
-            self.hidden2 = Linear(64, 32)
-            kaiming_uniform_(self.hidden2.weight, nonlinearity='relu')
-            self.act2 = ReLU()
-
-            self.hidden3 = Linear(32, 1)
-            xavier_uniform_(self.hidden3.weight)
-
-
-        def forward(self, x):
-
-            # input to first hidden layer
-            x = self.hidden1(x)
-            x = self.act1(x)
-
-            # second hidden layer
-            x = self.hidden2(x)
-            x = self.act2(x)
-
-            # third hidden layer and output
-            x = self.hidden3(x)
-            #x = self.act3(x)
-
-            return x
-
-    # Return model
-    return Net()
-
-
-def _load_model(model_path):
-
-    #helper = PytorchHelper()
-    helper = get_helper('pytorchhelper')
-    weights_np = helper.load(model_path)
-    weights = collections.OrderedDict()
-    for w in weights_np:
-        weights[w] = torch.tensor(weights_np[w])
-    model = _compile_model()
-    model.load_state_dict(weights)
-    model.eval()
-    return model
-
-
-def _load_data(data_path, is_train=True):
-    # Load data
-    if data_path is None:
-        data = np.load(_get_data_path())
-    else:
-        data = np.load(data_path)
-
-    if is_train:
-        X = data['x_train']
-        y = data['y_train']
-    else:
-        X = data['x_test']
-        y = data['y_test']
-
-    return X, y
-
-
-def init_seed(out_path='seed.npz'):
-
-    #Init and save
-    model = _compile_model()
-    _save_model(model, out_path)
-
-def _save_model(model, out_path):
-
-    weights = model.state_dict()
-    weights_np = collections.OrderedDict()
-    for w in weights:
-        weights_np[w] = weights[w].cpu().detach().numpy()
-    #helper = PytorchHelper()
-    helper = get_helper('pytorchhelper')
-    helper.save(weights, out_path)
-
-
-def train(in_model_path, out_model_path, data_path=None, batch_size=100, epochs=3, lr=0.001):
-    # Load data
-    x_train, y_train = _load_data(data_path)
-
-    # Load model
-    model = _load_model(in_model_path)
-
-    # Train
-    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
-
-    print ('len(x_train): ',len(x_train))
-
-    n_batches = int(math.ceil(len(x_train) / batch_size))
-    print ('n_batches:', n_batches)
-    criterion = torch.nn.L1Loss()
-
-    print ('model shape: ', model)
-
-
-    for e in range(epochs): # epoch loop
-
-        #sleep(randint(60,120))
-
-        for b in range(n_batches): # batch loop
-            # Retrieve current batch
-            batch_x_tmp = torch.from_numpy(x_train[b * batch_size:(b + 1) * batch_size])
-            batch_x = torch.tensor(batch_x_tmp, dtype=torch.float32)
-
-            batch_y_tmp = torch.from_numpy(np.expand_dims(y_train[b * batch_size:(b + 1) * batch_size],-1))
-            batch_y = torch.tensor(batch_y_tmp, dtype=torch.float32)
-
-            # Train on batch
-            optimizer.zero_grad()
-            #print('batch x shape: ', batch_x.shape )
-            #print('batch y shape: ', batch_y.shape )
-            outputs = model(batch_x)
-
-            loss = criterion(outputs, batch_y)
-            loss.backward()
-            optimizer.step()
-            # Log
-            if b % 100 == 0:
-                print(
-                    f"Epoch {e}/{epochs-1} | Batch: {b}/{n_batches-1} | Loss: {loss.item()}")
-
-    # Metadata needed for aggregation server side
-    metadata = {
-        'num_examples': len(x_train),
-        'batch_size': batch_size,
-        'epochs': epochs,
-        'lr': lr
-    }
-
-    # Save JSON metadata file
-    save_metadata(metadata, out_model_path)
-
-
-    # Save
-    _save_model(model, out_model_path)
-
-def validate(in_model_path, out_json_path, data_path=None):
-    # Load data
-    x_train, y_train = _load_data(data_path)
-    x_test, y_test = _load_data(data_path, is_train=False)
-
-    # Load model
-    model = _load_model(in_model_path)
-
-    # Evaluate
-    criterion_mae = torch.nn.L1Loss()
-    criterion_mse = torch.nn.MSELoss()
-    with torch.no_grad():
-
-        x_train_t = torch.tensor(x_train, dtype=torch.float32)
-        train_out = model(x_train_t)
-
-        y_train = torch.from_numpy(np.expand_dims(y_train,-1))
-        y_train_t = torch.tensor(y_train, dtype=torch.float32)
-
-        training_loss_mae = criterion_mae(train_out, y_train_t)
-        training_loss_mse = criterion_mse(train_out, y_train_t)
-
-        x_test_t = torch.tensor(x_test, dtype=torch.float32)
-        test_out = model(x_test_t)
-
-        y_test = torch.from_numpy(np.expand_dims(y_test,-1))
-        y_test_t = torch.tensor(y_test, dtype=torch.float32)
-
-        test_loss_mae = criterion_mae(test_out, y_test_t)
-        test_loss_mse = criterion_mse(test_out, y_test_t)
-
-    #print('test_mae: ', test_loss_mae.item())
-    #print('test_mse: ', test_loss_mse.item())
-    #print('training_mae: ', training_loss_mae.item())
-    #print('training_mse: ', training_loss_mse.item())
-
-    # JSON schema
-    report = {
-        "test_mae": str(test_loss_mae.item()),
-        "test_mse": str(test_loss_mse.item()),
-        "training_mae": str(training_loss_mae.item()),
-        "training_mse": str(training_loss_mse.item()),
-
-    }
-
-    # Save JSON
-    save_metrics(report, out_json_path)
-
-
-if __name__ == '__main__':
-    fire.Fire({
-        'init_seed': init_seed,
-        'train': train,
-        'validate': validate,
-        '_get_data_path': _get_data_path, # for testing
-    })
diff --git a/Power-consumption-pytorch/client/fedn.yaml b/Power-consumption-pytorch/client/fedn.yaml
deleted file mode 100644
index f2e014a..0000000
--- a/Power-consumption-pytorch/client/fedn.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-entry_points:
-  train:
-    command: python entrypoint train $ENTRYPOINT_OPTS
-  validate:
-    command: python entrypoint validate $ENTRYPOINT_OPTS
diff --git a/Power-consumption-pytorch/requirements-osx-m1.txt b/Power-consumption-pytorch/requirements-osx-m1.txt
deleted file mode 100644
index 0bf7a6e..0000000
--- a/Power-consumption-pytorch/requirements-osx-m1.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-torch==1.13.1
-torchvision==0.14.1
-fire==0.3.1
-docker==6.1.1
diff --git a/Power-consumption-pytorch/requirements.txt b/Power-consumption-pytorch/requirements.txt
deleted file mode 100644
index 0bf7a6e..0000000
--- a/Power-consumption-pytorch/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-torch==1.13.1
-torchvision==0.14.1
-fire==0.3.1
-docker==6.1.1