diff --git a/make.sh b/make.sh
index c2ecdad..1db54ec 100755
--- a/make.sh
+++ b/make.sh
@@ -8,6 +8,7 @@
 #
 #   make.sh [-h] [-i] [-s] [-N] [-U] \
 #           [-O <swarm|kubernetes>] \
+#           [-F <swift|filesystem|zipfile>] \
 #           [-S <storeBase>] \
 #           [local|fnndsc[:dev]]
 #
@@ -23,9 +24,13 @@
 #
 #   unmake.sh ; sudo rm -fr CHRIS_REMOTE_FS; rm -fr CHRIS_REMOTE_FS; make.sh
 #
-# Run full pfcon instantiation operating in-network on Swarm:
+# Run full pfcon instantiation operating in-network on Swarm using Swift storage:
 #
-#   unmake.sh -N; sudo rm -fr CHRIS_REMOTE_FS; rm -fr CHRIS_REMOTE_FS; make.sh -N
+#   unmake.sh -N -F swift; sudo rm -fr CHRIS_REMOTE_FS; rm -fr CHRIS_REMOTE_FS; make.sh -N -F swift
+#
+# Run full pfcon instantiation operating in-network on Swarm using mounted filesystem storage:
+#
+#   unmake.sh -N -F filesystem; sudo rm -fr CHRIS_REMOTE_FS; rm -fr CHRIS_REMOTE_FS; make.sh -N -F filesystem
 #
 # Skip the intro:
 #
@@ -61,6 +66,12 @@
 #   Optional set pfcon to operate in-network mode (using a swift storage instead of
 #   a zip file).
 #
+# -F <swift|filesystem|zipfile>
+#
+#   Explicitly set the storage environment. This option must be swift or filesystem
+#   for pfcon operating in-network mode. For pfcon operating in out-of-network mode
+#   it must be set to zipfile (default).
+#
 # -U
 #
 #   Optional skip the UNIT tests.
@@ -90,14 +101,15 @@ source ./cparse.sh

 declare -i STEP=0
 ORCHESTRATOR=swarm
+STORAGE=zipfile
 HERE=$(pwd)

 print_usage () {
-    echo "Usage: ./make.sh [-h] [-i] [-s] [-N] [-U] [-O <swarm|kubernetes>] [-S <storeBase>] [local|fnndsc[:dev]]"
+    echo "Usage: ./make.sh [-h] [-i] [-s] [-N] [-F <swift|filesystem|zipfile>] [-U] [-O <swarm|kubernetes>] [-S <storeBase>] [local|fnndsc[:dev]]"
     exit 1
 }

-while getopts ":hsiNUO:S:" opt; do
+while getopts ":hsiNUF:O:S:" opt; do
     case $opt in
         h) print_usage
            ;;
@@ -107,6 +119,12 @@ while getopts ":hsiNUO:S:" opt; do
            ;;
         N) b_pfconInNetwork=1
            ;;
+        F) STORAGE=$OPTARG
+           if ! [[ "$STORAGE" =~ ^(swift|filesystem|zipfile)$ ]]; then
+              echo "Invalid value for option -- F"
+              print_usage
+           fi
+           ;;
         U) b_skipUnitTests=1
            ;;
         O) ORCHESTRATOR=$OPTARG
@@ -157,10 +175,15 @@ title -d 1 "Setting global exports..."
     fi
     if (( b_pfconInNetwork )) ; then
         echo -e "PFCON_INNETWORK=True" | ./boxes.sh
+        if [[ $STORAGE == 'zipfile' ]]; then
+            echo -e "Need to pass '-F <swift|filesystem>' when PFCON_INNETWORK=True" | ./boxes.sh
+            exit 1
+        fi
     else
         echo -e "PFCON_INNETWORK=False" | ./boxes.sh
     fi
     echo -e "ORCHESTRATOR=$ORCHESTRATOR" | ./boxes.sh
+    echo -e "STORAGE=$STORAGE" | ./boxes.sh
     echo -e "exporting STOREBASE=$STOREBASE " | ./boxes.sh
     export STOREBASE=$STOREBASE
     export SOURCEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
@@ -187,7 +210,11 @@ windowBottom
 title -d 1 "Building :dev"
     cd $HERE
     if (( b_pfconInNetwork )) ; then
-        CMD="docker compose -f swarm/docker-compose_dev_innetwork.yml build"
+        if [[ $STORAGE == 'swift' ]]; then
+            CMD="docker compose -f swarm/docker-compose_dev_innetwork.yml build"
+        elif [[ $STORAGE == 'filesystem' ]]; then
+            CMD="docker compose -f swarm/docker-compose_dev_innetwork_fs.yml build"
+        fi
     else
         CMD="docker compose -f swarm/docker-compose_dev.yml build"
     fi
@@ -232,8 +259,13 @@ windowBottom
 title -d 1 "Starting pfcon containerized dev environment on $ORCHESTRATOR"
     if [[ $ORCHESTRATOR == swarm ]]; then
         if (( b_pfconInNetwork )) ; then
-            echo "docker stack deploy -c swarm/docker-compose_dev_innetwork.yml pfcon_dev_stack" | ./boxes.sh ${LightCyan}
-            docker stack deploy -c swarm/docker-compose_dev_innetwork.yml pfcon_dev_stack
+            if [[ $STORAGE == 'swift' ]]; then
+                echo "docker stack deploy -c swarm/docker-compose_dev_innetwork.yml pfcon_dev_stack" | ./boxes.sh ${LightCyan}
+                docker stack deploy -c swarm/docker-compose_dev_innetwork.yml pfcon_dev_stack
+            elif [[ $STORAGE == 'filesystem' ]]; then
+                echo "docker stack deploy -c swarm/docker-compose_dev_innetwork_fs.yml pfcon_dev_stack" | ./boxes.sh ${LightCyan}
+                docker stack deploy -c swarm/docker-compose_dev_innetwork_fs.yml pfcon_dev_stack
+            fi
         else
             echo "docker stack deploy -c swarm/docker-compose_dev.yml pfcon_dev_stack" | ./boxes.sh ${LightCyan}
             docker stack deploy -c swarm/docker-compose_dev.yml pfcon_dev_stack
@@ -274,7 +306,11 @@ if (( ! b_skipUnitTests )) ; then
     sleep 5
     if [[ $ORCHESTRATOR == swarm ]]; then
         if (( b_pfconInNetwork )) ; then
-            docker exec $pfcon_dev pytest tests/test_resources_innetwork.py --color=yes
+            if [[ $STORAGE == 'swift' ]]; then
+                docker exec $pfcon_dev pytest tests/test_resources_innetwork.py --color=yes
+            elif [[ $STORAGE == 'filesystem' ]]; then
+                docker exec $pfcon_dev pytest tests/test_resources_innetwork_fs.py --color=yes
+            fi
         else
             docker exec $pfcon_dev pytest tests/test_resources.py --color=yes
         fi
diff --git a/pfcon/config.py b/pfcon/config.py
index 9d3cd1a..d7b765c 100755
--- a/pfcon/config.py
+++ b/pfcon/config.py
@@ -27,13 +27,16 @@ def __init__(self):

         if self.PFCON_INNETWORK:
             self.STORAGE_ENV = env('STORAGE_ENV', 'swift')
-            if self.STORAGE_ENV != 'swift':
+            if self.STORAGE_ENV not in ('swift', 'filesystem'):
                 raise ValueError(f"Unsupported value '{self.STORAGE_ENV}' for STORAGE_ENV")
         else:
             self.STORAGE_ENV = env('STORAGE_ENV', 'zipfile')
             if self.STORAGE_ENV != 'zipfile':
                 raise ValueError(f"Unsupported value '{self.STORAGE_ENV}' for STORAGE_ENV")

+        if self.STORAGE_ENV == 'filesystem':
+            self.FILESYSTEM_BASEDIR = env('FILESYSTEM_BASEDIR', '/filesystem')
+
         self.STORE_BASE = env('STOREBASE', '/var/local/storeBase')
         self.env = env
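Taken together, the config change above gives a small decision table: in-network pfcon accepts `swift` (default) or `filesystem`, while out-of-network pfcon only accepts `zipfile`. A minimal standalone sketch of that logic, using plain `os.environ` instead of pfcon's `environs`-based `env` helper:

```python
import os

def resolve_storage_env(pfcon_innetwork: bool) -> str:
    # Mirrors pfcon/config.py above: per-mode defaults and allowed values.
    if pfcon_innetwork:
        storage_env = os.environ.get('STORAGE_ENV', 'swift')
        if storage_env not in ('swift', 'filesystem'):
            raise ValueError(f"Unsupported value '{storage_env}' for STORAGE_ENV")
    else:
        storage_env = os.environ.get('STORAGE_ENV', 'zipfile')
        if storage_env != 'zipfile':
            raise ValueError(f"Unsupported value '{storage_env}' for STORAGE_ENV")
    return storage_env

# e.g. with STORAGE_ENV=filesystem exported, resolve_storage_env(True)
# returns 'filesystem' while resolve_storage_env(False) raises ValueError.
```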
diff --git a/pfcon/filesystem_storage.py b/pfcon/filesystem_storage.py
new file mode 100755
index 0000000..3f9ce6d
--- /dev/null
+++ b/pfcon/filesystem_storage.py
@@ -0,0 +1,91 @@
+"""
+Handle filesystem-based (e.g. mounted directory) storage. This is used when pfcon is
+in-network and configured to directly copy the data from a filesystem.
+"""
+
+import logging
+import datetime
+import os
+import json
+import io
+import shutil
+
+
+from .base_storage import BaseStorage
+
+
+logger = logging.getLogger(__name__)
+
+
+class FileSystemStorage(BaseStorage):
+
+    def __init__(self, config):
+
+        super().__init__(config)
+
+        self.base_dir = config.get('FILESYSTEM_BASEDIR')
+
+
+    def store_data(self, job_id, job_incoming_dir, data, **kwargs):
+        """
+        Copy all the files/folders under each input folder in the specified data list
+        into the specified incoming directory.
+        """
+        nfiles = 0
+        for rel_path in data:
+            abs_path = os.path.join(self.base_dir, rel_path.strip('/'))
+
+            for root, dirs, files in os.walk(abs_path):
+                local_path = root.replace(abs_path, job_incoming_dir, 1)
+                os.makedirs(local_path, exist_ok=True)
+
+                for filename in files:
+                    fs_file_path = os.path.join(root, filename)
+                    try:
+                        shutil.copy(fs_file_path, local_path)
+                    except Exception as e:
+                        logger.error(f'Failed to copy file {fs_file_path} for '
+                                     f'job {job_id}, detail: {str(e)}')
+                        raise
+                    nfiles += 1
+
+        logger.info(f'{nfiles} files copied from file system for job {job_id}')
+        return {
+            'jid': job_id,
+            'nfiles': nfiles,
+            'timestamp': f'{datetime.datetime.now()}',
+            'path': job_incoming_dir
+        }
+
+    def get_data(self, job_id, job_outgoing_dir, **kwargs):
+        """
+        Copy output files/folders from the specified outgoing directory into the folder
+        specified by the job_output_path keyword argument (relative to the FS base dir).
+        Then create a job json file ready for transmission to a remote origin. The json
+        file contains the job_output_path prefix and the list of relative file paths.
+        """
+        job_output_path = kwargs['job_output_path']
+        fs_output_path = os.path.join(self.base_dir, job_output_path)
+        fs_rel_file_paths = []
+
+        for root, dirs, files in os.walk(job_outgoing_dir):
+            rel_path = os.path.relpath(root, job_outgoing_dir)
+            if rel_path == '.':
+                rel_path = ''
+            fs_path = os.path.join(fs_output_path, rel_path)
+            os.makedirs(fs_path, exist_ok=True)
+
+            for filename in files:
+                local_file_path = os.path.join(root, filename)
+                if not os.path.islink(local_file_path):
+                    try:
+                        shutil.copy(local_file_path, fs_path)
+                    except Exception as e:
+                        logger.error(f'Failed to copy file {local_file_path} for '
+                                     f'job {job_id}, detail: {str(e)}')
+                        raise
+                    fs_rel_file_paths.append(os.path.join(rel_path, filename))
+
+        data = {'job_output_path': job_output_path,
+                'rel_file_paths': fs_rel_file_paths}
+        return io.BytesIO(json.dumps(data).encode())
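For orientation, a minimal round trip through the new class. The paths here are hypothetical, and this assumes `BaseStorage.__init__` merely records the config mapping (the diff does not show `base_storage.py`):

```python
import json
import os
import tempfile

from pfcon.filesystem_storage import FileSystemStorage

base = tempfile.mkdtemp()   # stand-in for FILESYSTEM_BASEDIR
os.makedirs(os.path.join(base, 'foo/feed/input'))
with open(os.path.join(base, 'foo/feed/input/test.txt'), 'w') as f:
    f.write('Test file')

storage = FileSystemStorage({'FILESYSTEM_BASEDIR': base})

# What JobList.post does: pull inputs into the job's incoming dir.
incoming = tempfile.mkdtemp()
d_info = storage.store_data('chris-jid-1', incoming, ['foo/feed/input'])
assert d_info['nfiles'] == 1

# What JobFile.get does: push outputs back under the base dir and
# return a JSON manifest of the copied files.
manifest = json.loads(storage.get_data('chris-jid-1', incoming,
                                       job_output_path='foo/feed/output').read())
assert manifest['rel_file_paths'] == ['test.txt']
```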
diff --git a/pfcon/resources.py b/pfcon/resources.py
index 590f780..40af6cb 100755
--- a/pfcon/resources.py
+++ b/pfcon/resources.py
@@ -12,6 +12,8 @@
 from .services import PmanService, ServiceException
 from .zip_file_storage import ZipFileStorage
 from .swift_storage import SwiftStorage
+from .filesystem_storage import FileSystemStorage
+

 logger = logging.getLogger(__name__)

@@ -101,6 +103,15 @@ def post(self):
                     logger.error(f'Error while fetching files from swift and '
                                  f'storing job {job_id} data, detail: {str(e)}')
                     abort(400, message='input_dirs: Error fetching files from swift')
+
+            elif self.storage_env == 'filesystem':
+                storage = FileSystemStorage(app.config)
+                try:
+                    d_info = storage.store_data(job_id, incoming_dir, args.input_dirs)
+                except Exception as e:
+                    logger.error(f'Error while copying files from filesystem and '
+                                 f'storing job {job_id} data, detail: {str(e)}')
+                    abort(400, message='input_dirs: Error copying files from filesystem')
         else:
             if self.storage_env == 'zipfile':
                 storage = ZipFileStorage(app.config)
@@ -165,6 +176,9 @@ def delete(self, job_id):
         if self.pfcon_innetwork:
             if self.storage_env == 'swift':
                 storage = SwiftStorage(app.config)
+
+            elif self.storage_env == 'filesystem':
+                storage = FileSystemStorage(app.config)
         else:
             if self.storage_env == 'zipfile':
                 storage = ZipFileStorage(app.config)
@@ -214,13 +228,18 @@ def get(self, job_id):

         if self.pfcon_innetwork:
             job_output_path = request.args.get('job_output_path')
+
             if job_output_path:
+                storage = None
                 if self.storage_env == 'swift':
                     storage = SwiftStorage(app.config)
-                    content = storage.get_data(job_id, outgoing_dir,
-                                               job_output_path=job_output_path)
-                    download_name = f'{job_id}.json'
-                    mimetype = 'application/json'
+                elif self.storage_env == 'filesystem':
+                    storage = FileSystemStorage(app.config)
+
+                content = storage.get_data(job_id, outgoing_dir,
+                                           job_output_path=job_output_path.lstrip('/'))
+                download_name = f'{job_id}.json'
+                mimetype = 'application/json'
             else:
                 # if no query parameter passed then the job's zip file is returned
                 storage = ZipFileStorage(app.config)
diff --git a/pfcon/zip_file_storage.py b/pfcon/zip_file_storage.py
index 52b4da9..06348cd 100755
--- a/pfcon/zip_file_storage.py
+++ b/pfcon/zip_file_storage.py
@@ -59,8 +59,8 @@ def get_data(self, job_id, job_outgoing_dir, **kwargs):
                     except Exception as e:
                         logger.error(f'Failed to read file {local_file_path} for '
                                      f'job {job_id}, detail: {str(e)}')
-                    else:
-                        nfiles += 1
+                        raise
+                    nfiles += 1

         memory_zip_file.seek(0)
         logger.info(f'{nfiles} files compressed for job {job_id}')
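From a client's perspective, the JobFile GET now behaves as follows in in-network mode: with a `job_output_path` query parameter it returns the JSON manifest produced by the storage backend (Swift or filesystem); without one it streams the job's zip file, as before. A rough sketch using `requests`; the URL path and token handling are illustrative assumptions, not pfcon's exact routes:

```python
import io
import json
import zipfile

import requests

pfcon = 'http://localhost:30006/api/v1'       # dev port published in the compose files
token = '...'                                  # obtained from pfcon's auth resource
headers = {'Authorization': 'Bearer ' + token}
job_file_url = f'{pfcon}/chris-jid-4/file'     # assumed shape of the api.jobfile route

# No query parameters: pfcon streams the job's zip file.
r = requests.get(job_file_url, headers=headers)
names = zipfile.ZipFile(io.BytesIO(r.content)).namelist()

# With job_output_path (in-network): a JSON manifest is returned instead, e.g.
# {'job_output_path': 'foo/feed/output', 'rel_file_paths': ['out/test.txt']}
r = requests.get(job_file_url, headers=headers,
                 params={'job_output_path': 'foo/feed/output'})
manifest = json.loads(r.content)
```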
diff --git a/swarm/docker-compose_dev_innetwork_fs.yml b/swarm/docker-compose_dev_innetwork_fs.yml
new file mode 100755
index 0000000..86b27fd
--- /dev/null
+++ b/swarm/docker-compose_dev_innetwork_fs.yml
@@ -0,0 +1,73 @@
+# https://docs.docker.com/compose/yml/
+# Each service defined in docker-compose.yml must specify exactly one of
+# image or build. Other keys are optional, and are analogous to their
+# docker run command-line counterparts.
+#
+# As with docker run, options specified in the Dockerfile (e.g., CMD,
+# EXPOSE, VOLUME, ENV) are respected by default - you don't need to
+# specify them again in docker-compose.yml.
+#
+
+version: '3.7'
+
+services:
+  pfcon:
+    image: localhost:5000/fnndsc/pfcon:dev
+    build:
+      context: ..
+      args:
+        ENVIRONMENT: local
+    stdin_open: true  # docker run -i
+    tty: true         # docker run -t
+    # We need to mount a physical dir in the HOST onto the key store in pfcon. This dir
+    # is given by the STOREBASE env variable substitution. The keystore can be specified
+    # by the --storeBase flag during development.
+    command: ["python", "-m", "pfcon"]
+    environment:
+      - APPLICATION_MODE=development
+      - PFCON_INNETWORK=true
+      - STORAGE_ENV=filesystem
+    volumes:
+      - fs_storage_dev:/filesystem
+      - ${STOREBASE:?}:/var/local/storeBase:z
+      - ../pfcon:/app/pfcon:z
+      - ../tests:/app/tests:z
+    ports:
+      - "30006:5005"
+    depends_on:
+      - pman
+    networks:
+      - remote
+    labels:
+      name: "pfcon"
+      role: "pfcon service"
+
+  pman:
+    image: ${PMANREPO:?}/pman
+    # Since pman spins off containers of its own it needs to mount the storeBase dir
+    # (where pfcon shares the data) into the spawned container. This directory is passed
+    # in the STOREBASE env variable.
+    environment:
+      - STORAGE_TYPE=host
+      - STOREBASE
+      - SECRET_KEY="w1kxu^l=@pnsf!5piqz6!!5kdcdpo79y6jebbp+2244yjm*#+k"
+      - CONTAINER_ENV=swarm
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock:z
+    deploy:
+      placement:
+        constraints:
+          - "node.role==manager"
+    networks:
+      - remote
+    labels:
+      name: "pman"
+      role: "pman service"
+
+
+networks:
+  remote:
+
+volumes:
+  fs_storage_dev:
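Both services share the `${STOREBASE:?}` host directory: pfcon lays out one `key-<job id>` directory per job there, and pman mounts the same tree into the plugin containers it spawns; the `fs_storage_dev` volume mounted at `/filesystem` plays the role of CUBE's storage in this dev setup. The layout that the tests below rely on, sketched in Python:

```python
import os

# Default mirrors STORE_BASE in pfcon/config.py.
STOREBASE = os.environ.get('STOREBASE', '/var/local/storeBase')

job_id = 'chris-jid-1'
job_dir = os.path.join(STOREBASE, 'key-' + job_id)
incoming = os.path.join(job_dir, 'incoming')   # pfcon copies job inputs here
outgoing = os.path.join(job_dir, 'outgoing')   # the plugin container writes outputs here
```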
+ """ + def setUp(self): + super().setUp() + + with self.app.test_request_context(): + self.url = url_for('api.joblist') + + def test_get(self): + response = self.client.get(self.url, headers=self.headers) + self.assertEqual(response.status_code, 200) + self.assertTrue('server_version' in response.json) + self.assertTrue(response.json['pfcon_innetwork']) + self.assertEqual(response.json['storage_env'], 'filesystem') + + def test_post(self): + job_id = 'chris-jid-1' + self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id) + + data = { + 'jid': job_id, + 'entrypoint': ['python3', '/usr/local/bin/simplefsapp'], + 'args': ['--saveinputmeta', '--saveoutputmeta', '--dir', '/share/incoming'], + 'auid': 'cube', + 'number_of_workers': '1', + 'cpu_limit': '1000', + 'memory_limit': '200', + 'gpu_limit': '0', + 'image': 'fnndsc/pl-simplefsapp', + 'type': 'fs', + 'input_dirs': [os.path.relpath(self.fs_input_path, self.fs_base_dir)] + } + # make the POST request + response = self.client.post(self.url, data=data, headers=self.headers) + self.assertEqual(response.status_code, 201) + self.assertIn('compute', response.json) + self.assertIn('data', response.json) + self.assertEqual(response.json['data']['nfiles'], 1) + + with self.app.test_request_context(): + pman = PmanService.get_service_obj() + for _ in range(10): + time.sleep(3) + d_compute_response = pman.get_job(job_id) + if d_compute_response['status'] == 'finishedSuccessfully': break + self.assertEqual(d_compute_response['status'], 'finishedSuccessfully') + + # cleanup swarm job + pman.delete_job(job_id) + + +class TestJob(ResourceTests): + """ + Test the Job resource. + """ + def setUp(self): + super().setUp() + + self.compute_data = { + 'entrypoint': ['python3', '/usr/local/bin/simplefsapp'], + 'args': ['--saveinputmeta', '--saveoutputmeta', '--dir', 'cube'], + 'args_path_flags': ['--dir'], + 'auid': 'cube', + 'number_of_workers': '1', + 'cpu_limit': '1000', + 'memory_limit': '200', + 'gpu_limit': '0', + 'image': 'fnndsc/pl-simplefsapp', + 'type': 'fs' + } + + def test_get(self): + job_id = 'chris-jid-2' + self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id) + incoming = os.path.join(self.job_dir, 'incoming') + Path(incoming).mkdir(parents=True, exist_ok=True) + outgoing = os.path.join(self.job_dir, 'outgoing') + Path(outgoing).mkdir(parents=True, exist_ok=True) + with open(os.path.join(incoming, 'test.txt'), 'w') as f: + f.write('job input test file') + + with self.app.test_request_context(): + # create job + url = url_for('api.job', job_id=job_id) + pman = PmanService.get_service_obj() + pman.run_job(job_id, self.compute_data) + + # make the GET requests + for _ in range(10): + time.sleep(3) + response = self.client.get(url, headers=self.headers) + if response.json['compute']['status'] == 'finishedSuccessfully': break + self.assertEqual(response.status_code, 200) + self.assertEqual(response.json['compute']['status'], 'finishedSuccessfully') + + # cleanup swarm job + pman.delete_job(job_id) + + def test_delete(self): + job_id = 'chris-jid-3' + self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id) + incoming = os.path.join(self.job_dir, 'incoming') + Path(incoming).mkdir(parents=True, exist_ok=True) + outgoing = os.path.join(self.job_dir, 'outgoing') + Path(outgoing).mkdir(parents=True, exist_ok=True) + with open(os.path.join(incoming, 'test.txt'), 'w') as f: + f.write('job input test file') + + with self.app.test_request_context(): + # create job + url = url_for('api.job', job_id=job_id) + pman = 
+    def test_delete(self):
+        job_id = 'chris-jid-3'
+        self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id)
+        incoming = os.path.join(self.job_dir, 'incoming')
+        Path(incoming).mkdir(parents=True, exist_ok=True)
+        outgoing = os.path.join(self.job_dir, 'outgoing')
+        Path(outgoing).mkdir(parents=True, exist_ok=True)
+        with open(os.path.join(incoming, 'test.txt'), 'w') as f:
+            f.write('job input test file')
+
+        with self.app.test_request_context():
+            # create job
+            url = url_for('api.job', job_id=job_id)
+            pman = PmanService.get_service_obj()
+            pman.run_job(job_id, self.compute_data)
+
+            # make the DELETE request
+            time.sleep(3)
+            response = self.client.delete(url, headers=self.headers)
+            self.assertEqual(response.status_code, 204)
+
+
+class TestJobFile(ResourceTests):
+    """
+    Test the JobFile resource.
+    """
+
+    def test_get_without_query_parameters(self):
+        job_id = 'chris-jid-4'
+        self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id)
+
+        with self.app.test_request_context():
+            url = url_for('api.jobfile', job_id=job_id)
+            outgoing = os.path.join(self.job_dir, 'outgoing')
+            test_file_path = os.path.join(outgoing, 'out')
+            Path(test_file_path).mkdir(parents=True, exist_ok=True)
+            with open(os.path.join(test_file_path, 'test.txt'), 'w') as f:
+                f.write('job input test file')
+
+            response = self.client.get(url, headers=self.headers)
+            self.assertEqual(response.status_code, 200)
+            memory_zip_file = io.BytesIO(response.data)
+            with zipfile.ZipFile(memory_zip_file, 'r', zipfile.ZIP_DEFLATED) as job_zip:
+                filenames = job_zip.namelist()
+            self.assertEqual(len(filenames), 1)
+            self.assertEqual(filenames[0], 'out/test.txt')
+
+    def test_get_with_query_parameters(self):
+        job_id = 'chris-jid-4'
+        self.job_dir = os.path.join('/var/local/storeBase', 'key-' + job_id)
+
+        with self.app.test_request_context():
+            url = url_for('api.jobfile', job_id=job_id)
+            outgoing = os.path.join(self.job_dir, 'outgoing')
+            test_file_path = os.path.join(outgoing, 'out')
+            Path(test_file_path).mkdir(parents=True, exist_ok=True)
+            with open(os.path.join(test_file_path, 'test.txt'), 'w') as f:
+                f.write('job input test file')
+
+            job_output_path = os.path.relpath(self.fs_output_path, self.fs_base_dir)
+            response = self.client.get(url,
+                                       query_string={'job_output_path': job_output_path},
+                                       headers=self.headers)
+            self.assertEqual(response.status_code, 200)
+            content = json.loads(response.data.decode())
+            self.assertEqual(content['job_output_path'], job_output_path)
+            self.assertEqual(content['rel_file_paths'], ['out/test.txt'])
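Outside the Flask test client, the same in-network filesystem flow can be driven against the running dev stack on the port published above. A hedged sketch; the route shape, credentials, and token handling are assumptions rather than something this diff pins down:

```python
import requests

pfcon = 'http://localhost:30006/api/v1'
token = '...'  # from the auth resource, as in ResourceTests.setUp
job = {
    'jid': 'chris-jid-1',
    'entrypoint': ['python3', '/usr/local/bin/simplefsapp'],
    'args': ['--saveinputmeta', '--saveoutputmeta', '--dir', '/share/incoming'],
    'auid': 'cube',
    'number_of_workers': '1',
    'cpu_limit': '1000',
    'memory_limit': '200',
    'gpu_limit': '0',
    'image': 'fnndsc/pl-simplefsapp',
    'type': 'fs',
    # relative to FILESYSTEM_BASEDIR, i.e. /filesystem in the dev stack
    'input_dirs': ['foo/feed/input'],
}
# Mirrors TestJobList.test_post above, but over HTTP.
r = requests.post(pfcon + '/', data=job,
                  headers={'Authorization': 'Bearer ' + token})
assert r.status_code == 201 and r.json()['data']['nfiles'] == 1
```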
diff --git a/unmake.sh b/unmake.sh
index 9648135..4b0ad48 100755
--- a/unmake.sh
+++ b/unmake.sh
@@ -6,8 +6,9 @@
 #
 # SYNPOSIS
 #
-#   unmake.sh [-h] [-N]
-#             [-O <swarm|kubernetes>]
-#             [-S <storeBase>]
+#   unmake.sh [-h] [-N] \
+#             [-F <swift|filesystem|zipfile>] \
+#             [-O <swarm|kubernetes>] \
+#             [-S <storeBase>]
 #
 #
@@ -21,9 +22,13 @@
 #
 #   unmake.sh
 #
-# Destroy pfcon dev instance operating in-network on Swarm:
+# Destroy pfcon dev instance operating in-network on Swarm using Swift storage:
 #
-#   unmake.sh -N
+#   unmake.sh -N -F swift
+#
+# Destroy pfcon dev instance operating in-network on Swarm using mounted filesystem storage:
+#
+#   unmake.sh -N -F filesystem
 #
 # Destroy pfcon dev instance on Kubernetes:
 #
@@ -41,6 +46,12 @@
 #
 #   Optional print usage help.
 #
+# -F <swift|filesystem|zipfile>
+#
+#   Explicitly set the storage environment. This option must be swift or filesystem
+#   for pfcon operating in-network mode. For pfcon operating in out-of-network mode
+#   it must be set to zipfile (default).
+#
 # -O <swarm|kubernetes>
 #
 #   Explicitly set the orchestrator. Default is swarm.
@@ -56,18 +67,25 @@ source ./decorate.sh

 declare -i STEP=0
 ORCHESTRATOR=swarm
+STORAGE=zipfile

 print_usage () {
-    echo "Usage: ./unmake.sh [-h] [-N] [-O <swarm|kubernetes>] [-S <storeBase>]"
+    echo "Usage: ./unmake.sh [-h] [-N] [-F <swift|filesystem|zipfile>] [-O <swarm|kubernetes>] [-S <storeBase>]"
     exit 1
 }

-while getopts ":hNO:S:" opt; do
+while getopts ":hNF:O:S:" opt; do
     case $opt in
         h) print_usage
            ;;
         N) b_pfconInNetwork=1
            ;;
+        F) STORAGE=$OPTARG
+           if ! [[ "$STORAGE" =~ ^(swift|filesystem|zipfile)$ ]]; then
+              echo "Invalid value for option -- F"
+              print_usage
+           fi
+           ;;
         O) ORCHESTRATOR=$OPTARG
            if ! [[ "$ORCHESTRATOR" =~ ^(swarm|kubernetes)$ ]]; then
               echo "Invalid value for option -- O"
@@ -92,10 +110,15 @@ title -d 1 "Setting global exports..."
     fi
     if (( b_pfconInNetwork )) ; then
         echo -e "PFCON_INNETWORK=True" | ./boxes.sh
+        if [[ $STORAGE == 'zipfile' ]]; then
+            echo -e "Need to pass '-F <swift|filesystem>' when PFCON_INNETWORK=True" | ./boxes.sh
+            exit 1
+        fi
     else
         echo -e "PFCON_INNETWORK=False" | ./boxes.sh
     fi
     echo -e "ORCHESTRATOR=$ORCHESTRATOR" | ./boxes.sh
+    echo -e "STORAGE=$STORAGE" | ./boxes.sh
     echo -e "exporting STOREBASE=$STOREBASE " | ./boxes.sh
     export STOREBASE=$STOREBASE
     export SOURCEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
@@ -107,9 +130,15 @@ title -d 1 "Destroying pfcon containerized dev environment on $ORCHESTRATOR"
         echo "docker stack rm pfcon_dev_stack" | ./boxes.sh ${LightCyan}
         docker stack rm pfcon_dev_stack
         if (( b_pfconInNetwork )) ; then
-            echo "docker volume rm -f pfcon_dev_stack_swift_storage_dev"
-            sleep 15
-            docker volume rm pfcon_dev_stack_swift_storage_dev
+            if [[ $STORAGE == 'swift' ]]; then
+                echo "docker volume rm -f pfcon_dev_stack_swift_storage_dev"
+                sleep 15
+                docker volume rm pfcon_dev_stack_swift_storage_dev
+            elif [[ $STORAGE == 'filesystem' ]]; then
+                echo "docker volume rm -f pfcon_dev_stack_fs_storage_dev"
+                sleep 15
+                docker volume rm pfcon_dev_stack_fs_storage_dev
+            fi
         fi
     elif [[ $ORCHESTRATOR == kubernetes ]]; then
         if (( b_pfconInNetwork )) ; then