diff --git a/.gitignore b/.gitignore
index 97ed71946..289459ecf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,11 +4,12 @@ cmake/sodium_version
cmake/curl_version
cmake/zlib_version
common/proto/common/Version.proto
-compose/.env
-compose/globus
-compose/keys
-compose/logs
-compose/unset_env.sh
+compose/**/.env
+compose/**/.env*
+compose/**/globus
+compose/**/keys
+compose/**/logs
+compose/**/unset_env.sh
config/datafed.sh
config/datafed-authz.cfg
config/datafed-core.cfg
diff --git a/CHANGELOG.md b/CHANGELOG.md
index fa2614eec..9f3a3b766 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -27,6 +27,8 @@
python.
10. [952] - Bug in globus cleanup python script fixed, was trying to use projects
instead of the auth_client.
+11. [953] - Refactored docker compose build scripts to address technical debt.
+
# v2023.10.23.15.50
## MINOR Feature
diff --git a/compose/README.md b/compose/README.md
index 5a1e61143..cf3d198ba 100644
--- a/compose/README.md
+++ b/compose/README.md
@@ -1,16 +1,23 @@
# Compose Dev environment
-The Compose Dev environment is split into two different Compose files. The
+The Compose Dev environment is split into three different Compose files: the
"core metadata services" which comprise the web server, core server and database
and the "repo services" which comprise Globus Connect Server running with the
-authz library and the DataFed repo service.
+authz library and the DataFed repo service. Finally, there is an "all services"
+Compose file which will let you run DataFed with a repository on the same machine.
-NOTE Standing up the repo services has been separated because of Globus. You
-will need a machine with firewall exceptions to use it.
+1. Only Metadata Compose Services
+2. Only DataFed Repository Services
+3. Both Metadata and Repository Services
-## Core Compose Services
+NOTE Standing up the repo services has been separated because in many cases
+your repository may be on a different machine, or you may have multiple
+repositories connected to a single metadata server.
+NOTE You will need a machine with firewall exceptions to run the compose files.
-The following steps are used to stand up the core Compose file from scratch.
+## 1. Metadata Compose Services
+
+The following steps are used to stand up the metadata Compose file from scratch.
Some of steps you only have to do once.
1. Generating the env variables.
@@ -19,27 +26,27 @@ Some of steps you only have to do once.
4. Running the Compose file
5. Bringing down the Compose file.
-Core services only need an external port 443 for https access. I have been
-unable to get this to work with other ports due to the redirect URL required
-by Globus for authentication. It does not seem to support ports outside of 443.
-### 1. Generating .env configuration varaibles for Core Services
+If running the metadata services alone, without the repository, you will only
+need a firewall exception for port 443.
+
+### 1. Generating .env configuration variables for Metadata Core Services
Create the .env file and fill in the missing components that are required.
```bash
./generate_env.sh
```
-### 2. Fill in the needed .env variables for the Core Services
+### 2. Fill in the needed .env variables for the Metadata Core Services
-The .env file will be created in the DataFed/Compose folder and will be hidden.
+The .env file will be created in the DataFed/compose/metadata folder and will be hidden.
The .env file variables can be changed at this point to your configuration.
NOTE the .env file will be read verbatim by Compose including any spaces or
"#" comments so do not includ anything but the exact text that needs to be
included in the variables.
-For the redirect url. if you are running the core services on your laptop you
+For the redirect URL, if you are running the metadata core services on your laptop you
can use:
https://localhost/ui/authn
@@ -63,13 +70,14 @@ If a domain is assigned such as "awesome_datafed.com"
redirect: https://awesome_datafed.com/ui/authn
DATAFED_DOMAIN: awesome_datafed.com
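+
+As a sketch, the corresponding lines in the generated .env would then read
+(values are placeholders):
+
+```
+DATAFED_DOMAIN=awesome_datafed.com
+DATAFED_HTTPS_SERVER_PORT=443
+```
+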
-### 3. Building Core Services
+### 3. Building Metadata Core Services
The following command will build all the images to run the core metadata
services.
```bash
-./build_images_for_compose.sh
+cd DataFed/compose/metadata
+./build_metadata_images_for_compose.sh
```
### 4. Running the core Compose file
@@ -77,8 +85,9 @@ services.
Stand up the core services.
```bash
+cd DataFed/compose/metadata
source ./unset_env.sh
-docker compose -f ./compose_core.yml up
+docker compose -f ./compose.yml up
```
NOTE The unset_env.sh script is to make sure you are not accidentally
@@ -86,7 +95,8 @@ overwriting what is in the .env with your local shell env. You can check the
configuration beforehand by running.
```bash
-docker compose -f compose_core.yml config
+cd DataFed/compose/metadata
+docker compose -f compose.yml config
```
WARNING - Docker Compose will prioritize env variables in the following priority
@@ -107,14 +117,15 @@ To completely remove the Compose instance and all state the following should
be run.
```bash
-docker compose -f ./compose_core.yml down --volumes
+cd DataFed/compose/metadata
+docker compose -f ./compose.yml down --volumes
```
NOTE the volumes will remove all cached state. If the '--volumes' flag is
not added then on a subsequent "Compose up" the database will not be a clean
install but contain state from previous runs.
-## Repo Compose Services
+## 2. Repo Compose Services
The following steps are used to stand up the repo Compose file. NOTE, that
because of how Globus is configured, there is an additional configuration
@@ -138,6 +149,7 @@ https://docs.globus.org/globus-connect-server/v5/
Create the .env file and fill in the missing components that are required.
```bash
+cd DataFed/compose/repo
./generate_env.sh
```
@@ -152,11 +164,11 @@ included in the variables.
### 3. Globus configuration
-This step is only required once, after which the necessary files should exist
-in DataFed/Compose/globus. These files will contain the Globus configuration
-needed for additional cycles of "docker compose up" and "docker compose down".
-The following three things need to be done before the generate_globus_files.sh
-should be run.
+This step is only required once, after which the necessary files should exist in
+DataFed/compose/repo/globus. These files will contain the Globus
+configuration needed for additional cycles of "docker compose up" and "docker
+compose down". The following three things need to be done before the
+generate_globus_files.sh should be run.
1. You will need to have globus-connect-server54 installed to do this step.
2. You will also need the Globus python developer kit globus_sdk.
@@ -178,6 +190,7 @@ pip install globus_sdk
Finally, we can generate the globus files by running the script.
```bash
+cd DataFed/compose/repo
./generate_globus_files.sh
```
@@ -190,6 +203,7 @@ The following command will build all the images to run the core metadata
services.
```bash
+cd DataFed/compose/repo
./build_repo_images_for_compose.sh
```
@@ -209,8 +223,9 @@ before launching the compose instance.
Stand up the repo services.
```bash
+cd DataFed/compose/repo
source ./unset_env.sh
-docker compose -f ./compose_repo.yml up
+docker compose -f ./compose.yml up
```
Be aware, the 'source' is to apply changes to the environment of your current
@@ -221,7 +236,8 @@ overwriting what is in the .env with your local shell env. You can check the
configuration beforehand by running.
```bash
-docker compose -f compose_repo.yml config
+cd DataFed/compose/repo
+docker compose -f compose.yml config
```
WARNING - Docker Compose will prioritize env variables in the following priority
@@ -245,9 +261,17 @@ it is possible that the DATAFED_DOMAIN name field is incorrect your .env file.
## Cleaning up
```bash
-docker compose -f ./compose_core.yml down
+cd DataFed/compose/repo
+docker compose -f ./compose.yml down
```
+## 3. Running All Services
+
+For running all services, you will follow the same steps as for getting the
+repository service up and running. The main difference from that set of steps
+is that additional configuration will need to be added to the .env file, as
+sketched below.
+
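+For reference, relative to a repo-only .env the all-services .env (generated by
+generate_env.sh with no -m/-r flag) additionally contains the metadata
+variables, for example:
+
+```
+DATAFED_GLOBUS_APP_ID=
+DATAFED_GLOBUS_APP_SECRET=
+DATAFED_ZEROMQ_SESSION_SECRET=
+DATAFED_ZEROMQ_SYSTEM_SECRET=
+DATAFED_DATABASE_PASSWORD=
+DATAFED_DATABASE_IP_ADDRESS=
+DATAFED_DATABASE_PORT=
+```
+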
## Running isolated containers
If you just want to run a single container at a time with the same configuration
@@ -256,6 +280,7 @@ this can also be doen using commands like the following.
### DataFed Web
```bash
+cd DataFed/compose/repo
source ./unset_env.sh
docker run --env-file .env \
-e UID=$(id -u) \
@@ -267,6 +292,7 @@ docker run --env-file .env \
### DataFed GCS
```bash
+cd DataFed/compose/repo
docker run --env-file .env \
--network=host \
-v /home/cloud/compose_collection:/mnt/datafed \
@@ -296,4 +322,13 @@ include, ports
Make sure port 80 is not already bound on the host. Also note that the repo
server keys should exist in the keys folder before running the gcs instance.
+##### Repo server unable to connect to core server
+
+```
+datafed-repo-1 | 2024-05-15T11:41:23.406937Z ERROR /datafed/source/repository/server/RepoServer.cpp:checkServerVersion:178 { "thread_name": "repo_server", "correlation_id": "3fceb838-70f9-454d-94c4-4e2660dcc029", "message": "Timeout waiting for response from core server: tcp://localhost:7512" }
+ datafed-repo-1 | 2024-05-15T11:41:23.406992Z INFO /datafed/source/repository/server/RepoServer.cpp:checkServerVersion:161 { "thread_name": "repo_server", "message": "Attempt 4 to initialize communication with core server at tcp://localhost:7512" }
+```
+Make sure that the domain is correct. If you are using localhost, the repo
+server may be unable to resolve the core service, since traffic gets routed by
+apache to the registered domain name.
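+
+One way to check which domain the services actually received is to inspect the
+resolved Compose configuration, for example:
+
+```bash
+cd DataFed/compose/all
+docker compose -f compose.yml config | grep DATAFED_DOMAIN
+```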
diff --git a/compose/all/build_images_for_compose.sh b/compose/all/build_images_for_compose.sh
new file mode 100755
index 000000000..cf5d04e6e
--- /dev/null
+++ b/compose/all/build_images_for_compose.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_build_images.sh"
diff --git a/compose/all/cleanup_globus_files.sh b/compose/all/cleanup_globus_files.sh
new file mode 100755
index 000000000..e9990c5fd
--- /dev/null
+++ b/compose/all/cleanup_globus_files.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_cleanup_globus_files.sh" -d "$(pwd)"
+
diff --git a/compose/all/compose.yml b/compose/all/compose.yml
new file mode 100644
index 000000000..9dd3b1f4f
--- /dev/null
+++ b/compose/all/compose.yml
@@ -0,0 +1,158 @@
+
+# WARNING
+#
+# Any env variable that must be provided to overwrite what is in the container
+# must be explicitly listed in the environment section of the specific service;
+# variables passed via --env-file alone will not exist in the container by default.
+services:
+
+ datafed-web:
+ depends_on: ["datafed-core"]
+ environment:
+ DATAFED_GLOBUS_APP_SECRET: "${DATAFED_GLOBUS_APP_SECRET}"
+ DATAFED_GLOBUS_APP_ID: "${DATAFED_GLOBUS_APP_ID}"
+ DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}"
+ DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}"
+ DATAFED_DOMAIN: "${DATAFED_DOMAIN}"
+ DATAFED_HTTPS_SERVER_PORT: "${DATAFED_HTTPS_SERVER_PORT}"
+ DATAFED_WEB_CERT_PATH: "${DATAFED_WEB_CERT_PATH}"
+ DATAFED_WEB_KEY_PATH: "${DATAFED_WEB_KEY_PATH}"
+ DATAFED_WEB_USER: "datafed"
+ DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}"
+ DATAFED_CORE_ADDRESS_PORT_INTERNAL: "datafed-core:7513"
+ UID: "${DATAFED_UID}"
+ image: datafed-web:latest
+ ports:
+ - "8080:443" # This must be the same port that is mapped to the host for redirects to work
+ volumes:
+ - ./keys:/opt/datafed/keys
+ - ./logs:${DATAFED_CONTAINER_LOG_PATH}
+ networks:
+ - datafed-internal
+
+ datafed-core:
+ image: datafed-core:latest
+ depends_on:
+ datafed-foxx:
+ condition: service_healthy
+ environment:
+ DATAFED_GLOBUS_APP_SECRET: "${DATAFED_GLOBUS_APP_SECRET}"
+ DATAFED_GLOBUS_APP_ID: "${DATAFED_GLOBUS_APP_ID}"
+ DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}"
+ DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}"
+ DATAFED_DOMAIN: "${DATAFED_DOMAIN}"
+ DATAFED_WEB_CERT_PATH: "${DATAFED_WEB_CERT_PATH}"
+ DATAFED_WEB_KEY_PATH: "${DATAFED_WEB_KEY_PATH}"
+ DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}"
+ UID: "${DATAFED_UID}"
+ DATAFED_DATABASE_PASSWORD: "${DATAFED_DATABASE_PASSWORD}"
+ DATAFED_DATABASE_IP_ADDRESS: "${DATAFED_DATABASE_IP_ADDRESS}"
+ DATAFED_DATABASE_IP_ADDRESS_PORT: "${DATAFED_DATABASE_IP_ADDRESS}:${DATAFED_DATABASE_PORT}"
+ ports:
+ - 7513 # Communication web server
+ - 7512:7512 # Secure core server communication must be exposed outside of the container
+ volumes:
+ - ./keys:/opt/datafed/keys
+ - ./logs:${DATAFED_CONTAINER_LOG_PATH}
+ networks:
+ - datafed-internal
+
+ datafed-foxx:
+ image: datafed-foxx:latest
+ depends_on: ["arango"]
+ environment:
+ DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}"
+ DATAFED_DOMAIN: "${DATAFED_DOMAIN}"
+ DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}"
+ UID: "${DATAFED_UID}"
+ DATAFED_DATABASE_PASSWORD: "${DATAFED_DATABASE_PASSWORD}"
+ DATAFED_DATABASE_IP_ADDRESS: "${DATAFED_DATABASE_IP_ADDRESS}"
+ DATAFED_DATABASE_HOST: "arango"
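+ # datafed-core waits on this check via service_healthy; /tmp/.foxx_is_installed
+ # is presumably touched by the foxx setup once the API has been installed.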
+ healthcheck:
+ test: ["CMD", "/bin/bash", "-c", "[ -f /tmp/.foxx_is_installed ]"]
+ interval: 10s
+ timeout: 5s
+ retries: 20
+ volumes:
+ - foxx_tmp:/tmp
+ networks:
+ - datafed-internal
+
+ arango:
+ image: arangodb
+ environment:
+ ARANGO_ROOT_PASSWORD: "${DATAFED_DATABASE_PASSWORD}"
+ volumes:
+ - arango_db:/var/lib/arangodb3
+ ports:
+ - 8529:8529 # Arangodb web UI
+ networks:
+ - datafed-internal
+
+
+
+
+ # Needs the datafed-core to be up so it doesn't fail when trying to connect
+ datafed-repo:
+ depends_on: ["datafed-core"]
+ environment:
+ DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}"
+ DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}"
+ DATAFED_DOMAIN: "${DATAFED_DOMAIN}"
+ DATAFED_HTTPS_SERVER_PORT: "${DATAFED_HTTPS_SERVER_PORT}"
+ DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}"
+ DATAFED_CORE_ADDRESS_PORT_INTERNAL: "${DATAFED_DOMAIN}:7513"
+ DATAFED_GCS_COLLECTION_ROOT_PATH: "/mnt/datafed"
+ UID: "${DATAFED_UID}"
+ HOST_HOSTNAME: "localhost"
+ image: datafed-repo:latest
+ volumes:
+ - ./keys:/opt/datafed/keys
+ - ./logs:${DATAFED_CONTAINER_LOG_PATH}
+ - ${DATAFED_HOST_COLLECTION_MOUNT}:/mnt/datafed
+ ports:
+ - 9000:9000 # Communication core server
+
+# Needs host port 80 for apache
+# Needs ports 50000 - 51000 for GridFTP
+# Needs port 443 for control port
+# Needs the datafed-web server to be up so that it can download the public key
+ datafed-gcs:
+ depends_on: ["datafed-web"]
+ environment:
+ DATAFED_ZEROMQ_SESSION_SECRET: "${DATAFED_ZEROMQ_SESSION_SECRET}"
+ DATAFED_ZEROMQ_SYSTEM_SECRET: "${DATAFED_ZEROMQ_SYSTEM_SECRET}"
+ DATAFED_DOMAIN: "${DATAFED_DOMAIN}"
+ DATAFED_HTTPS_SERVER_PORT: "${DATAFED_HTTPS_SERVER_PORT}"
+ DATAFED_DEFAULT_LOG_PATH: "${DATAFED_CONTAINER_LOG_PATH}"
+ DATAFED_CORE_ADDRESS_PORT_INTERNAL: "datafed-core:7513"
+ DATAFED_GCS_ROOT_NAME: "${DATAFED_GCS_ROOT_NAME}"
+ DATAFED_REPO_ID_AND_DIR: "${DATAFED_REPO_ID_AND_DIR}"
+ DATAFED_GLOBUS_SUBSCRIPTION: "${DATAFED_GLOBUS_SUBSCRIPTION}"
+ DATAFED_GLOBUS_CONTROL_PORT: "${DATAFED_GLOBUS_CONTROL_PORT}"
+ DATAFED_GCS_COLLECTION_ROOT_PATH: "/mnt/datafed"
+ DATAFED_REPO_USER: "${DATAFED_REPO_USER}"
+ UID: "${DATAFED_UID}"
+ HOST_HOSTNAME: "localhost"
+ DATAFED_AUTHZ_USER: "datafed"
+ DATAFED_GCS_IP: "${DATAFED_GCS_IP}"
+ network_mode: host
+ image: datafed-gcs:latest
+ volumes:
+ - ./keys:/opt/datafed/keys
+ - ./globus:/opt/datafed/globus
+ - ./logs:${DATAFED_CONTAINER_LOG_PATH}
+ - ${DATAFED_HOST_COLLECTION_MOUNT}:/mnt/datafed
+
+ # ports:
+ # - "8081:443" # This must be the same port that is mapped to the host for redirects to work
+ # - "50000-50100:50000-50100"
+
+volumes:
+ foxx_tmp:
+ arango_db:
+
+networks:
+ datafed-internal:
+ driver: bridge
+
diff --git a/compose/all/generate_env.sh b/compose/all/generate_env.sh
new file mode 100755
index 000000000..87b51b4af
--- /dev/null
+++ b/compose/all/generate_env.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_generate_env.sh" -d "$(pwd)"
+
diff --git a/compose/all/generate_globus_files.sh b/compose/all/generate_globus_files.sh
new file mode 100755
index 000000000..a2df4cff5
--- /dev/null
+++ b/compose/all/generate_globus_files.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_generate_globus_files.sh" -d "$(pwd)"
diff --git a/compose/all/globus-connect-server.log b/compose/all/globus-connect-server.log
new file mode 100644
index 000000000..e69de29bb
diff --git a/compose/build_images_for_compose.sh b/compose/build_images_for_compose.sh
deleted file mode 100755
index 7b155465f..000000000
--- a/compose/build_images_for_compose.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-
-set -euf -o pipefail
-
-SCRIPT=$(realpath "$0")
-SOURCE=$(dirname "$SCRIPT")
-PROJECT_ROOT=$(realpath "${SOURCE}/../")
-
-docker build \
- -f "${PROJECT_ROOT}/docker/Dockerfile.dependencies" \
- "${PROJECT_ROOT}" \
- -t datafed-dependencies:latest
-docker build \
- -f "${PROJECT_ROOT}/docker/Dockerfile.runtime" \
- "${PROJECT_ROOT}" \
- -t datafed-runtime:latest
-docker build -f \
- "${PROJECT_ROOT}/core/docker/Dockerfile" \
- --build-arg DEPENDENCIES="datafed-dependencies" \
- --build-arg RUNTIME="datafed-runtime" \
- "${PROJECT_ROOT}" \
- -t datafed-core:latest
-docker build -f \
- "${PROJECT_ROOT}/web/docker/Dockerfile" \
- --build-arg DEPENDENCIES="datafed-dependencies" \
- --build-arg RUNTIME="datafed-runtime" \
- "${PROJECT_ROOT}" \
- -t datafed-web:latest
-docker build -f \
- "${PROJECT_ROOT}/docker/Dockerfile.foxx" \
- --build-arg DEPENDENCIES="datafed-dependencies" \
- --build-arg RUNTIME="datafed-runtime" \
- "${PROJECT_ROOT}" \
- -t datafed-foxx:latest
-
diff --git a/compose/build_repo_images_for_compose.sh b/compose/build_repo_images_for_compose.sh
deleted file mode 100755
index 3ab2994ce..000000000
--- a/compose/build_repo_images_for_compose.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-set -euf -o pipefail
-
-SCRIPT=$(realpath "$0")
-SOURCE=$(dirname "$SCRIPT")
-PROJECT_ROOT=$(realpath "${SOURCE}/../")
-. "${PROJECT_ROOT}/scripts/dependency_versions.sh"
-
-cd "${PROJECT_ROOT}/external/globus-connect-server-deploy/docker"
-git checkout "$DATAFED_GCS_SUBMODULE_VERSION"
-docker build --progress plain --tag "gcs-ubuntu-base:latest" - < "./docker-files/Dockerfile.ubuntu-20.04"
-cd "${PROJECT_ROOT}"
-docker build \
- -f "${PROJECT_ROOT}/docker/Dockerfile.dependencies" \
- "${PROJECT_ROOT}" \
- -t datafed-dependencies:latest
-docker build \
- -f "${PROJECT_ROOT}/docker/Dockerfile.runtime" \
- "${PROJECT_ROOT}" \
- -t datafed-runtime:latest
-docker build -f \
- "${PROJECT_ROOT}/repository/docker/Dockerfile" \
- --build-arg DEPENDENCIES="datafed-dependencies" \
- --build-arg RUNTIME="datafed-runtime" \
- "${PROJECT_ROOT}" \
- -t datafed-repo:latest
-docker build -f \
- "${PROJECT_ROOT}/repository/docker/Dockerfile.gcs" \
- --build-arg DEPENDENCIES="datafed-dependencies" \
- --build-arg RUNTIME="datafed-runtime" \
- --build-arg GCS_IMAGE="gcs-ubuntu-base" \
- "${PROJECT_ROOT}" \
- -t datafed-gcs:latest
diff --git a/compose/cleanup_globus_files.sh b/compose/cleanup_globus_files.sh
deleted file mode 100755
index f262cef05..000000000
--- a/compose/cleanup_globus_files.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/bin/bash
-SCRIPT=$(realpath "$0")
-SOURCE=$(dirname "$SCRIPT")
-PROJECT_ROOT=$(realpath "${SOURCE}/..")
-
-# This script should be run after generating the .env file as it will pull
-# values from the .env file
-
-if [ ! -f "${PROJECT_ROOT}/compose/.env" ]
-then
- echo "Missing . ${PROJECT_ROOT}/compose/.env file. This file needs to be"
- echo "created first"
- exit 1
-fi
-
-. "${PROJECT_ROOT}/compose/.env"
-
-export DATAFED_GLOBUS_DEPLOYMENT_KEY_PATH="$DATAFED_HOST_DEPLOYMENT_KEY_PATH"
-export DATAFED_GLOBUS_CRED_FILE_PATH="$DATAFED_HOST_CRED_FILE_PATH"
-
-if [ -f "$DATAFED_HOST_CRED_FILE_PATH" ]
-then
- export GCS_CLI_CLIENT_ID=$(jq -r .client < "${DATAFED_HOST_CRED_FILE_PATH}")
- export GCS_CLI_CLIENT_SECRET=$(jq -r .secret < "${DATAFED_HOST_CRED_FILE_PATH}")
-fi
-
-if [ -f "$DATAFED_GLOBUS_DEPLOYMENT_KEY_PATH" ]
-then
- export GCS_CLI_ENDPOINT_ID=$(jq -r .client_id < "${DATAFED_GLOBUS_DEPLOYMENT_KEY_PATH}")
-fi
-
-sudo globus-connect-server node cleanup
-
-DATAFED_GCS_ROOT_NAME="$DATAFED_GCS_ROOT_NAME" \
-python3 "${PROJECT_ROOT}/scripts/globus/globus_cleanup.py"
diff --git a/compose/generate_globus_files.sh b/compose/generate_globus_files.sh
deleted file mode 100755
index 82f5dba89..000000000
--- a/compose/generate_globus_files.sh
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/bin/bash
-SCRIPT=$(realpath "$0")
-SOURCE=$(dirname "$SCRIPT")
-PROJECT_ROOT=$(realpath "${SOURCE}/..")
-
-# This script should be run after generating the .env file as it will pull
-# values from the .env file
-
-if [ ! -f "${PROJECT_ROOT}/compose/.env" ]
-then
- echo "Missing . ${PROJECT_ROOT}/compose/.env file. This file needs to be"
- echo "created first"
- exit 1
-fi
-
-local_DATAFED_GLOBUS_KEY_DIR="${PROJECT_ROOT}/compose/globus"
-if [ ! -d "$local_DATAFED_GLOBUS_KEY_DIR" ]
-then
- mkdir -p "$local_DATAFED_GLOBUS_KEY_DIR"
-fi
-
-. "${PROJECT_ROOT}/compose/.env"
-
-DATAFED_GLOBUS_DEPLOYMENT_KEY_PATH="$DATAFED_HOST_DEPLOYMENT_KEY_PATH" \
-DATAFED_GLOBUS_CRED_FILE_PATH="$DATAFED_HOST_CRED_FILE_PATH" \
-DATAFED_GLOBUS_CONTROL_PORT="$DATAFED_GLOBUS_CONTROL_PORT" \
-DATAFED_GLOBUS_SUBSCRIPTION="$DATAFED_GLOBUS_SUBSCRIPTION" \
-DATAFED_GCS_ROOT_NAME="$DATAFED_GCS_ROOT_NAME" \
- python3 "${PROJECT_ROOT}/scripts/globus/initialize_globus_endpoint.py"
diff --git a/compose/metadata/build_metadata_images_for_compose.sh b/compose/metadata/build_metadata_images_for_compose.sh
new file mode 100755
index 000000000..48e603307
--- /dev/null
+++ b/compose/metadata/build_metadata_images_for_compose.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_build_images.sh" -m
diff --git a/compose/compose_core.yml b/compose/metadata/compose.yml
similarity index 100%
rename from compose/compose_core.yml
rename to compose/metadata/compose.yml
diff --git a/compose/metadata/generate_env.sh b/compose/metadata/generate_env.sh
new file mode 100755
index 000000000..aed7c70e4
--- /dev/null
+++ b/compose/metadata/generate_env.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_generate_env.sh" -d "$(pwd)" -m
+
diff --git a/compose/repo/build_repo_images_for_compose.sh b/compose/repo/build_repo_images_for_compose.sh
new file mode 100755
index 000000000..22ac6e454
--- /dev/null
+++ b/compose/repo/build_repo_images_for_compose.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_build_images.sh" -r
diff --git a/compose/repo/cleanup_globus_files.sh b/compose/repo/cleanup_globus_files.sh
new file mode 100755
index 000000000..e9990c5fd
--- /dev/null
+++ b/compose/repo/cleanup_globus_files.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_cleanup_globus_files.sh" -d "$(pwd)"
+
diff --git a/compose/compose_repo.yml b/compose/repo/compose.yml
similarity index 100%
rename from compose/compose_repo.yml
rename to compose/repo/compose.yml
diff --git a/compose/repo/generate_env.sh b/compose/repo/generate_env.sh
new file mode 100755
index 000000000..31eca3ec5
--- /dev/null
+++ b/compose/repo/generate_env.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_generate_env.sh" -d "$(pwd)" -r
+
diff --git a/compose/repo/generate_globus_files.sh b/compose/repo/generate_globus_files.sh
new file mode 100755
index 000000000..71442d609
--- /dev/null
+++ b/compose/repo/generate_globus_files.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../../")
+
+"${PROJECT_ROOT}/scripts/compose_generate_globus_files.sh" -d "$(pwd)"
diff --git a/external/globus-connect-server-deploy b/external/globus-connect-server-deploy
index 1d42a0df7..38a40b7a0 160000
--- a/external/globus-connect-server-deploy
+++ b/external/globus-connect-server-deploy
@@ -1 +1 @@
-Subproject commit 1d42a0df7cd8389718205b038621677ebb575306
+Subproject commit 38a40b7a00f86b2355663680c328c1fef3311f6a
diff --git a/repository/docker/000-default.conf b/repository/docker/000-default.conf
new file mode 100644
index 000000000..e3755b444
--- /dev/null
+++ b/repository/docker/000-default.conf
@@ -0,0 +1,60 @@
+<VirtualHost *:80>
+ # The ServerName directive sets the request scheme, hostname and port that
+ # the server uses to identify itself. This is used when creating
+ # redirection URLs. In the context of virtual hosts, the ServerName
+ # specifies what hostname must appear in the request's Host: header to
+ # match this virtual host. For the default virtual host (this file) this
+ # value is not decisive as it is used as a last resort host regardless.
+ # However, you must set it for any further virtual host explicitly.
+ #ServerName www.example.com
+
+ ServerAdmin webmaster@localhost
+ DocumentRoot /var/www/html
+
+ # Available loglevels: trace8, ..., trace1, debug, info, notice, warn,
+ # error, crit, alert, emerg.
+ # It is also possible to configure the loglevel for particular
+ # modules, e.g.
+ #LogLevel info ssl:warn
+
+ ErrorLog ${APACHE_LOG_DIR}/error.log
+ CustomLog ${APACHE_LOG_DIR}/access.log combined
+
+ # For most configuration files from conf-available/, which are
+ # enabled or disabled at a global level, it is possible to
+ # include a line for only one particular virtual host. For example the
+ # following line enables the CGI configuration for this host only
+ # after it has been globally disabled with "a2disconf".
+ #Include conf-available/serve-cgi-bin.conf
+</VirtualHost>
+
+# vim: syntax=apache ts=4 sw=4 sts=4 sr noet
+
+<VirtualHost *:443>
+ ServerName datafed-gcs-test.ornl.gov
+
+ SSLEngine on
+ SSLCertificateFile /opt/datafed/keys/cert.crt
+ SSLCertificateKeyFile /opt/datafed/keys/cert.key
+
+ # SSL configuration
+ SSLProtocol TLSv1.2 TLSv1.3
+ SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH
+ SSLHonorCipherOrder on
+
+ # Proxy settings
+ ProxyPass / https://localhost:8080/
+ ProxyPassReverse / https://localhost:8080/
+ ProxyPreserveHost On
+ RequestHeader set X-Forwarded-Proto "https"
+
+ # Additional proxy SSL settings
+ SSLProxyEngine on
+ SSLProxyVerify none
+ SSLProxyCheckPeerCN off
+ SSLProxyCheckPeerName off
+ SSLProxyCheckPeerExpire off
+
+ SSLProxyVerifyDepth 2
+ SSLProxyCACertificateFile /opt/datafed/keys/cert.crt
+
+</VirtualHost>
diff --git a/repository/docker/Dockerfile.gcs b/repository/docker/Dockerfile.gcs
index 635725150..4e216affa 100644
--- a/repository/docker/Dockerfile.gcs
+++ b/repository/docker/Dockerfile.gcs
@@ -33,6 +33,9 @@ ENV BUILD_DIR="$BUILD_DIR"
ENV LIB_DIR="$LIB_DIR"
ENV DATAFED_GLOBUS_REPO_USER="datafed"
ENV DATAFED_DEFAULT_LOG_PATH="$DATAFED_INSTALL_PATH/logs"
+# Value needed so tput command doesn't crash
+ENV TERM="xterm"
+ENV DATAFED_GCS_IP=""
RUN mkdir -p ${BUILD_DIR}
RUN mkdir -p ${DATAFED_INSTALL_PATH}/logs
@@ -139,6 +142,8 @@ COPY --chown=datafed:root ./repository/docker/entrypoint_authz.sh ${BUILD_DIR}/r
USER root
+COPY ./repository/docker/000-default.conf /etc/apache2/sites-available/000-default.conf
+RUN chmod 644 /etc/apache2/sites-available/000-default.conf
# Remove --client-id from command because it was deprecated
RUN sed -i '/--client-id/d' /entrypoint.sh
diff --git a/repository/docker/entrypoint_authz.sh b/repository/docker/entrypoint_authz.sh
index 758513b21..b32549cc6 100755
--- a/repository/docker/entrypoint_authz.sh
+++ b/repository/docker/entrypoint_authz.sh
@@ -76,6 +76,7 @@ if [ -L "$link" ]; then
rm "$link"
fi
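+# Pass the optional IP override to "globus-connect-server node setup";
+# NODE_SETUP_ARGS is picked up by the GCS /entrypoint.sh started below.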
+export NODE_SETUP_ARGS="--ip-address ${DATAFED_GCS_IP}"
# Run the GCS entrypoint file in the background
/entrypoint.sh &
@@ -103,33 +104,39 @@ echo "globus-gridftp-server pid file found!"
# Need to wait until the domain name is properly registered
DATAFED_GCS_URL=$(jq -r .domain_name < /var/lib/globus-connect-server/info.json)
-
+set +e
HTTP_CODE=$("${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/curl" -s -o /dev/null -w "%{http_code}\n" -I "https://${DATAFED_GCS_URL}/api/info")
+echo "$?"
+set -e
echo "Waiting for domain name (https://${DATAFED_GCS_URL}) to be registered! Code: $HTTP_CODE"
printf "\n"
minutes=0
while [ "$HTTP_CODE" != "200" ]
do
- EraseToEOL=$(tput el)
-
- msg="Minutes $minutes "
- for i in {1..12}
- do
- printf "%s" "${msg}"
- msg='.'
- sleep 5
-
- HTTP_CODE=$("${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/curl" -s -o /dev/null -w "%{http_code}\n" -I "https://${DATAFED_GCS_URL}/api/info")
- if [ "$HTTP_CODE" == "200" ]
- then
- break
- fi
- done
- printf "\r${EraseToEOL}"
-
- minutes=$((minutes + 1))
+ EraseToEOL=$(tput el)
+
+ msg="Minutes $minutes "
+ for i in {1..12}
+ do
+ printf "%s" "${msg}"
+ msg='.'
+ sleep 5
+
+ set +e
+ HTTP_CODE=$("${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/curl" -s -o /dev/null -w "%{http_code}\n" -I "https://${DATAFED_GCS_URL}/api/info")
+ set -e
+ if [ "$HTTP_CODE" == "200" ]
+ then
+ break
+ fi
+ done
+ printf "\r${EraseToEOL}"
+
+ minutes=$((minutes + 1))
+ set +e
HTTP_CODE=$("${DATAFED_DEPENDENCIES_INSTALL_PATH}/bin/curl" -s -o /dev/null -w "%{http_code}\n" -I "https://${DATAFED_GCS_URL}/api/info")
+ set -e
done
printf "\n"
diff --git a/scripts/compose_build_images.sh b/scripts/compose_build_images.sh
new file mode 100755
index 000000000..d470607a0
--- /dev/null
+++ b/scripts/compose_build_images.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+
+set -euf -o pipefail
+
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/../")
+
+Help()
+{
+ echo "$(basename $0) Build images for compose run by default will build all."
+ echo
+ echo "Syntax: $(basename $0) [-h|r|m]"
+ echo "options:"
+ echo "-h, --help Print this help message"
+ echo "-r, --repo-images Build the repository images for "
+ echo " datafed."
+ echo "-m, --metadata-images Build the images for metadata services."
+}
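+
+# Example usage:
+# ./compose_build_images.sh       # build all images (default)
+# ./compose_build_images.sh -m    # metadata service images only
+# ./compose_build_images.sh -r    # repo service images only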
+
+VALID_ARGS=$(getopt -o hmr --long 'help',repo-images,metadata-images -- "$@")
+
+BUILD_REPO="TRUE"
+BUILD_METADATA="TRUE"
+eval set -- "$VALID_ARGS"
+while [ : ]; do
+ case "$1" in
+ -h | --help)
+ Help
+ exit 0
+ ;;
+ -r | --repo-images)
+ BUILD_METADATA="FALSE"
+ shift 1
+ ;;
+ -m | --metadata-images)
+ BUILD_REPO="FALSE"
+ shift 1
+ ;;
+ --) shift;
+ break
+ ;;
+ \?) # incorrect option
+ echo "Error: Invalid option"
+ exit;;
+ esac
+done
+
+
+if [[ "$BUILD_METADATA" == "TRUE" ]]
+then
+ docker build \
+ -f "${PROJECT_ROOT}/docker/Dockerfile.dependencies" \
+ "${PROJECT_ROOT}" \
+ -t datafed-dependencies:latest
+ docker build \
+ -f "${PROJECT_ROOT}/docker/Dockerfile.runtime" \
+ "${PROJECT_ROOT}" \
+ -t datafed-runtime:latest
+ docker build -f \
+ "${PROJECT_ROOT}/core/docker/Dockerfile" \
+ --build-arg DEPENDENCIES="datafed-dependencies" \
+ --build-arg RUNTIME="datafed-runtime" \
+ "${PROJECT_ROOT}" \
+ -t datafed-core:latest
+ docker build -f \
+ "${PROJECT_ROOT}/web/docker/Dockerfile" \
+ --build-arg DEPENDENCIES="datafed-dependencies" \
+ --build-arg RUNTIME="datafed-runtime" \
+ "${PROJECT_ROOT}" \
+ -t datafed-web:latest
+ docker build -f \
+ "${PROJECT_ROOT}/docker/Dockerfile.foxx" \
+ --build-arg DEPENDENCIES="datafed-dependencies" \
+ --build-arg RUNTIME="datafed-runtime" \
+ "${PROJECT_ROOT}" \
+ -t datafed-foxx:latest
+fi
+
+if [[ "$BUILD_REPO" == "TRUE" ]]
+then
+ source "${PROJECT_ROOT}/scripts/dependency_versions.sh"
+ cd "${PROJECT_ROOT}/external/globus-connect-server-deploy/docker"
+ git checkout "$DATAFED_GCS_SUBMODULE_VERSION"
+ docker build --progress plain --tag "gcs-ubuntu-base:latest" - < "./docker-files/Dockerfile.ubuntu-20.04"
+ cd "${PROJECT_ROOT}"
+ docker build \
+ -f "${PROJECT_ROOT}/docker/Dockerfile.dependencies" \
+ "${PROJECT_ROOT}" \
+ -t datafed-dependencies:latest
+ docker build \
+ -f "${PROJECT_ROOT}/docker/Dockerfile.runtime" \
+ "${PROJECT_ROOT}" \
+ -t datafed-runtime:latest
+ docker build -f \
+ "${PROJECT_ROOT}/repository/docker/Dockerfile" \
+ --build-arg DEPENDENCIES="datafed-dependencies" \
+ --build-arg RUNTIME="datafed-runtime" \
+ "${PROJECT_ROOT}" \
+ -t datafed-repo:latest
+ docker build -f \
+ "${PROJECT_ROOT}/repository/docker/Dockerfile.gcs" \
+ --build-arg DEPENDENCIES="datafed-dependencies" \
+ --build-arg RUNTIME="datafed-runtime" \
+ --build-arg GCS_IMAGE="gcs-ubuntu-base" \
+ "${PROJECT_ROOT}" \
+ -t datafed-gcs:latest
+fi
diff --git a/scripts/compose_cleanup_globus_files.sh b/scripts/compose_cleanup_globus_files.sh
new file mode 100755
index 000000000..4bffefb10
--- /dev/null
+++ b/scripts/compose_cleanup_globus_files.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/..")
+
+# This script should be run after generating the .env file as it will pull
+# values from the .env file
+Help()
+{
+ echo "$(basename $0) cleanup globus files. Note the .env file must exist."
+ echo " in the specified directory."
+ echo
+ echo "Syntax: $(basename $0) [-h|d]"
+ echo "options:"
+ echo "-h, --help Print this help message"
+ echo "-d, --directory Directory where globus folder is."
+}
+
+VALID_ARGS=$(getopt -o hd: --long 'help',directory: -- "$@")
+
+DIRECTORY=""
+eval set -- "$VALID_ARGS"
+while [ : ]; do
+ case "$1" in
+ -h | --help)
+ Help
+ exit 0
+ ;;
+ -d | --directory)
+ DIRECTORY="$2"
+ shift 2
+ ;;
+ --) shift;
+ break
+ ;;
+ \?) # incorrect option
+ echo "Error: Invalid option"
+ exit;;
+ esac
+done
+
+if [ ! -d "${DIRECTORY}" ]
+then
+ echo "The provided directory does not seem to exist: ${DIRECTORY}"
+ exit 1
+fi
+
+if [ ! -f "${DIRECTORY}/.env" ]
+then
+ echo "Missing . ${DIRECTORY}/.env file. This file needs to be"
+ echo "created first"
+ exit 1
+fi
+
+. "${DIRECTORY}/.env"
+
+export DATAFED_GLOBUS_DEPLOYMENT_KEY_PATH="$DATAFED_HOST_DEPLOYMENT_KEY_PATH"
+export DATAFED_GLOBUS_CRED_FILE_PATH="$DATAFED_HOST_CRED_FILE_PATH"
+
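+# The GCS_CLI_* variables let globus-connect-server and the cleanup script
+# authenticate non-interactively; they are read out of the credential and
+# deployment-key JSON files with jq below.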
+if [ -f "$DATAFED_HOST_CRED_FILE_PATH" ]
+then
+ export GCS_CLI_CLIENT_ID=$(jq -r .client < "${DATAFED_HOST_CRED_FILE_PATH}")
+ export GCS_CLI_CLIENT_SECRET=$(jq -r .secret < "${DATAFED_HOST_CRED_FILE_PATH}")
+fi
+
+if [ -f "$DATAFED_GLOBUS_DEPLOYMENT_KEY_PATH" ]
+then
+ export GCS_CLI_ENDPOINT_ID=$(jq -r .client_id < "${DATAFED_GLOBUS_DEPLOYMENT_KEY_PATH}")
+fi
+
+sudo globus-connect-server node cleanup
+
+DATAFED_GCS_ROOT_NAME="$DATAFED_GCS_ROOT_NAME" \
+python3 "${PROJECT_ROOT}/scripts/globus/globus_cleanup.py"
diff --git a/compose/generate_env.sh b/scripts/compose_generate_env.sh
similarity index 76%
rename from compose/generate_env.sh
rename to scripts/compose_generate_env.sh
index 8beae8fe6..14751e546 100755
--- a/compose/generate_env.sh
+++ b/scripts/compose_generate_env.sh
@@ -3,13 +3,64 @@ SCRIPT=$(realpath "$0")
SOURCE=$(dirname "$SCRIPT")
PROJECT_ROOT=$(realpath "${SOURCE}/..")
-if [ -f ".env" ]
+Help()
+{
+ echo "$(basename $0) Build .env file for compose."
+ echo
+ echo "Syntax: $(basename $0) [-h|d|r|m]"
+ echo "options:"
+ echo "-h, --help Print this help message"
+ echo "-d, --directory Directory where .env will be created."
+ echo "-r, --repo-images Create .env for just repo services."
+ echo "-m, --metadata-images Create .env for just metadata services"
+}
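+
+# Example usage (this is how the compose/*/generate_env.sh wrappers invoke it):
+# ./compose_generate_env.sh -d "$(pwd)"      # full .env (metadata + repo)
+# ./compose_generate_env.sh -d "$(pwd)" -m   # metadata variables only
+# ./compose_generate_env.sh -d "$(pwd)" -r   # repo variables only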
+
+VALID_ARGS=$(getopt -o hd:mr --long 'help',directory:,repo-images,metadata-images -- "$@")
+
+BUILD_REPO="TRUE"
+BUILD_METADATA="TRUE"
+COMPOSE_ENV_DIR=""
+eval set -- "$VALID_ARGS"
+while [ : ]; do
+ case "$1" in
+ -h | --help)
+ Help
+ exit 0
+ ;;
+ -d | --directory)
+ COMPOSE_ENV_DIR="$2"
+ shift 2
+ ;;
+ -r | --repo-images)
+ BUILD_METADATA="FALSE"
+ shift 1
+ ;;
+ -m | --metadata-images)
+ BUILD_REPO="FALSE"
+ shift 1
+ ;;
+ --) shift;
+ break
+ ;;
+ \?) # incorrect option
+ echo "Error: Invalid option"
+ exit;;
+ esac
+done
+
+if [ ! -d "${COMPOSE_ENV_DIR}" ]
then
- echo ".env already exist! Will not overwrite!"
+ echo "Invalid folder for .env file specified ${COMPOSE_ENV_DIR}"
exit 1
fi
-local_DATAFED_WEB_KEY_DIR="${PROJECT_ROOT}/compose/keys"
+if [ -f "${COMPOSE_ENV_DIR}/.env" ]
+then
+ echo "${COMPOSE_ENV_DIR}/.env already exist! Will not overwrite!"
+ exit 1
+fi
+
+local_DATAFED_WEB_KEY_DIR="${COMPOSE_ENV_DIR}/keys"
if [ ! -d "$local_DATAFED_WEB_KEY_DIR" ]
then
mkdir -p "$local_DATAFED_WEB_KEY_DIR"
@@ -79,7 +130,7 @@ else
fi
if [ -z "${DATAFED_GLOBUS_KEY_DIR}" ]
then
- local_DATAFED_GLOBUS_KEY_DIR="${PROJECT_ROOT}/compose/globus"
+ local_DATAFED_GLOBUS_KEY_DIR="${COMPOSE_ENV_DIR}/globus"
else
local_DATAFED_GLOBUS_KEY_DIR=$(printenv DATAFED_GLOBUS_KEY_DIR)
fi
@@ -128,6 +179,13 @@ else
local_DATAFED_COMPOSE_DATABASE_PORT=$(printenv DATAFED_COMPOSE_DATABASE_PORT)
fi
+if [ -z "${DATAFED_COMPOSE_GCS_IP}" ]
+then
+ local_DATAFED_COMPOSE_GCS_IP=""
+else
+ local_DATAFED_COMPOSE_GCS_IP=$(printenv DATAFED_COMPOSE_GCS_IP)
+fi
+
if [ -z "${DATAFED_COMPOSE_HOST_COLLECTION_MOUNT}" ]
then
local_DATAFED_HOST_COLLECTION_MOUNT="$HOME/compose_collection"
@@ -169,36 +227,58 @@ fi
# Make the logs folder if it doesn't exist
-mkdir -p "${PROJECT_ROOT}/compose/logs"
+mkdir -p "${COMPOSE_ENV_DIR}/logs"
+
+if [ -f "${COMPOSE_ENV_DIR}/.env" ]
+then
+ rm "${COMPOSE_ENV_DIR}/.env"
+fi
+touch "${COMPOSE_ENV_DIR}/.env"
# Do not put " around anything and do not add comments in the .env file
-cat << EOF > ".env"
-DATAFED_REPO_USER=datafed
-DATAFED_USER89_PASSWORD=${local_DATAFED_COMPOSE_USER89_PASSWORD}
-DATAFED_REPO_FORM_PATH=${local_DATAFED_COMPOSE_REPO_FORM_PATH}
+
+if [ "${BUILD_METADATA}" == "TRUE" ] || [ "${BUILD_REPO}" == "TRUE" ]
+then
+
+cat << EOF >> "${COMPOSE_ENV_DIR}/.env"
+DATAFED_HTTPS_SERVER_PORT=${local_DATAFED_COMPOSE_HTTPS_SERVER_PORT}
+DATAFED_DOMAIN=${local_DATAFED_COMPOSE_DOMAIN}
+DATAFED_UID=$(id -u)
+DATAFED_CONTAINER_LOG_PATH=${local_DATAFED_COMPOSE_CONTAINER_LOG_PATH}
+EOF
+fi
+
+if [ "${BUILD_METADATA}" == "TRUE" ]
+then
+cat << EOF >> "${COMPOSE_ENV_DIR}/.env"
DATAFED_GLOBUS_APP_SECRET=${local_DATAFED_COMPOSE_GLOBUS_APP_SECRET}
DATAFED_GLOBUS_APP_ID=${local_DATAFED_COMPOSE_GLOBUS_APP_ID}
DATAFED_ZEROMQ_SESSION_SECRET=${local_DATAFED_COMPOSE_ZEROMQ_SESSION_SECRET}
DATAFED_ZEROMQ_SYSTEM_SECRET=${local_DATAFED_COMPOSE_ZEROMQ_SYSTEM_SECRET}
-DATAFED_DOMAIN=${local_DATAFED_COMPOSE_DOMAIN}
-DATAFED_HTTPS_SERVER_PORT=${local_DATAFED_COMPOSE_HTTPS_SERVER_PORT}
DATAFED_WEB_CERT_PATH=/opt/datafed/keys/${local_DATAFED_WEB_CERT_NAME}
DATAFED_WEB_KEY_PATH=/opt/datafed/keys/${local_DATAFED_WEB_KEY_NAME}
-DATAFED_CONTAINER_LOG_PATH=${local_DATAFED_COMPOSE_CONTAINER_LOG_PATH}
DATAFED_DATABASE_PASSWORD=${local_DATAFED_COMPOSE_DATABASE_PASSWORD}
DATAFED_DATABASE_IP_ADDRESS=${local_DATAFED_COMPOSE_DATABASE_IP_ADDRESS}
DATAFED_DATABASE_PORT=${local_DATAFED_COMPOSE_DATABASE_PORT}
+EOF
+fi
+
+if [ "${BUILD_REPO}" == "TRUE" ]
+then
+cat << EOF >> "${COMPOSE_ENV_DIR}/.env"
+DATAFED_REPO_USER=datafed
DATAFED_GCS_ROOT_NAME=DataFed_Compose
+DATAFED_GCS_IP=${local_DATAFED_COMPOSE_GCS_IP}
DATAFED_REPO_ID_AND_DIR=compose-home
-DATAFED_UID=$(id -u)
DATAFED_HOST_COLLECTION_MOUNT=${local_DATAFED_HOST_COLLECTION_MOUNT}
DATAFED_HOST_DEPLOYMENT_KEY_PATH=${local_DATAFED_COMPOSE_HOST_DEPLOYMENT_KEY_PATH}
DATAFED_HOST_CRED_FILE_PATH=${local_DATAFED_HOST_CRED_FILE_PATH}
DATAFED_GLOBUS_CONTROL_PORT=${local_DATAFED_GLOBUS_CONTROL_PORT}
DATAFED_GLOBUS_SUBSCRIPTION=${local_DATAFED_GLOBUS_SUBSCRIPTION}
EOF
+fi
-unset_env_file_name="unset_env.sh"
+unset_env_file_name="${COMPOSE_ENV_DIR}/unset_env.sh"
echo "#!/bin/bash" > "${unset_env_file_name}"
echo "# Was auto generated by $SCRIPT" >> "${unset_env_file_name}"
while IFS='=' read -r key value; do
@@ -207,6 +287,6 @@ while IFS='=' read -r key value; do
# Print the content before the '=' sign
echo "unset $key" >> "${unset_env_file_name}"
fi
-done < ".env"
+done < "${COMPOSE_ENV_DIR}/.env"
chmod +x "$unset_env_file_name"
diff --git a/scripts/compose_generate_globus_files.sh b/scripts/compose_generate_globus_files.sh
new file mode 100755
index 000000000..bb488f336
--- /dev/null
+++ b/scripts/compose_generate_globus_files.sh
@@ -0,0 +1,80 @@
+#!/bin/bash
+SCRIPT=$(realpath "$0")
+SOURCE=$(dirname "$SCRIPT")
+PROJECT_ROOT=$(realpath "${SOURCE}/..")
+
+# This script should be run after generating the .env file as it will pull
+# values from the .env file
+Help()
+{
+ echo "$(basename $0) generate globus files. Note the .env file must exist."
+ echo " in the specified directory."
+ echo
+ echo "Syntax: $(basename $0) [-h|d]"
+ echo "options:"
+ echo "-h, --help Print this help message"
+ echo "-d, --directory Directory where globus folder will be"
+ echo " created."
+}
+
+VALID_ARGS=$(getopt -o hd: --long 'help',directory: -- "$@")
+
+DIRECTORY=""
+eval set -- "$VALID_ARGS"
+while [ : ]; do
+ case "$1" in
+ -h | --help)
+ Help
+ exit 0
+ ;;
+ -d | --directory)
+ DIRECTORY="$2"
+ shift 2
+ ;;
+ --) shift;
+ break
+ ;;
+ \?) # incorrect option
+ echo "Error: Invalid option"
+ exit;;
+ esac
+done
+
+if [ ! -d "${DIRECTORY}" ]
+then
+ echo "The provided directory does not seem to exist: ${DIRECTORY}"
+ exit 1
+fi
+
+if [ ! -f "${DIRECTORY}/.env" ]
+then
+ echo "Missing . ${DIRECTORY}/.env file. This file needs to be"
+ echo "created first"
+ exit 1
+fi
+
+local_DATAFED_GLOBUS_KEY_DIR="${DIRECTORY}/globus"
+if [ ! -d "$local_DATAFED_GLOBUS_KEY_DIR" ]
+then
+ mkdir -p "$local_DATAFED_GLOBUS_KEY_DIR"
+fi
+
+. "${DIRECTORY}/.env"
+
+DATAFED_GLOBUS_DEPLOYMENT_KEY_PATH="$DATAFED_HOST_DEPLOYMENT_KEY_PATH" \
+DATAFED_GLOBUS_CRED_FILE_PATH="$DATAFED_HOST_CRED_FILE_PATH" \
+DATAFED_GLOBUS_CONTROL_PORT="$DATAFED_GLOBUS_CONTROL_PORT" \
+DATAFED_GLOBUS_SUBSCRIPTION="$DATAFED_GLOBUS_SUBSCRIPTION" \
+DATAFED_GCS_ROOT_NAME="$DATAFED_GCS_ROOT_NAME" \
+ python3 "${PROJECT_ROOT}/scripts/globus/initialize_globus_endpoint.py"