From 5e1fdf88c4b20afba21ea6e6f07f3e52de084a1c Mon Sep 17 00:00:00 2001 From: Jason Ross Date: Fri, 23 Sep 2022 20:36:13 -0400 Subject: [PATCH 01/32] restructured the way docker is handled --- docker/.env | 2 +- docker/Dockerfile | 56 +-------- docker/Dockerfile-aws | 39 +++++++ docker/Dockerfile-azure | 39 +++++++ docker/Dockerfile-base | 45 ++++++++ docker/Dockerfile-gcp | 39 +++++++ docker/bin/container-install-prereqs.sh | 8 +- docker/build.sh | 144 ++++++++++++++++++++---- docker/config/aws.env | 6 + docker/config/azure.env | 6 + docker/config/{build.env => base.env} | 8 +- docker/config/gcp.env | 6 + docker/config/ibm.env | 7 ++ docker/docker-compose.yaml | 4 +- docker/tag.sh | 22 ++++ 15 files changed, 346 insertions(+), 85 deletions(-) mode change 100644 => 120000 docker/Dockerfile create mode 100644 docker/Dockerfile-aws create mode 100644 docker/Dockerfile-azure create mode 100644 docker/Dockerfile-base create mode 100644 docker/Dockerfile-gcp create mode 100644 docker/config/aws.env create mode 100644 docker/config/azure.env rename docker/config/{build.env => base.env} (78%) create mode 100644 docker/config/gcp.env create mode 100644 docker/config/ibm.env create mode 100755 docker/tag.sh diff --git a/docker/.env b/docker/.env index eedf83312..adb6df087 120000 --- a/docker/.env +++ b/docker/.env @@ -1 +1 @@ -config/build.env \ No newline at end of file +config/base.env \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile deleted file mode 100644 index 9c49bcc40..000000000 --- a/docker/Dockerfile +++ /dev/null @@ -1,55 +0,0 @@ -FROM python:3.8 - -LABEL maintainer="Jason Ross " - -ARG BUILD_DATE -ARG NAME -ARG DESCRIPTION -ARG VCS_REF -ARG VCS_URL -ARG VENDOR -ARG VERSION -ARG IMAGE_NAME - -ENV DEBIAN_FRONTEND=${DEBIAN_FRONTEND} -ENV TERM=${TERM} -ENV IBMCLOUD_COLOR=${IBMCLOUD_COLOR} - -# Build-time metadata as defined at http://label-schema.org -LABEL \ - org.label-schema.schema-version="1.0" \ - org.label-schema.build-date="${BUILD_DATE}" \ - org.label-schema.name="${NAME}" \ - org.label-schema.description="${DESCRIPTION}" \ - org.label-schema.vcs-ref="${VCS_REF}" \ - org.label-schema.vcs-url="${VCS_URL}" \ - org.label-schema.vendor="${VENDOR}" \ - org.label-schema.version="${VERSION}" \ - org.label.image-name="${IMAGE_NAME}" - -# Copy helper scripts to container -ADD bin /root/bin - -# Install required software -RUN ["/bin/bash", "-c", "/root/bin/container-install-prereqs.sh"] - -# Install AWS CLI -RUN ["/bin/bash", "-c", "/root/bin/container-install-aws2.sh"] - -# Install Azure CLI -RUN ["/bin/bash", "-c", "/root/bin/container-install-azure.sh"] - -# Install gCloud SDK -RUN ["/bin/bash", "-c", "/root/bin/container-install-gcp.sh"] - -# Install ScoutSuite -RUN ["/bin/bash", "-c", "/root/bin/container-install-scoutsuite.sh"] - -# Set a nice message -RUN ["/bin/bash", "-c", "/root/bin/container-set-init.sh"] - -# Remove scripts -RUN ["rm", "-rf", "/root/bin"] - -# Command -CMD ["/bin/bash"] diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 120000 index 000000000..b586e5c2b --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1 @@ +Dockerfile-base \ No newline at end of file diff --git a/docker/Dockerfile-aws b/docker/Dockerfile-aws new file mode 100644 index 000000000..0289feee4 --- /dev/null +++ b/docker/Dockerfile-aws @@ -0,0 +1,39 @@ +FROM nccgroup/scoutsuite-base:5.12.0-01 + +LABEL maintainer="Jason Ross " + +ARG BUILD_DATE +ARG NAME +ARG DESCRIPTION +ARG VCS_REF +ARG VCS_URL +ARG VENDOR +ARG VERSION +ARG IMAGE_NAME + +ENV 
DEBIAN_FRONTEND=${DEBIAN_FRONTEND} +ENV TERM=${TERM} + +# Build-time metadata as defined at http://label-schema.org +LABEL \ + org.label-schema.schema-version="1.0" \ + org.label-schema.build-date="${BUILD_DATE}" \ + org.label-schema.name="${NAME}" \ + org.label-schema.description="${DESCRIPTION}" \ + org.label-schema.vcs-ref="${VCS_REF}" \ + org.label-schema.vcs-url="${VCS_URL}" \ + org.label-schema.vendor="${VENDOR}" \ + org.label-schema.version="${VERSION}" \ + org.label.image-name="${IMAGE_NAME}" + +# Copy helper scripts to container +ADD bin /root/bin + +# Install AWS CLI +RUN ["/bin/bash", "-c", "/root/bin/container-install-aws2.sh"] + +# Remove scripts +RUN ["rm", "-rf", "/root/bin"] + +# Command +CMD ["/bin/bash"] diff --git a/docker/Dockerfile-azure b/docker/Dockerfile-azure new file mode 100644 index 000000000..8ed1a11ea --- /dev/null +++ b/docker/Dockerfile-azure @@ -0,0 +1,39 @@ +FROM nccgroup/scoutsuite-base:5.12.0-01 + +LABEL maintainer="Jason Ross " + +ARG BUILD_DATE +ARG NAME +ARG DESCRIPTION +ARG VCS_REF +ARG VCS_URL +ARG VENDOR +ARG VERSION +ARG IMAGE_NAME + +ENV DEBIAN_FRONTEND=${DEBIAN_FRONTEND} +ENV TERM=${TERM} + +# Build-time metadata as defined at http://label-schema.org +LABEL \ + org.label-schema.schema-version="1.0" \ + org.label-schema.build-date="${BUILD_DATE}" \ + org.label-schema.name="${NAME}" \ + org.label-schema.description="${DESCRIPTION}" \ + org.label-schema.vcs-ref="${VCS_REF}" \ + org.label-schema.vcs-url="${VCS_URL}" \ + org.label-schema.vendor="${VENDOR}" \ + org.label-schema.version="${VERSION}" \ + org.label.image-name="${IMAGE_NAME}" + +# Copy helper scripts to container +ADD bin /root/bin + +# Install Azure CLI +RUN ["/bin/bash", "-c", "/root/bin/container-install-azure.sh"] + +# Remove scripts +RUN ["rm", "-rf", "/root/bin"] + +# Command +CMD ["/bin/bash"] diff --git a/docker/Dockerfile-base b/docker/Dockerfile-base new file mode 100644 index 000000000..13ac913f9 --- /dev/null +++ b/docker/Dockerfile-base @@ -0,0 +1,45 @@ +FROM python:3.10 + +LABEL maintainer="Jason Ross " + +ARG BUILD_DATE +ARG NAME +ARG DESCRIPTION +ARG VCS_REF +ARG VCS_URL +ARG VENDOR +ARG VERSION +ARG IMAGE_NAME + +ENV DEBIAN_FRONTEND=${DEBIAN_FRONTEND} +ENV TERM=${TERM} + +# Build-time metadata as defined at http://label-schema.org +LABEL \ + org.label-schema.schema-version="1.0" \ + org.label-schema.build-date="${BUILD_DATE}" \ + org.label-schema.name="${NAME}" \ + org.label-schema.description="${DESCRIPTION}" \ + org.label-schema.vcs-ref="${VCS_REF}" \ + org.label-schema.vcs-url="${VCS_URL}" \ + org.label-schema.vendor="${VENDOR}" \ + org.label-schema.version="${VERSION}" \ + org.label.image-name="${IMAGE_NAME}" + +# Copy helper scripts to container +ADD bin /root/bin + +# Install required software +RUN ["/bin/bash", "-c", "/root/bin/container-install-prereqs.sh"] + +# Install ScoutSuite +RUN ["/bin/bash", "-c", "/root/bin/container-install-scoutsuite.sh"] + +# Set a nice message +RUN ["/bin/bash", "-c", "/root/bin/container-set-init.sh"] + +# Remove scripts +RUN ["rm", "-rf", "/root/bin"] + +# Command +CMD ["/bin/bash"] diff --git a/docker/Dockerfile-gcp b/docker/Dockerfile-gcp new file mode 100644 index 000000000..823245c8c --- /dev/null +++ b/docker/Dockerfile-gcp @@ -0,0 +1,39 @@ +FROM nccgroup/scoutsuite-base:5.12.0-01 + +LABEL maintainer="Jason Ross " + +ARG BUILD_DATE +ARG NAME +ARG DESCRIPTION +ARG VCS_REF +ARG VCS_URL +ARG VENDOR +ARG VERSION +ARG IMAGE_NAME + +ENV DEBIAN_FRONTEND=${DEBIAN_FRONTEND} +ENV TERM=${TERM} + +# Build-time metadata as defined at 
http://label-schema.org +LABEL \ + org.label-schema.schema-version="1.0" \ + org.label-schema.build-date="${BUILD_DATE}" \ + org.label-schema.name="${NAME}" \ + org.label-schema.description="${DESCRIPTION}" \ + org.label-schema.vcs-ref="${VCS_REF}" \ + org.label-schema.vcs-url="${VCS_URL}" \ + org.label-schema.vendor="${VENDOR}" \ + org.label-schema.version="${VERSION}" \ + org.label.image-name="${IMAGE_NAME}" + +# Copy helper scripts to container +ADD bin /root/bin + +# Install gCloud SDK +RUN ["/bin/bash", "-c", "/root/bin/container-install-gcp.sh"] + +# Remove scripts +RUN ["rm", "-rf", "/root/bin"] + +# Command +CMD ["/bin/bash"] diff --git a/docker/bin/container-install-prereqs.sh b/docker/bin/container-install-prereqs.sh index af4173210..e73f4c2b3 100755 --- a/docker/bin/container-install-prereqs.sh +++ b/docker/bin/container-install-prereqs.sh @@ -29,13 +29,13 @@ apt-get install -qy \ less \ lsb-release \ nano \ - python3 \ - python3-pip \ + # python3 \ + # python3-pip \ tzdata \ unzip \ vim \ - virtualenv \ - virtualenvwrapper \ + # virtualenv \ + # virtualenvwrapper \ wget echo -e "\n\nSoftware Pre-reqs Installation Complete!\n\n" diff --git a/docker/build.sh b/docker/build.sh index 1ef82ac22..38f6df5c4 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -1,20 +1,126 @@ #!/bin/bash -echo -e "\n\nbuild running...\n" -source ./config/build.env - -BUILD_CMD="docker build \ --t ${IMAGE_NAME} \ --t ${IMAGE_NAME} \ ---build-arg BUILD_DATE=${BUILD_DATE} \ ---build-arg NAME=${NAME} \ ---build-arg VCS_REF=${VCS_REF} \ ---build-arg VCS_URL=${VCS_URL} \ ---build-arg VENDOR=${VENDOR} \ ---build-arg VERSION=${VERSION} \ ---build-arg IMAGE_NAME=${IMAGE_NAME} \ -." -# wtf. idk why this doesn't work -# --build-arg DESCRIPTION=\"${DESCRIPTION}\" \ - -echo -e "\n\nbuilding image using:\n${BUILD_CMD}" -exec ${BUILD_CMD} + +SEP1="==============================" +SEP2="------------------------------" + +echo -e "\n\n${SEP1}\n" +echo -e "\nBEGINNING BUILD...\n" + +case $1 in + + "base") + ##################### + #### BASE IMAGE #### + ##################### + echo -e "\n${SEP2}\nbuilding base image...\n" + source ./config/base.env + + BUILD_CMD="docker build \ + -f Dockerfile-base \ + -t ${IMAGE_NAME} \ + --build-arg BUILD_DATE=${BUILD_DATE} \ + --build-arg NAME=${NAME} \ + --build-arg VCS_REF=${VCS_REF} \ + --build-arg VCS_URL=${VCS_URL} \ + --build-arg VENDOR=${VENDOR} \ + --build-arg VERSION=${VERSION} \ + --build-arg IMAGE_NAME=${IMAGE_NAME} \ + ." + + echo -e "\n\nbuilding image using:\n${BUILD_CMD}" + exec ${BUILD_CMD} + echo -e "\nbase image build complete!\n${SEP2}\n" + ;; + + "aws") + ##################### + #### AWS IMAGE #### + ##################### + + echo -e "\n${SEP2}\nbuilding aws image...\n" + source ./config/base.env + source ./config/aws.env + + BUILD_CMD="docker build \ + -f Dockerfile-aws \ + -t ${IMAGE_NAME} \ + --build-arg BUILD_DATE=${BUILD_DATE} \ + --build-arg NAME=${NAME} \ + --build-arg VCS_REF=${VCS_REF} \ + --build-arg VCS_URL=${VCS_URL} \ + --build-arg VENDOR=${VENDOR} \ + --build-arg VERSION=${VERSION} \ + --build-arg IMAGE_NAME=${IMAGE_NAME} \ + ." 
+ + echo -e "\n\nbuilding image using:\n${BUILD_CMD}" + exec ${BUILD_CMD} + echo -e "\naws image build complete!\n${SEP2}\n" + ;; + + "gcp") + ##################### + #### GCP IMAGE #### + ##################### + + echo -e "\n${SEP2}\nbuilding gcp image...\n" + source ./config/base.env + source ./config/gcp.env + + BUILD_CMD="docker build \ + -f Dockerfile-gcp \ + -t ${IMAGE_NAME} \ + --build-arg BUILD_DATE=${BUILD_DATE} \ + --build-arg NAME=${NAME} \ + --build-arg VCS_REF=${VCS_REF} \ + --build-arg VCS_URL=${VCS_URL} \ + --build-arg VENDOR=${VENDOR} \ + --build-arg VERSION=${VERSION} \ + --build-arg IMAGE_NAME=${IMAGE_NAME} \ + ." + + echo -e "\n\nbuilding image using:\n${BUILD_CMD}" + exec ${BUILD_CMD} + echo -e "\ngcp image build complete!\n${SEP2}\n" + ;; + + "azure") + ##################### + #### AZURE IMAGE #### + ##################### + echo -e "\n${SEP2}\nbuilding azure image...\n" + source ./config/base.env + source ./config/azure.env + + BUILD_CMD="docker build \ + -f Dockerfile-azure \ + -t ${IMAGE_NAME} \ + --build-arg BUILD_DATE=${BUILD_DATE} \ + --build-arg NAME=${NAME} \ + --build-arg VCS_REF=${VCS_REF} \ + --build-arg VCS_URL=${VCS_URL} \ + --build-arg VENDOR=${VENDOR} \ + --build-arg VERSION=${VERSION} \ + --build-arg IMAGE_NAME=${IMAGE_NAME} \ + ." + + echo -e "\n\nbuilding image using:\n${BUILD_CMD}" + exec ${BUILD_CMD} + ;; + + "all") + $0 base + $0 aws + $0 gcp + $0 azure + ;; + + *) + echo -e "\nUsage: $0 [base | aws | gcp | azure | all ]" + echo -e "Using default: base\n" + $0 base + ;; + +esac + +echo -e "\n${SEP1}\nBUILD COMPLETE!...\n" \ No newline at end of file diff --git a/docker/config/aws.env b/docker/config/aws.env new file mode 100644 index 000000000..4b346ec8a --- /dev/null +++ b/docker/config/aws.env @@ -0,0 +1,6 @@ +CLOUD='aws' +IMAGE_NAME="${VENDOR}/${NAME}-${CLOUD}:${VERSION}" + +# These are passed in as env vars to the container at runtime +DEBIAN_FRONTEND=noninteractive +TERM=linux \ No newline at end of file diff --git a/docker/config/azure.env b/docker/config/azure.env new file mode 100644 index 000000000..aa19957c8 --- /dev/null +++ b/docker/config/azure.env @@ -0,0 +1,6 @@ +CLOUD='azure' +IMAGE_NAME="${VENDOR}/${NAME}-${CLOUD}:${VERSION}" + +# These are passed in as env vars to the container at runtime +DEBIAN_FRONTEND=noninteractive +TERM=linux \ No newline at end of file diff --git a/docker/config/build.env b/docker/config/base.env similarity index 78% rename from docker/config/build.env rename to docker/config/base.env index cd22fe07a..98ef442ac 100644 --- a/docker/config/build.env +++ b/docker/config/base.env @@ -1,13 +1,13 @@ VCS_REF=$(git rev-parse --short HEAD) VCS_URL='https://github.com/nccgroup/ScoutSuite' -VERSION='0.3.0' +VERSION='5.12.0-01' BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") VENDOR='nccgroup' NAME='scoutsuite' +CLOUD='base' DESCRIPTION='A ready-to-go NCC Group ScoutSuite container based on Ubuntu.' 
-IMAGE_NAME="${VENDOR}/${NAME}:${VERSION}" +IMAGE_NAME="${VENDOR}/${NAME}-${CLOUD}:${VERSION}" # These are passed in as env vars to the container at runtime -IBMCLOUD_COLOR=true DEBIAN_FRONTEND=noninteractive -TERM=linux \ No newline at end of file +TERM=linux diff --git a/docker/config/gcp.env b/docker/config/gcp.env new file mode 100644 index 000000000..55eee77af --- /dev/null +++ b/docker/config/gcp.env @@ -0,0 +1,6 @@ +CLOUD='gcp' +IMAGE_NAME="${VENDOR}/${NAME}-${CLOUD}:${VERSION}" + +# These are passed in as env vars to the container at runtime +DEBIAN_FRONTEND=noninteractive +TERM=linux \ No newline at end of file diff --git a/docker/config/ibm.env b/docker/config/ibm.env new file mode 100644 index 000000000..3a4a4dc5e --- /dev/null +++ b/docker/config/ibm.env @@ -0,0 +1,7 @@ +CLOUD='ibm' +IMAGE_NAME="${VENDOR}/${NAME}-${CLOUD}:${VERSION}" + +# These are passed in as env vars to the container at runtime +IBMCLOUD_COLOR=true +DEBIAN_FRONTEND=noninteractive +TERM=linux \ No newline at end of file diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 296424a62..ba4c0eeaf 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -1,4 +1,4 @@ -version: "3.8" +version: "3.9" services: ncc-scoutsuite: image: scoutsuite:latest @@ -15,4 +15,4 @@ services: - VENDOR=${VENDOR} - NAME=${NAME} - IMAGE_NAME=${IMAGE_NAME} - - DESCRIPTION=${DESCRIPTION} + - DESCRIPTION=${DESCRIPTION} \ No newline at end of file diff --git a/docker/tag.sh b/docker/tag.sh new file mode 100755 index 000000000..062d059de --- /dev/null +++ b/docker/tag.sh @@ -0,0 +1,22 @@ +#!/bin/bash +source .env +#echo ${VERSION} +docker tag nccgroup/scoutsuite-aws:${VERSION} rossja/scoutsuite-aws:${VERSION} +docker tag nccgroup/scoutsuite-azure:${VERSION} rossja/scoutsuite-azure:${VERSION} +docker tag nccgroup/scoutsuite-gcp:${VERSION} rossja/scoutsuite-gcp:${VERSION} +docker tag nccgroup/scoutsuite-base:${VERSION} rossja/scoutsuite-base:${VERSION} + +docker tag rossja/scoutsuite-aws:${VERSION} rossja/scoutsuite-aws:latest +docker tag rossja/scoutsuite-azure:${VERSION} rossja/scoutsuite-azure:latest +docker tag rossja/scoutsuite-gcp:${VERSION} rossja/scoutsuite-gcp:latest +docker tag rossja/scoutsuite-base:${VERSION} rossja/scoutsuite-base:latest + +docker push rossja/scoutsuite-aws:${VERSION} +docker push rossja/scoutsuite-azure:${VERSION} +docker push rossja/scoutsuite-gcp:${VERSION} +docker push rossja/scoutsuite-base:${VERSION} + +docker push rossja/scoutsuite-aws:latest +docker push rossja/scoutsuite-azure:latest +docker push rossja/scoutsuite-gcp:latest +docker push rossja/scoutsuite-base:latest From 44c9e2cdb0817bfb2f04a09a0e82af7e30ea2e3e Mon Sep 17 00:00:00 2001 From: Jason Ross Date: Fri, 23 Sep 2022 21:05:05 -0400 Subject: [PATCH 02/32] rm compose to eliminate confusion --- docker/docker-compose.yaml | 18 ------------------ 1 file changed, 18 deletions(-) delete mode 100644 docker/docker-compose.yaml diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml deleted file mode 100644 index ba4c0eeaf..000000000 --- a/docker/docker-compose.yaml +++ /dev/null @@ -1,18 +0,0 @@ -version: "3.9" -services: - ncc-scoutsuite: - image: scoutsuite:latest - env_file: - - config/build.env - build: - context: . 
- dockerfile: Dockerfile - args: - - VCS_REF=${VCS_REF} - - VCS_URL=${VCS_URL} - - VERSION=${VERSION} - - BUILD_DATE=${BUILD_DATE} - - VENDOR=${VENDOR} - - NAME=${NAME} - - IMAGE_NAME=${IMAGE_NAME} - - DESCRIPTION=${DESCRIPTION} \ No newline at end of file From 8cd0f3b0cd7cac625b7f9582a5a78898a8b6ca2e Mon Sep 17 00:00:00 2001 From: x4v13r64 Date: Thu, 23 Mar 2023 14:17:05 +0100 Subject: [PATCH 03/32] Make test case insensitive --- ScoutSuite/core/conditions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ScoutSuite/core/conditions.py b/ScoutSuite/core/conditions.py index 8833d71d3..08aa3a7f0 100755 --- a/ScoutSuite/core/conditions.py +++ b/ScoutSuite/core/conditions.py @@ -115,9 +115,9 @@ def pass_condition(b, test, a): # Dictionary keys tests elif test == 'withKey': - result = (a in b) + result = a.lower() in map(str.lower, b) elif test == 'withoutKey': - result = a not in b + result = a.lower() not in map(str.lower, b) # String test elif test == 'containString': From e5a30c78f7e36a515f4ffba54e20c0e6403beb1b Mon Sep 17 00:00:00 2001 From: x4v13r64 Date: Thu, 23 Mar 2023 17:37:41 +0100 Subject: [PATCH 04/32] Add case insensitive conditions --- ScoutSuite/core/conditions.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/ScoutSuite/core/conditions.py b/ScoutSuite/core/conditions.py index 08aa3a7f0..d0f140beb 100755 --- a/ScoutSuite/core/conditions.py +++ b/ScoutSuite/core/conditions.py @@ -115,8 +115,12 @@ def pass_condition(b, test, a): # Dictionary keys tests elif test == 'withKey': - result = a.lower() in map(str.lower, b) + result = a in b elif test == 'withoutKey': + result = a not in b + elif test == 'withKeyCaseInsensitive': + result = a.lower() in map(str.lower, b) + elif test == 'withoutKeyCaseInsensitive': result = a.lower() not in map(str.lower, b) # String test From 3f456a756dc8a6c76c3b85e9316854062cb293eb Mon Sep 17 00:00:00 2001 From: x4v13r64 Date: Thu, 23 Mar 2023 17:39:42 +0100 Subject: [PATCH 05/32] Include case insensitive checks --- .../aws/rules/conditions/policy-statement-any-principal.json | 2 +- .../findings/iam-assume-role-lacks-external-id-and-mfa.json | 2 +- .../providers/aws/rules/findings/iam-assume-role-no-mfa.json | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ScoutSuite/providers/aws/rules/conditions/policy-statement-any-principal.json b/ScoutSuite/providers/aws/rules/conditions/policy-statement-any-principal.json index 1ff894a9e..f95e50f46 100755 --- a/ScoutSuite/providers/aws/rules/conditions/policy-statement-any-principal.json +++ b/ScoutSuite/providers/aws/rules/conditions/policy-statement-any-principal.json @@ -2,7 +2,7 @@ "conditions": [ "or", [ "_STATEMENT_.Principal", "containAtLeastOneOf", "*" ], [ "and", - [ "_STATEMENT_.Principal", "withKey", "AWS" ], + [ "_STATEMENT_.Principal", "withKeyCaseInsensitive", "AWS" ], [ "_STATEMENT_.Principal.AWS", "containAtLeastOneOf", "*" ] ] ] diff --git a/ScoutSuite/providers/aws/rules/findings/iam-assume-role-lacks-external-id-and-mfa.json b/ScoutSuite/providers/aws/rules/findings/iam-assume-role-lacks-external-id-and-mfa.json index e429bf822..1bd5d6549 100755 --- a/ScoutSuite/providers/aws/rules/findings/iam-assume-role-lacks-external-id-and-mfa.json +++ b/ScoutSuite/providers/aws/rules/findings/iam-assume-role-lacks-external-id-and-mfa.json @@ -22,7 +22,7 @@ ], [ "iam.roles.id.assume_role_policy.PolicyDocument.Statement.id.Principal", - "withKey", + "withKeyCaseInsensitive", "AWS" ], [ diff --git 
a/ScoutSuite/providers/aws/rules/findings/iam-assume-role-no-mfa.json b/ScoutSuite/providers/aws/rules/findings/iam-assume-role-no-mfa.json index e71e276e3..6a77db59a 100755 --- a/ScoutSuite/providers/aws/rules/findings/iam-assume-role-no-mfa.json +++ b/ScoutSuite/providers/aws/rules/findings/iam-assume-role-no-mfa.json @@ -21,7 +21,7 @@ ], [ "iam.roles.id.assume_role_policy.PolicyDocument.Statement.id.Principal", - "withKey", + "withKeyCaseInsensitive", "AWS" ], [ From 77388efa83d7552ef4412f9dbace2d50340eebe5 Mon Sep 17 00:00:00 2001 From: x4v13r64 Date: Thu, 23 Mar 2023 17:39:51 +0100 Subject: [PATCH 06/32] Include case insensitive checks and add conditions --- .../policy-statement-poor-condition.json | 219 ++++++++++++------ 1 file changed, 149 insertions(+), 70 deletions(-) diff --git a/ScoutSuite/providers/aws/rules/conditions/policy-statement-poor-condition.json b/ScoutSuite/providers/aws/rules/conditions/policy-statement-poor-condition.json index c70bb5768..cdec67bc2 100755 --- a/ScoutSuite/providers/aws/rules/conditions/policy-statement-poor-condition.json +++ b/ScoutSuite/providers/aws/rules/conditions/policy-statement-poor-condition.json @@ -1,75 +1,154 @@ { - "conditions": [ "or", - [ "_STATEMENT_.", "withoutKey", "Condition" ], + "conditions": [ "or", + [ "_STATEMENT_.", "withoutKey", "Condition" ], + [ "and", + [ "and", + [ "_STATEMENT_.Condition.", "withoutKey", "ArnEquals" ], + [ "_STATEMENT_.Condition.", "withoutKey", "ForAnyValue:ArnEquals" ] + ], + [ "and", + [ "_STATEMENT_.Condition.", "withoutKey", "ArnLike" ], + [ "_STATEMENT_.Condition.", "withoutKey", "ForAnyValue:ArnLike" ] + ], + [ "or", + [ "_STATEMENT_.Condition.", "withoutKey", "StringEquals" ], [ "and", - [ "and", - [ "_STATEMENT_.Condition.", "withoutKey", "ArnEquals" ], - [ "_STATEMENT_.Condition.", "withoutKey", "ForAnyValue:ArnEquals" ] - ], - [ "and", - [ "_STATEMENT_.Condition.", "withoutKey", "ArnLike" ], - [ "_STATEMENT_.Condition.", "withoutKey", "ForAnyValue:ArnLike" ] - ], - [ "or", - [ "_STATEMENT_.Condition.", "withoutKey", "StringEquals" ], - [ "and", - [ "_STATEMENT_.Condition.StringEquals.", "withoutKey", "AWS:SourceArn" ], - [ "_STATEMENT_.Condition.StringEquals.", "withoutKey", "AWS:SourceOwner" ], - [ "_STATEMENT_.Condition.StringEquals.", "withoutKey", "kms:ViaService" ], - [ "_STATEMENT_.Condition.StringEquals.", "withoutKey", "kms:CallerAccount" ], - [ "_STATEMENT_.Condition.StringEquals.", "withoutKey", "iam:PassedToService" ] - ] - ], - [ "or", - [ "_STATEMENT_.Condition.", "withoutKey", "StringEqualsIgnoreCase" ], - [ "and", - [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKey", "AWS:SourceArn" ], - [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKey", "AWS:SourceOwner" ], - [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKey", "kms:ViaService" ], - [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKey", "kms:CallerAccount" ], - [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKey", "iam:PassedToService" ] - ] - ], - [ "or", - [ "_STATEMENT_.Condition.", "withoutKey", "StringLike" ], - [ "and", - [ "_STATEMENT_.Condition.StringLike.", "withoutKey", "AWS:SourceArn" ], - [ "_STATEMENT_.Condition.StringLike.", "withoutKey", "AWS:SourceOwner" ], - [ "_STATEMENT_.Condition.StringLike.", "withoutKey", "kms:ViaService" ], - [ "_STATEMENT_.Condition.StringLike.", "withoutKey", "kms:CallerAccount" ], - [ "_STATEMENT_.Condition.StringLike.", "withoutKey", "iam:PassedToService" ] - ] - ], - [ "or", - [ "_STATEMENT_.Condition.", "withoutKey", 
"ForAnyValue:StringEquals" ], - [ "and", - [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKey", "AWS:SourceArn" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKey", "AWS:SourceOwner" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKey", "kms:ViaService" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKey", "kms:CallerAccount" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKey", "iam:PassedToService" ] - ] - ], - [ "or", - [ "_STATEMENT_.Condition.", "withoutKey", "ForAnyValue:StringEqualsIgnoreCase" ], - [ "and", - [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKey", "AWS:SourceArn" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKey", "AWS:SourceOwner" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKey", "kms:ViaService" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKey", "kms:CallerAccount" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKey", "iam:PassedToService" ] - ] - ], - [ "or", - [ "_STATEMENT_.Condition.", "withoutKey", "ForAnyValue:StringLike" ], - [ "and", - [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKey", "AWS:SourceArn" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKey", "AWS:SourceOwner" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKey", "kms:ViaService" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKey", "kms:CallerAccount" ], - [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKey", "iam:PassedToService" ] - ] - ] + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:CalledVia" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:CalledViaFirst" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:CalledViaLast" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalAccount" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalArn" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgPaths" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgID" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceName" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceNamesList" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:ResourceTag" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:SourceAccount" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:SourceArn" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:SourceIdentity" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:SourceOwner" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "aws:ViaAWSService" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "iam:PassedToService" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "kms:CallerAccount" ], + [ "_STATEMENT_.Condition.StringEquals.", "withoutKeyCaseInsensitive", "kms:ViaService" ] ] + ], + [ "or", + [ "_STATEMENT_.Condition.", "withoutKey", "StringEqualsIgnoreCase" ], + [ "and", + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", 
"withoutKeyCaseInsensitive", "aws:CalledVia" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:CalledViaFirst" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:CalledViaLast" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalAccount" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalArn" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgPaths" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgID" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceName" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceNamesList" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:ResourceTag" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:SourceAccount" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:SourceArn" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:SourceIdentity" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:SourceOwner" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:ViaAWSService" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "iam:PassedToService" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "kms:CallerAccount" ], + [ "_STATEMENT_.Condition.StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "kms:ViaService" ] + ] + ], + [ "or", + [ "_STATEMENT_.Condition.", "withoutKey", "StringLike" ], + [ "and", + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:CalledVia" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:CalledViaFirst" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:CalledViaLast" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalAccount" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalArn" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgPaths" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgID" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceName" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceNamesList" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:ResourceTag" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:SourceAccount" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:SourceArn" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:SourceIdentity" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:SourceOwner" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "aws:ViaAWSService" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "iam:PassedToService" ], + [ "_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "kms:CallerAccount" ], + [ 
"_STATEMENT_.Condition.StringLike.", "withoutKeyCaseInsensitive", "kms:ViaService" ] + ] + ], + [ "or", + [ "_STATEMENT_.Condition.", "withoutKey", "ForAnyValue:StringEquals" ], + [ "and", + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:CalledVia" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:CalledViaFirst" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:CalledViaLast" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalAccount" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalArn" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgPaths" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgID" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceName" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceNamesList" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:ResourceTag" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:SourceAccount" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:SourceArn" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:SourceIdentity" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:SourceOwner" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "aws:ViaAWSService" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "iam:PassedToService" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "kms:CallerAccount" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEquals.", "withoutKeyCaseInsensitive", "kms:ViaService" ] + ] + ], + [ "or", + [ "_STATEMENT_.Condition.", "withoutKey", "ForAnyValue:StringEqualsIgnoreCase" ], + [ "and", + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:CalledVia" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:CalledViaFirst" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:CalledViaLast" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalAccount" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalArn" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgPaths" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgID" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceName" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceNamesList" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:ResourceTag" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:SourceAccount" ], + [ 
"_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:SourceArn" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:SourceIdentity" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:SourceOwner" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "aws:ViaAWSService" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "iam:PassedToService" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "kms:CallerAccount" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringEqualsIgnoreCase.", "withoutKeyCaseInsensitive", "kms:ViaService" ] + ] + ], + [ "or", + [ "_STATEMENT_.Condition.", "withoutKey", "ForAnyValue:StringLike" ], + [ "and", + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:CalledVia" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:CalledViaFirst" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:CalledViaLast" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalAccount" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalArn" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgPaths" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalOrgID" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceName" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:PrincipalServiceNamesList" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:ResourceTag" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:SourceAccount" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:SourceArn" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:SourceIdentity" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:SourceOwner" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "aws:ViaAWSService" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "iam:PassedToService" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "kms:CallerAccount" ], + [ "_STATEMENT_.Condition.ForAnyValue:StringLike.", "withoutKeyCaseInsensitive", "kms:ViaService" ] + + ] + ] ] + ] } From b3e33df8bd35f0e99071af87350fe935991a7aa0 Mon Sep 17 00:00:00 2001 From: Jason Ross Date: Mon, 16 Oct 2023 12:44:05 -0400 Subject: [PATCH 07/32] updated for 5.13 --- docker/Dockerfile-aws | 2 +- docker/Dockerfile-azure | 2 +- docker/Dockerfile-gcp | 2 +- docker/build.sh | 23 +++++++++++++++++------ docker/config/base.env | 2 +- 5 files changed, 21 insertions(+), 10 deletions(-) diff --git a/docker/Dockerfile-aws b/docker/Dockerfile-aws index 0289feee4..c65dc86a4 100644 --- a/docker/Dockerfile-aws +++ b/docker/Dockerfile-aws @@ -1,4 +1,4 @@ -FROM nccgroup/scoutsuite-base:5.12.0-01 +FROM nccgroup/scoutsuite-base:5.13.0-01 LABEL maintainer="Jason Ross " diff --git a/docker/Dockerfile-azure 
b/docker/Dockerfile-azure index 8ed1a11ea..27ea075ea 100644 --- a/docker/Dockerfile-azure +++ b/docker/Dockerfile-azure @@ -1,4 +1,4 @@ -FROM nccgroup/scoutsuite-base:5.12.0-01 +FROM nccgroup/scoutsuite-base:5.13.0-01 LABEL maintainer="Jason Ross " diff --git a/docker/Dockerfile-gcp b/docker/Dockerfile-gcp index 823245c8c..edfa696cd 100644 --- a/docker/Dockerfile-gcp +++ b/docker/Dockerfile-gcp @@ -1,4 +1,4 @@ -FROM nccgroup/scoutsuite-base:5.12.0-01 +FROM nccgroup/scoutsuite-base:5.13.0-01 LABEL maintainer="Jason Ross " diff --git a/docker/build.sh b/docker/build.sh index 38f6df5c4..4fe0ef901 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -1,10 +1,18 @@ #!/bin/bash +# vars are stored in .env and config/base.env files +# note that the FROM used in the Dockerfile files +# needs to be updated to match the version in the env +# files in order for anything other than the base image +# to build correctly. +# TODO: fix this so that the FROM is set in the Dockerfile +# automatically by the env vars + SEP1="==============================" SEP2="------------------------------" -echo -e "\n\n${SEP1}\n" -echo -e "\nBEGINNING BUILD...\n" +echo -e "\n\n${SEP1}" +echo -e "BEGINNING BUILD..." case $1 in @@ -52,7 +60,7 @@ case $1 in --build-arg VERSION=${VERSION} \ --build-arg IMAGE_NAME=${IMAGE_NAME} \ ." - + echo -e "\n\nbuilding image using:\n${BUILD_CMD}" exec ${BUILD_CMD} echo -e "\naws image build complete!\n${SEP2}\n" @@ -116,9 +124,12 @@ case $1 in ;; *) - echo -e "\nUsage: $0 [base | aws | gcp | azure | all ]" - echo -e "Using default: base\n" - $0 base + echo -e "\nBUILD TARGET NOT FOUND!" + echo -e "\nUSAGE:\n $0 [base | aws | gcp | azure | all ]" + echo -e "${SEP1}" + exit 1 + # echo -e "Using default: all\n" + # $0 all ;; esac diff --git a/docker/config/base.env b/docker/config/base.env index 98ef442ac..ebc42e829 100644 --- a/docker/config/base.env +++ b/docker/config/base.env @@ -1,6 +1,6 @@ VCS_REF=$(git rev-parse --short HEAD) VCS_URL='https://github.com/nccgroup/ScoutSuite' -VERSION='5.12.0-01' +VERSION='5.13.0-01' BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") VENDOR='nccgroup' NAME='scoutsuite' From 2ffe244dc57dfe1e769d977a870adb66841f11c5 Mon Sep 17 00:00:00 2001 From: Jason Ross Date: Mon, 16 Oct 2023 16:17:15 -0400 Subject: [PATCH 08/32] changed to python 3.12 --- docker/Dockerfile-base | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile-base b/docker/Dockerfile-base index 13ac913f9..45e700880 100644 --- a/docker/Dockerfile-base +++ b/docker/Dockerfile-base @@ -1,4 +1,4 @@ -FROM python:3.10 +FROM python:3.12 LABEL maintainer="Jason Ross " From 746807271db4fad2dc991b674770037bc08fbd45 Mon Sep 17 00:00:00 2001 From: Jason Ross Date: Mon, 16 Oct 2023 16:18:05 -0400 Subject: [PATCH 09/32] added a combined build to put all the tools into a single container --- docker/build.sh | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/docker/build.sh b/docker/build.sh index 4fe0ef901..05176954d 100755 --- a/docker/build.sh +++ b/docker/build.sh @@ -116,6 +116,31 @@ case $1 in exec ${BUILD_CMD} ;; + "combined") + ##################### + ## COMBINED IMAGE ## + ##################### + echo -e "\n${SEP2}\nbuilding combined image...\n" + source ./config/base.env + source ./config/combined.env + + BUILD_CMD="docker build \ + -f Dockerfile \ + -t ${IMAGE_NAME} \ + --build-arg BUILD_DATE=${BUILD_DATE} \ + --build-arg NAME=${NAME} \ + --build-arg VCS_REF=${VCS_REF} \ + --build-arg VCS_URL=${VCS_URL} \ + --build-arg VENDOR=${VENDOR} \ + 
--build-arg VERSION=${VERSION} \ + --build-arg IMAGE_NAME=${IMAGE_NAME} \ + ." + + echo -e "\n\nbuilding image using:\n${BUILD_CMD}" + exec ${BUILD_CMD} + echo -e "\nbase image build complete!\n${SEP2}\n" + ;; + "all") $0 base $0 aws From f165e94ff37c3a6086a5928703d162e0dd61a2c4 Mon Sep 17 00:00:00 2001 From: Jason Ross Date: Mon, 16 Oct 2023 16:18:53 -0400 Subject: [PATCH 10/32] added combined env --- docker/config/combined.env | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 docker/config/combined.env diff --git a/docker/config/combined.env b/docker/config/combined.env new file mode 100644 index 000000000..c48158f6e --- /dev/null +++ b/docker/config/combined.env @@ -0,0 +1,5 @@ +IMAGE_NAME="${VENDOR}/${NAME}:${VERSION}" + +# These are passed in as env vars to the container at runtime +DEBIAN_FRONTEND=noninteractive +TERM=linux \ No newline at end of file From 4a9c2b7a2813cda6fc7229bdd4d393c96f470cb3 Mon Sep 17 00:00:00 2001 From: launchdaemon <7250222+launchdaemon@users.noreply.github.com> Date: Wed, 8 Nov 2023 09:35:25 +0000 Subject: [PATCH 11/32] Update route53-domain-transferlock-not-authorized.json Remove *.uk domains as they now support domain locks. --- .../route53-domain-transferlock-not-authorized.json | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/ScoutSuite/providers/aws/rules/findings/route53-domain-transferlock-not-authorized.json b/ScoutSuite/providers/aws/rules/findings/route53-domain-transferlock-not-authorized.json index 045fce9ff..783f54b66 100755 --- a/ScoutSuite/providers/aws/rules/findings/route53-domain-transferlock-not-authorized.json +++ b/ScoutSuite/providers/aws/rules/findings/route53-domain-transferlock-not-authorized.json @@ -15,7 +15,6 @@ ".*\\.ch$", ".*\\.cl$", ".*\\.co.nz$", - ".*\\.co.uk$", ".*\\.co.za$", ".*\\.com.ar$", ".*\\.com.au$", @@ -26,16 +25,13 @@ ".*\\.fr$", ".*\\.it$", ".*\\.jp$", - ".*\\.me.uk$", ".*\\.net.au$", ".*\\.net.nz$", ".*\\.nl$", ".*\\.org.nz$", - ".*\\.org.uk$", ".*\\.qa$", ".*\\.ru$", - ".*\\.se$", - ".*\\.uk$" + ".*\\.se$" ] ] ], From cd9b789826a9092edb1084e895ce19e3376721fc Mon Sep 17 00:00:00 2001 From: Rennie deGraaf Date: Fri, 19 Jan 2024 19:33:17 -0800 Subject: [PATCH 12/32] Rule to check if EBS default encryption is enabled. It seems to work but there are issues with the display; see "TKTK" comments in .../services.ec2.regions.id.regional_settings.html. Still needs tests. 
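A minimal test sketch for the new resource (not part of this patch; it assumes the facade can be swapped for a mock and that AWSResources behaves like a dict, as the fetch_all code below suggests):

    # Hypothetical unit test for RegionalSettings; the facade is mocked, so no AWS calls are made.
    import asyncio
    from unittest.mock import AsyncMock, MagicMock

    from ScoutSuite.providers.aws.resources.ec2.regional_settings import RegionalSettings

    def test_regional_settings_fetch_all():
        facade = MagicMock()
        facade.partition = 'aws'
        # Stub out the two facade methods added in this patch.
        facade.ec2.get_ebs_encryption = AsyncMock(return_value=False)
        facade.ec2.get_ebs_default_encryption_key = AsyncMock(return_value='alias/aws/ebs')

        settings = RegionalSettings(facade, 'us-east-1')
        asyncio.run(settings.fetch_all())

        assert settings['ebs_encryption_default'] is False
        assert settings['ebs_default_encryption_key_id'] == 'alias/aws/ebs'
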
--- ...ices.ec2.regions.id.regional_settings.html | 28 +++++++++++++++++++ ScoutSuite/providers/aws/facade/ec2.py | 18 +++++++++++- ScoutSuite/providers/aws/metadata.json | 4 +++ .../providers/aws/resources/ec2/base.py | 6 ++-- .../aws/resources/ec2/regional_settings.py | 16 +++++++++++ .../ec2_ebs_default_encryption_disabled.json | 18 ++++++++++++ .../providers/aws/rules/rulesets/default.json | 6 ++++ .../aws/rules/rulesets/detailed.json | 6 ++++ 8 files changed, 99 insertions(+), 3 deletions(-) create mode 100644 ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html create mode 100644 ScoutSuite/providers/aws/resources/ec2/regional_settings.py create mode 100644 ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json diff --git a/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html new file mode 100644 index 000000000..2f3017ae8 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html @@ -0,0 +1,28 @@ + + + + + + + + diff --git a/ScoutSuite/providers/aws/facade/ec2.py b/ScoutSuite/providers/aws/facade/ec2.py index 60ab932a3..2557d3573 100755 --- a/ScoutSuite/providers/aws/facade/ec2.py +++ b/ScoutSuite/providers/aws/facade/ec2.py @@ -218,4 +218,20 @@ async def get_route_tables(self, region): return route_tables except Exception as e: print_exception('Failed to get route tables: {}'.format(e)) - return [] \ No newline at end of file + return [] + + async def get_ebs_encryption(self, region): + ec2_client = AWSFacadeUtils.get_client('ec2', self.session, region) + try: + encryption_settings = await run_concurrently(lambda: ec2_client.get_ebs_encryption_by_default()['EbsEncryptionByDefault']) + return encryption_settings + except Exception as e: + print_exception(f'Failed to retrieve EBS encryption settings: {e}') + + async def get_ebs_default_encryption_key(self, region): + ec2_client = AWSFacadeUtils.get_client('ec2', self.session, region) + try: + encryption_key = await run_concurrently(lambda: ec2_client.get_ebs_default_kms_key_id()['KmsKeyId']) + return encryption_key + except Exception as e: + print_exception(f'Failed to retrieve EBS encryption key ID: {e}') diff --git a/ScoutSuite/providers/aws/metadata.json b/ScoutSuite/providers/aws/metadata.json index efd223944..804dbb9ed 100755 --- a/ScoutSuite/providers/aws/metadata.json +++ b/ScoutSuite/providers/aws/metadata.json @@ -213,6 +213,10 @@ "images": { "cols": 2, "path": "services.ec2.regions.id.images" + }, + "regional_settings": { + "cols": 2, + "path": "services.ec2.regions.id.regional_settings" } }, "summaries": { diff --git a/ScoutSuite/providers/aws/resources/ec2/base.py b/ScoutSuite/providers/aws/resources/ec2/base.py index 0a82deac7..1b843fb83 100755 --- a/ScoutSuite/providers/aws/resources/ec2/base.py +++ b/ScoutSuite/providers/aws/resources/ec2/base.py @@ -3,6 +3,7 @@ from ScoutSuite.providers.aws.resources.ec2.volumes import Volumes from ScoutSuite.providers.aws.resources.ec2.vpcs import Ec2Vpcs from ScoutSuite.providers.aws.resources.regions import Regions +from ScoutSuite.providers.aws.resources.ec2.regional_settings import RegionalSettings class EC2(Regions): @@ -10,7 +11,8 @@ class EC2(Regions): (Ec2Vpcs, 'vpcs'), (AmazonMachineImages, 'images'), (Snapshots, 'snapshots'), - (Volumes, 'volumes') + (Volumes, 'volumes'), + (RegionalSettings, 'regional_settings') ] def __init__(self, facade): 
@@ -26,7 +28,7 @@ async def fetch_all(self, regions=None, excluded_regions=None, partition_name='a sum([len(vpc['security_groups']) for vpc in self['regions'][region]['vpcs'].values()]) self['regions'][region]['network_interfaces_count'] =\ sum([len(vpc['network_interfaces']) for vpc in self['regions'][region]['vpcs'].values()]) - + self['instances_count'] =\ sum([region['instances_count'] for region in self['regions'].values()]) self['security_groups_count'] =\ diff --git a/ScoutSuite/providers/aws/resources/ec2/regional_settings.py b/ScoutSuite/providers/aws/resources/ec2/regional_settings.py new file mode 100644 index 000000000..6aae176fb --- /dev/null +++ b/ScoutSuite/providers/aws/resources/ec2/regional_settings.py @@ -0,0 +1,16 @@ +from ScoutSuite.providers.aws.resources.base import AWSResources +from ScoutSuite.providers.aws.facade.base import AWSFacade +from ScoutSuite.providers.aws.utils import get_name, format_arn + + +class RegionalSettings(AWSResources): + def __init__(self, facade: AWSFacade, region: str): + super().__init__(facade) + self.region = region + self.partition = facade.partition + self.service = 'ec2' + self.resource_type = 'regional_setting' + + async def fetch_all(self): + self['ebs_encryption_default'] = await self.facade.ec2.get_ebs_encryption(self.region) + self['ebs_default_encryption_key_id'] = await self.facade.ec2.get_ebs_default_encryption_key(self.region) diff --git a/ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json b/ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json new file mode 100644 index 000000000..cc623520f --- /dev/null +++ b/ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json @@ -0,0 +1,18 @@ +{ + "description": "EBS Encryption By Default Is Disabled", + "rationale": "Enabling EBS encryption by default ensures that all EBS Volumes created in the region are encrypted even if the operator neglects to opt into encryption when creating a Volume.", + "remediation": "Enable encryption by default for EBS volumes in all regions.", + "references": [ + "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default" + ], + "dashboard_name": "Regions", + "path": "ec2.regions.id.regional_settings.ebs_encryption_default", + "conditions": [ + "and", + [ + "ec2.regions.id.regional_settings.ebs_encryption_default", + "false", + "" + ] + ] +} \ No newline at end of file diff --git a/ScoutSuite/providers/aws/rules/rulesets/default.json b/ScoutSuite/providers/aws/rules/rulesets/default.json index b3afbb226..255c08138 100755 --- a/ScoutSuite/providers/aws/rules/rulesets/default.json +++ b/ScoutSuite/providers/aws/rules/rulesets/default.json @@ -142,6 +142,12 @@ "level": "danger" } ], + "ec2_ebs_default_encryption_disabled.json": [ + { + "enabled": true, + "level": "warning" + } + ], "ec2-instance-in-security-group.json": [ { "args": [ diff --git a/ScoutSuite/providers/aws/rules/rulesets/detailed.json b/ScoutSuite/providers/aws/rules/rulesets/detailed.json index d1043c5c1..004b7a5f6 100755 --- a/ScoutSuite/providers/aws/rules/rulesets/detailed.json +++ b/ScoutSuite/providers/aws/rules/rulesets/detailed.json @@ -142,6 +142,12 @@ "level": "danger" } ], + "ec2_ebs_default_encryption_disabled.json": [ + { + "enabled": true, + "level": "warning" + } + ], "ec2-instance-in-security-group.json": [ { "args": [ From 524b8074216abbf6b83dcf6580ea41ea833fc662 Mon Sep 17 00:00:00 2001 From: Rennie deGraaf Date: Mon, 22 Jan 2024 08:58:42 -0800 Subject: [PATCH 
13/32] EBS default encryption rule: now highlights the setting in HTML. --- .../aws/services.ec2.regions.id.regional_settings.html | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html index 2f3017ae8..b4eec1f69 100644 --- a/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html +++ b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html @@ -1,15 +1,17 @@ From 54147a09183258b72fcdcf2ef2888c06ed2b5cbb Mon Sep 17 00:00:00 2001 From: Asif Wani Date: Sun, 4 Feb 2024 23:57:13 +0530 Subject: [PATCH 14/32] added-digitalocean-support --- README.md | 1 + ScoutSuite/__main__.py | 17 ++ ScoutSuite/core/cli_parser.py | 32 +++ .../do/services.database.databases.html | 33 +++ .../do/services.droplet.droplets.html | 41 ++++ .../do/services.networking.domains.html | 27 +++ .../do/services.networking.firewalls.html | 35 +++ .../services.networking.load_balancers.html | 25 +++ .../partials/do/services.spaces.buckets.html | 26 +++ .../output/data/html/summaries/do/.gitkeep | 0 ScoutSuite/providers/__init__.py | 3 +- .../base/authentication_strategy_factory.py | 3 +- .../providers/do/authentication_strategy.py | 49 +++++ ScoutSuite/providers/do/facade/__init__.py | 0 ScoutSuite/providers/do/facade/base.py | 17 ++ ScoutSuite/providers/do/facade/database.py | 71 +++++++ ScoutSuite/providers/do/facade/droplet.py | 38 ++++ ScoutSuite/providers/do/facade/networking.py | 39 ++++ ScoutSuite/providers/do/facade/spaces.py | 199 ++++++++++++++++++ ScoutSuite/providers/do/metadata.json | 50 +++++ ScoutSuite/providers/do/provider.py | 50 +++++ ScoutSuite/providers/do/resources/__init__.py | 0 ScoutSuite/providers/do/resources/base.py | 22 ++ .../do/resources/database/__init__.py | 0 .../providers/do/resources/database/base.py | 14 ++ .../do/resources/database/databases.py | 66 ++++++ .../do/resources/droplet/__init__.py | 0 .../providers/do/resources/droplet/base.py | 14 ++ .../do/resources/droplet/droplets.py | 83 ++++++++ .../do/resources/networking/__init__.py | 0 .../providers/do/resources/networking/base.py | 20 ++ .../do/resources/networking/domains.py | 76 +++++++ .../do/resources/networking/firewalls.py | 47 +++++ .../do/resources/networking/load_balancers.py | 29 +++ .../providers/do/resources/spaces/__init__.py | 0 .../providers/do/resources/spaces/base.py | 14 ++ .../providers/do/resources/spaces/buckets.py | 43 ++++ .../providers/do/rules/filters/.gitkeep | 0 ...se-databases-mysql-publically-exposed.json | 20 ++ ...atabases-mysql-user-legacy-encryption.json | 20 ++ ...e-databases-postgres-connection-pools.json | 20 ++ ...base-databases-redis-evicition-policy.json | 20 ++ .../droplet-droplets-all-ports-exposed.json | 15 ++ .../droplet-droplets-backup-not-enabled.json | 17 ++ .../droplet-droplets-backup-not-present.json | 17 ++ ...roplet-droplets-firewall-not-attached.json | 17 ++ .../droplet-droplets-port-22-exposed.json | 15 ++ .../findings/networking-domains-high-ttl.json | 15 ++ .../networking-domains-missing-dkim.json | 15 ++ .../networking-domains-missing-dmarc.json | 15 ++ .../networking-domains-missing-spf.json | 15 ++ ...working-domains-spf-overly-permissive.json | 15 ++ .../networking-firewalls-public-ports.json | 15 ++ .../networking-firewalls-quad-zero.json | 15 ++ ...d-balancer-backend-keepalive-disabled.json | 15 ++ 
...g-load-balancer-ssl-redirect-disabled.json | 15 ++ .../findings/spaces-buckets-public-read.json | 17 ++ .../findings/spaces-buckets-public-write.json | 17 ++ .../providers/do/rules/rulesets/default.json | 119 +++++++++++ .../providers/do/rules/rulesets/filters.json | 4 + ScoutSuite/providers/do/services.py | 23 ++ ScoutSuite/providers/do/utils.py | 23 ++ requirements.txt | 8 +- tools/process_raw_response.py | 5 +- 64 files changed, 1692 insertions(+), 4 deletions(-) create mode 100644 ScoutSuite/output/data/html/partials/do/services.database.databases.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.droplet.droplets.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.networking.domains.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.networking.firewalls.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.networking.load_balancers.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.spaces.buckets.html create mode 100644 ScoutSuite/output/data/html/summaries/do/.gitkeep create mode 100644 ScoutSuite/providers/do/authentication_strategy.py create mode 100644 ScoutSuite/providers/do/facade/__init__.py create mode 100644 ScoutSuite/providers/do/facade/base.py create mode 100644 ScoutSuite/providers/do/facade/database.py create mode 100644 ScoutSuite/providers/do/facade/droplet.py create mode 100644 ScoutSuite/providers/do/facade/networking.py create mode 100644 ScoutSuite/providers/do/facade/spaces.py create mode 100644 ScoutSuite/providers/do/metadata.json create mode 100644 ScoutSuite/providers/do/provider.py create mode 100644 ScoutSuite/providers/do/resources/__init__.py create mode 100644 ScoutSuite/providers/do/resources/base.py create mode 100644 ScoutSuite/providers/do/resources/database/__init__.py create mode 100644 ScoutSuite/providers/do/resources/database/base.py create mode 100644 ScoutSuite/providers/do/resources/database/databases.py create mode 100644 ScoutSuite/providers/do/resources/droplet/__init__.py create mode 100644 ScoutSuite/providers/do/resources/droplet/base.py create mode 100644 ScoutSuite/providers/do/resources/droplet/droplets.py create mode 100644 ScoutSuite/providers/do/resources/networking/__init__.py create mode 100644 ScoutSuite/providers/do/resources/networking/base.py create mode 100644 ScoutSuite/providers/do/resources/networking/domains.py create mode 100644 ScoutSuite/providers/do/resources/networking/firewalls.py create mode 100644 ScoutSuite/providers/do/resources/networking/load_balancers.py create mode 100644 ScoutSuite/providers/do/resources/spaces/__init__.py create mode 100644 ScoutSuite/providers/do/resources/spaces/base.py create mode 100644 ScoutSuite/providers/do/resources/spaces/buckets.py create mode 100644 ScoutSuite/providers/do/rules/filters/.gitkeep create mode 100644 ScoutSuite/providers/do/rules/findings/database-databases-mysql-publically-exposed.json create mode 100644 ScoutSuite/providers/do/rules/findings/database-databases-mysql-user-legacy-encryption.json create mode 100644 ScoutSuite/providers/do/rules/findings/database-databases-postgres-connection-pools.json create mode 100644 ScoutSuite/providers/do/rules/findings/database-databases-redis-evicition-policy.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-all-ports-exposed.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-enabled.json create mode 100644 
ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-present.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-firewall-not-attached.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-port-22-exposed.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-high-ttl.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-missing-dkim.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-missing-dmarc.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-missing-spf.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-spf-overly-permissive.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-firewalls-public-ports.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-firewalls-quad-zero.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-load-balancer-backend-keepalive-disabled.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-load-balancer-ssl-redirect-disabled.json create mode 100644 ScoutSuite/providers/do/rules/findings/spaces-buckets-public-read.json create mode 100644 ScoutSuite/providers/do/rules/findings/spaces-buckets-public-write.json create mode 100644 ScoutSuite/providers/do/rules/rulesets/default.json create mode 100644 ScoutSuite/providers/do/rules/rulesets/filters.json create mode 100644 ScoutSuite/providers/do/services.py create mode 100644 ScoutSuite/providers/do/utils.py diff --git a/README.md b/README.md index 93a3d1763..4d5acb98a 100755 --- a/README.md +++ b/README.md @@ -30,6 +30,7 @@ The following cloud providers are currently supported: - Alibaba Cloud (alpha) - Oracle Cloud Infrastructure (alpha) - Kubernetes clusters on a cloud provider (alpha) +- DigitalOcean Cloud (alpha) ## Installation diff --git a/ScoutSuite/__main__.py b/ScoutSuite/__main__.py index 24fe31300..292267097 100755 --- a/ScoutSuite/__main__.py +++ b/ScoutSuite/__main__.py @@ -61,6 +61,10 @@ def run_from_cli(): kubernetes_context=args.get('kubernetes_context'), kubernetes_persist_config=args.get('kubernetes_persist_config'), kubernetes_azure_subscription_id=args.get('kubernetes_azure_subscription_id'), + #DigitalOcean + token=args.get('token'), + access_key=args.get('access_key'), + access_secret=args.get('access_secret'), # General report_name=args.get('report_name'), report_dir=args.get('report_dir'), timestamp=args.get('timestamp'), @@ -113,6 +117,10 @@ def run(provider, kubernetes_context=None, kubernetes_persist_config=True, kubernetes_azure_subscription_id=None, + #DigitalOcean + token=None, + access_key=None, + access_secret=None, # General report_name=None, report_dir=None, timestamp=False, @@ -171,6 +179,10 @@ async def _run(provider, kubernetes_context, kubernetes_persist_config, kubernetes_azure_subscription_id, + #DigitalOcean + token, + access_key, + access_secret, # General report_name, report_dir, timestamp, @@ -221,6 +233,11 @@ async def _run(provider, access_key_id=access_key_id, access_key_secret=access_key_secret, + #DigitalOcean + token=token, + access_key=access_key, + access_secret=access_secret, + # Kubernetes kubernetes_cluster_provider=kubernetes_cluster_provider, kubernetes_config_file=kubernetes_config_file, diff --git a/ScoutSuite/core/cli_parser.py b/ScoutSuite/core/cli_parser.py index 9a2d72fc9..161dd417b 100755 --- a/ScoutSuite/core/cli_parser.py +++ 
b/ScoutSuite/core/cli_parser.py @@ -30,6 +30,7 @@ def __init__(self): self._init_aliyun_parser() self._init_oci_parser() self._init_kubernetes_parser() + self._init_do_parser() def _init_aws_parser(self): parser = self.subparsers.add_parser("aws", @@ -254,6 +255,32 @@ def _init_oci_parser(self): dest='profile', default=None, help='Name of the profile') + + def _init_do_parser(self): + do_parser = self.subparsers.add_parser("do", + parents=[self.common_providers_args_parser], + help="Run Scout against an DigitalOcean account") + + parser = do_parser.add_argument_group('Authentication parameters') + + parser.add_argument('-t', + '--token', + action='store', + default=None, + dest='token', + help='DO Token') + + parser.add_argument('--access_key', + action='store', + default=None, + dest='access_key', + help='Spaces Access Key ID') + parser.add_argument('--access_secret', + action='store', + default=None, + dest='access_secret', + help='Spaces Secret Access Key') + def _init_kubernetes_parser(self): kubernetes_parser = self.subparsers.add_parser("kubernetes", @@ -436,6 +463,11 @@ def parse_args(self, args=None): if v.get('subscription_ids') and v.get('all_subscriptions'): self.parser.error('--subscription-ids and --all-subscriptions are mutually exclusive options') + # DigitalOcean + if v.get('provider') == 'do': + if (v.get('access_key') or v.get('access_secret')) and not (v.get('access_key') and v.get('access_secret')): + self.parser.error('For DO Spaces service please provide both --access_key and --access_secret') + # Kubernetes elif v.get('provider') == 'kubernetes': cluster_provider = v.get('kubernetes_cluster_provider') diff --git a/ScoutSuite/output/data/html/partials/do/services.database.databases.html b/ScoutSuite/output/data/html/partials/do/services.database.databases.html new file mode 100644 index 000000000..1b2bc6455 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.database.databases.html @@ -0,0 +1,33 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.droplet.droplets.html b/ScoutSuite/output/data/html/partials/do/services.droplet.droplets.html new file mode 100644 index 000000000..12229a804 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.droplet.droplets.html @@ -0,0 +1,41 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.networking.domains.html b/ScoutSuite/output/data/html/partials/do/services.networking.domains.html new file mode 100644 index 000000000..fc60c0a8f --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.networking.domains.html @@ -0,0 +1,27 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.networking.firewalls.html b/ScoutSuite/output/data/html/partials/do/services.networking.firewalls.html new file mode 100644 index 000000000..f0647f9a6 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.networking.firewalls.html @@ -0,0 +1,35 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.networking.load_balancers.html b/ScoutSuite/output/data/html/partials/do/services.networking.load_balancers.html new file mode 100644 index 000000000..dda111870 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.networking.load_balancers.html @@ -0,0 +1,25 @@ + + + + + + + + \ No newline at end of file diff --git 
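
For reference, the new "do" subcommand can also be driven programmatically through the run() entry point extended in ScoutSuite/__main__.py above. A minimal sketch; every credential value below is a placeholder, and the Spaces key pair is optional but must be supplied together (the parser rejects one without the other):

# Minimal sketch of invoking the DigitalOcean provider via run().
# All credential values are placeholders, not real secrets.
from ScoutSuite.__main__ import run

run(
    provider='do',
    token='dop_v1_0000000000000000',   # DigitalOcean API token (-t/--token)
    access_key='SPACES_ACCESS_KEY',    # Spaces key pair (--access_key/--access_secret):
    access_secret='SPACES_SECRET_KEY', # optional, but both must be given together
    report_name='do-example',
)
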
a/ScoutSuite/output/data/html/partials/do/services.spaces.buckets.html b/ScoutSuite/output/data/html/partials/do/services.spaces.buckets.html new file mode 100644 index 000000000..53b284f65 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.spaces.buckets.html @@ -0,0 +1,26 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/summaries/do/.gitkeep b/ScoutSuite/output/data/html/summaries/do/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/__init__.py b/ScoutSuite/providers/__init__.py index a00fe0a63..e92d522b6 100755 --- a/ScoutSuite/providers/__init__.py +++ b/ScoutSuite/providers/__init__.py @@ -3,7 +3,8 @@ 'azure': 'AzureProvider', 'aliyun': 'AliyunProvider', 'oci': 'OracleProvider', - 'kubernetes': 'KubernetesProvider'} + 'kubernetes': 'KubernetesProvider', + 'do': 'DigitalOceanProvider'} def get_provider_object(provider): diff --git a/ScoutSuite/providers/base/authentication_strategy_factory.py b/ScoutSuite/providers/base/authentication_strategy_factory.py index a6eee9bdf..6a55c8881 100755 --- a/ScoutSuite/providers/base/authentication_strategy_factory.py +++ b/ScoutSuite/providers/base/authentication_strategy_factory.py @@ -4,7 +4,8 @@ 'azure': 'AzureAuthenticationStrategy', 'aliyun': 'AliyunAuthenticationStrategy', 'oci': 'OracleAuthenticationStrategy', - 'kubernetes': 'KubernetesAuthenticationStrategy' + 'kubernetes': 'KubernetesAuthenticationStrategy', + 'do': 'DigitalOceanAuthenticationStrategy' } diff --git a/ScoutSuite/providers/do/authentication_strategy.py b/ScoutSuite/providers/do/authentication_strategy.py new file mode 100644 index 000000000..1d4e17b1f --- /dev/null +++ b/ScoutSuite/providers/do/authentication_strategy.py @@ -0,0 +1,49 @@ +from ScoutSuite.providers.do import utils +from ScoutSuite.providers.base.authentication_strategy import AuthenticationException +from ScoutSuite.providers.base.authentication_strategy import AuthenticationStrategy +from ScoutSuite.core.console import print_warning +from pydo import Client +import logging +import boto3 + + +class DoCredentials: + def __init__(self, client, session=None): + self.client = client + self.session = session + + +class DigitalOceanAuthenticationStrategy(AuthenticationStrategy): + + def authenticate(self, token=None, access_key=None, access_secret=None, **kwargs): + """ + Handles authentication to DigitalOcean. 
+ """ + try: + self.client = Client(token) + # a simple request here to make sure the authentication is successful + self.client.account.get() + + if not (access_key and access_secret): + print_warning( + f"Missing credentials for spaces: Skipping DO Spaces service" + ) + return DoCredentials(client=self.client) + else: + # Set logging level to error for libraries as otherwise generates a lot of warnings + logging.getLogger("botocore").setLevel(logging.ERROR) + logging.getLogger("botocore.auth").setLevel(logging.ERROR) + logging.getLogger("urllib3").setLevel(logging.ERROR) + + session = boto3.Session( + aws_access_key_id=access_key, + aws_secret_access_key=access_secret, + ) + # make sure the DO spaces authentication is successful + region = "blr1" + spaces_client = utils.get_client("s3", session, region) + spaces_client.list_buckets() + return DoCredentials(client=self.client, session=session) + + except Exception as e: + raise AuthenticationException(e) diff --git a/ScoutSuite/providers/do/facade/__init__.py b/ScoutSuite/providers/do/facade/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/facade/base.py b/ScoutSuite/providers/do/facade/base.py new file mode 100644 index 000000000..5c601d44a --- /dev/null +++ b/ScoutSuite/providers/do/facade/base.py @@ -0,0 +1,17 @@ +from ScoutSuite.providers.do.facade.droplet import DropletFacade +from ScoutSuite.providers.do.facade.networking import Networkingfacade +from ScoutSuite.providers.do.facade.database import DatabasesFacade +from ScoutSuite.providers.do.facade.spaces import SpacesFacade +from ScoutSuite.providers.do.authentication_strategy import DoCredentials + + +class DoFacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._instantiate_facades() + + def _instantiate_facades(self): + self.droplet = DropletFacade(self._credentials) + self.networking = Networkingfacade(self._credentials) + self.database = DatabasesFacade(self._credentials) + self.spaces = SpacesFacade(self._credentials) diff --git a/ScoutSuite/providers/do/facade/database.py b/ScoutSuite/providers/do/facade/database.py new file mode 100644 index 000000000..ea59adce4 --- /dev/null +++ b/ScoutSuite/providers/do/facade/database.py @@ -0,0 +1,71 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.utils import run_concurrently + + +class DatabasesFacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._client = credentials.client + + async def get_databases(self): + try: + databases = await run_concurrently( + lambda: self._client.databases.list_clusters()["databases"] + ) + return databases + except Exception as e: + print_exception(f"Failed to get databases: {e}") + return [] + + async def get_databaseusers(self, db_uuid): + try: + db_users = await run_concurrently( + lambda: self._client.databases.list_users(db_uuid)["users"] + ) + return db_users + except Exception as e: + print_exception(f"Failed to get db users: {e}") + return [] + + async def get_eviction_policy(self, db_uuid): + try: + eviction_policy = await run_concurrently( + lambda: self._client.databases.get_eviction_policy(db_uuid)[ + "eviction_policy" + ] + ) + return eviction_policy + except Exception as e: + print_exception(f"Failed to get Redis eviction policy: {e}") + return [] + + async def get_connection_pools(self, db_uuid): + try: + connection_pools = await 
run_concurrently( + lambda: self._client.databases.list_connection_pools(db_uuid)["pools"] + ) + return connection_pools + except Exception as e: + print_exception(f"Failed to get Postgres connection pools: {e}") + return [] + + async def get_firewalls(self, db_uuid): + try: + firewall_rules = await run_concurrently( + lambda: self._client.databases.list_firewall_rules(db_uuid) + ) + return firewall_rules + except Exception as e: + print_exception(f"Failed to get db firewalls: {e}") + return [] + + async def get_resources(self, tag): + try: + resources = await run_concurrently( + lambda: self._client.tags.get(tag)["tag"]["resources"] + ) + return resources + except Exception as e: + print_exception(f"Failed to get tag resources: {e}") + return [] diff --git a/ScoutSuite/providers/do/facade/droplet.py b/ScoutSuite/providers/do/facade/droplet.py new file mode 100644 index 000000000..03b47049f --- /dev/null +++ b/ScoutSuite/providers/do/facade/droplet.py @@ -0,0 +1,38 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.utils import run_concurrently + + +class DropletFacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._client = credentials.client + + async def get_droplets(self): + try: + droplets = await run_concurrently( + lambda: self._client.droplets.list()["droplets"] + ) + return droplets + except Exception as e: + print_exception(f"Failed to get droplets: {e}") + return [] + + async def get_droplet_fwconfig(self, id): + try: + droplet_fwconfig = await run_concurrently( + lambda: self._client.droplets.list_firewalls(id) + ) + return droplet_fwconfig + except Exception as e: + print_exception(f"Failed to get droplet firewall config: {e}") + return [] + + # TODO not required for now + # async def get_droplet_details(self, id): + # try: + # droplets = await run_concurrently(lambda: self._client.droplets.list()['droplets']) + # return droplets + # except Exception as e: + # print_exception(f'Failed to get do droplets: {e}') + # return [] diff --git a/ScoutSuite/providers/do/facade/networking.py b/ScoutSuite/providers/do/facade/networking.py new file mode 100644 index 000000000..fc5e2ae83 --- /dev/null +++ b/ScoutSuite/providers/do/facade/networking.py @@ -0,0 +1,39 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.utils import run_concurrently + + +class Networkingfacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._client = credentials.client + + async def get_firewalls(self): + try: + firewalls = await run_concurrently( + lambda: self._client.firewalls.list()["firewalls"] + ) + return firewalls + except Exception as e: + print_exception(f"Failed to get firewalls: {e}") + return [] + + async def get_domains(self): + try: + domains = await run_concurrently( + lambda: self._client.domains.list()["domains"] + ) + return domains + except Exception as e: + print_exception(f"Failed to get domains: {e}") + return [] + + async def get_load_balancers(self): + try: + load_balancers = await run_concurrently( + lambda: self._client.load_balancers.list()["load_balancers"] + ) + return load_balancers + except Exception as e: + print_exception(f"Failed to get load balancers: {e}") + return [] diff --git a/ScoutSuite/providers/do/facade/spaces.py b/ScoutSuite/providers/do/facade/spaces.py new file mode 
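
Each facade above follows the same shape: wrap a blocking pydo call in run_concurrently() and degrade to an empty list on failure. A compact restatement of that pattern, with list_call standing in for any of the client methods used above:

from ScoutSuite.core.console import print_exception
from ScoutSuite.providers.utils import run_concurrently

async def fetch_collection(list_call, key, label):
    """Generic form of the facade getters above; list_call is any pydo list
    method (e.g. client.domains.list) and key is the field holding the
    collection in its response (e.g. "domains")."""
    try:
        return await run_concurrently(lambda: list_call()[key])
    except Exception as e:
        print_exception(f"Failed to get {label}: {e}")
        return []
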
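
The Spaces facade introduced next reuses boto3's S3 client against region-specific DigitalOcean Spaces endpoints. A condensed, synchronous sketch of that access pattern, assuming the intent is to accumulate buckets across every region in the hard-coded list; key values are placeholders:

import boto3

SPACES_REGIONS = ["nyc3", "sfo2", "sfo3", "ams3", "fra1", "sgp1", "syd1", "blr1"]

def spaces_client(session, region):
    # Same endpoint construction as the facade: the S3-compatible API lives
    # at https://<region>.digitaloceanspaces.com
    return session.client(
        "s3",
        region_name=region,
        endpoint_url=f"https://{region}.digitaloceanspaces.com",
    )

def all_buckets(session):
    buckets = []
    for region in SPACES_REGIONS:
        # accumulate per-region results rather than keep only the last call
        buckets.extend(spaces_client(session, region).list_buckets()["Buckets"])
    return buckets

# session = boto3.Session(aws_access_key_id="SPACES_KEY",
#                         aws_secret_access_key="SPACES_SECRET")
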
100644 index 000000000..f8fb377a1 --- /dev/null +++ b/ScoutSuite/providers/do/facade/spaces.py @@ -0,0 +1,199 @@ +from botocore.exceptions import ClientError +import boto3 +from ScoutSuite.core.console import print_exception, print_debug, print_warning +from ScoutSuite.providers.aws.facade.utils import AWSFacadeUtils +from ScoutSuite.providers.utils import run_concurrently, get_and_set_concurrently +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.authentication_strategy import DoCredentials + + +class SpacesFacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._client = credentials.client + self.session = credentials.session + + async def get_all_buckets(self): + buckets = [] + # TODO no api avaialible to get do regions that support spaces. + region_list = ["nyc3", "sfo2", "sfo3", "ams3", "fra1", "sgp1", "syd1", "blr1"] + + for region in region_list: + buckets = await self.get_buckets(region) + + return buckets + + async def get_buckets(self, region=None): + try: + buckets = [] + exception = None + try: + client = self.get_client("s3", self.session, region) + buckets = await run_concurrently( + lambda: client.list_buckets()["Buckets"] + ) + except Exception as e: + exception = e + else: + exception = None # Fix for https://github.com/nccgroup/ScoutSuite/issues/916#issuecomment-728783965 + if not buckets: + if exception: + print_exception(f"Failed to list buckets: {exception}") + return [] + except Exception as e: + print_exception(f"Failed to list buckets: {e}") + return [] + else: + # We need first to retrieve bucket locations before retrieving bucket details + await get_and_set_concurrently( + [self._get_and_set_s3_bucket_location], buckets, region=region + ) + + # Then we can retrieve bucket details concurrently + await get_and_set_concurrently( + [ + self._get_and_set_s3_acls, + ], + buckets, + ) + + return buckets + + async def _get_and_set_s3_bucket_location(self, bucket: {}, region=None): + client = self.get_client("s3", self.session, region) + try: + location = await run_concurrently( + lambda: client.get_bucket_location(Bucket=bucket["Name"]) + ) + except Exception as e: + if "NoSuchBucket" in str(e) or "InvalidToken" in str(e): + print_warning( + "Failed to get bucket location for {}: {}".format(bucket["Name"], e) + ) + else: + print_exception( + "Failed to get bucket location for {}: {}".format(bucket["Name"], e) + ) + location = None + + if location: + region = ( + location["LocationConstraint"] + if location["LocationConstraint"] + else "us-east-1" + ) + + # Fixes issue #59: location constraint can be either EU or eu-west-1 for Ireland... 
+ if region == "EU": + region = "eu-west-1" + else: + region = None + + bucket["region"] = region + + async def _get_and_set_s3_acls(self, bucket: {}, key_name=None): + bucket_name = bucket["Name"] + client = self.get_client("s3", self.session, bucket["region"]) + + try: + grantees = {} + if key_name: + grants = await run_concurrently( + lambda: client.get_object_acl(Bucket=bucket_name, Key=key_name) + ) + else: + grants = await run_concurrently( + lambda: client.get_bucket_acl(Bucket=bucket_name) + ) + for grant in grants["Grants"]: + if "ID" in grant["Grantee"]: + grantee = grant["Grantee"]["ID"] + display_name = ( + grant["Grantee"]["DisplayName"] + if "DisplayName" in grant["Grantee"] + else grant["Grantee"]["ID"] + ) + elif "URI" in grant["Grantee"]: + grantee = grant["Grantee"]["URI"].split("/")[-1] + display_name = self._s3_group_to_string(grant["Grantee"]["URI"]) + else: + grantee = display_name = "Unknown" + permission = grant["Permission"] + grantees.setdefault(grantee, {}) + grantees[grantee]["DisplayName"] = display_name + if "URI" in grant["Grantee"]: + grantees[grantee]["URI"] = grant["Grantee"]["URI"] + grantees[grantee].setdefault("permissions", self._init_s3_permissions()) + self._set_s3_permissions(grantees[grantee]["permissions"], permission) + bucket["grantees"] = grantees + except Exception as e: + if "NoSuchBucket" in str(e) or "InvalidToken" in str(e): + print_warning(f"Failed to get ACL configuration for {bucket_name}: {e}") + else: + print_exception( + f"Failed to get ACL configuration for {bucket_name}: {e}" + ) + bucket["grantees"] = {} + + @staticmethod + def get_client(service: str, session: boto3.session.Session, region: str = None): + """ + Instantiates an AWS API client + + :param service: Service targeted, e.g. ec2 + :param session: The aws session + :param region: Region desired, e.g. 
us-east-2 + + :return: + """ + + try: + return ( + session.client( + service, + region_name=region, + endpoint_url="https://" + region + ".digitaloceanspaces.com", + ) + if region + else session.client(service) + ) + except Exception as e: + print_exception(f"Failed to create client for the {service} service: {e}") + return None + + @staticmethod + def _init_s3_permissions(): + permissions = { + "read": False, + "write": False, + "read_acp": False, + "write_acp": False, + } + return permissions + + @staticmethod + def _set_s3_permissions(permissions: str, name: str): + if name == "READ" or name == "FULL_CONTROL": + permissions["read"] = True + if name == "WRITE" or name == "FULL_CONTROL": + permissions["write"] = True + if name == "READ_ACP" or name == "FULL_CONTROL": + permissions["read_acp"] = True + if name == "WRITE_ACP" or name == "FULL_CONTROL": + permissions["write_acp"] = True + + @staticmethod + def _s3_group_to_string(uri: str): + if uri == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers": + return "Authenticated users" + elif uri == "http://acs.amazonaws.com/groups/global/AllUsers": + return "Everyone" + elif uri == "http://acs.amazonaws.com/groups/s3/LogDelivery": + return "Log delivery" + else: + return uri + + @staticmethod + def _status_to_bool(value: str): + """Converts a string to True if it is equal to 'Enabled' or to False otherwise.""" + return value == "Enabled" diff --git a/ScoutSuite/providers/do/metadata.json b/ScoutSuite/providers/do/metadata.json new file mode 100644 index 000000000..1dedc9e80 --- /dev/null +++ b/ScoutSuite/providers/do/metadata.json @@ -0,0 +1,50 @@ +{ + "Compute": { + "droplet": { + "resources": { + "droplets": { + "cols": 2, + "path": "services.droplet.droplets" + } + } + } + }, + "Storage": { + "spaces": { + "resources": { + "buckets": { + "cols": 2, + "path": "services.spaces.buckets" + } + } + } + }, + "Network": { + "networking": { + "resources": { + "firewalls": { + "cols": 2, + "path": "services.networking.firewalls" + }, + "domains": { + "cols": 2, + "path": "services.networking.domains" + }, + "load_balancers": { + "cols": 2, + "path": "services.networking.load_balancers" + } + } + } + }, + "DatabaseClusters": { + "database": { + "resources": { + "databases": { + "cols": 2, + "path": "services.database.databases" + } + } + } + } +} diff --git a/ScoutSuite/providers/do/provider.py b/ScoutSuite/providers/do/provider.py new file mode 100644 index 000000000..ba2987e68 --- /dev/null +++ b/ScoutSuite/providers/do/provider.py @@ -0,0 +1,50 @@ +import os +from ScoutSuite.providers.do.services import DigitalOceanServicesConfig +from ScoutSuite.providers.base.provider import BaseProvider + + +class DigitalOceanProvider(BaseProvider): + """ + Implements provider for DigitalOcean + """ + + def __init__( + self, + report_dir=None, + timestamp=None, + services=None, + skipped_services=None, + **kwargs, + ): + + services = [] if services is None else services + skipped_services = [] if skipped_services is None else skipped_services + + self.metadata_path = ( + "%s/metadata.json" % os.path.split(os.path.abspath(__file__))[0] + ) + + self.provider_code = "do" + self.provider_name = "DigitalOcean" + self.environment = "default" + + self.services_config = DigitalOceanServicesConfig + + self.credentials = kwargs["credentials"] + self.account_id = self.credentials.client.account.get() + self.account_id = self.account_id["account"]["uuid"] + + super().__init__(report_dir, timestamp, services, skipped_services) + + def get_report_name(self): + """ 
+ Returns the name of the report using the provider's configuration + """ + if self.account_id: + return f"do-{self.account_id}" + else: + return "do" + + def preprocessing(self, ip_ranges=None, ip_ranges_name_key=None): + + super().preprocessing() diff --git a/ScoutSuite/providers/do/resources/__init__.py b/ScoutSuite/providers/do/resources/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/resources/base.py b/ScoutSuite/providers/do/resources/base.py new file mode 100644 index 000000000..eeb13e981 --- /dev/null +++ b/ScoutSuite/providers/do/resources/base.py @@ -0,0 +1,22 @@ +"""This module provides implementations for Resources and CompositeResources for DO.""" + +import abc + +from ScoutSuite.providers.base.resources.base import Resources, CompositeResources + + +class DoResources(Resources, metaclass=abc.ABCMeta): + """This is the base class for DO resources.""" + + pass + + +class DoCompositeResources(DoResources, CompositeResources, metaclass=abc.ABCMeta): + """This class represents a collection of composite Resources (resources that include nested resources referred as + their children). Classes extending DoCompositeResources have to define a '_children' attribute which consists of + a list of tuples describing the children. The tuples are expected to respect the following format: + (, ). 'child_name' is used to indicate the name under which the child resources will be + stored in the parent object. + """ + + pass diff --git a/ScoutSuite/providers/do/resources/database/__init__.py b/ScoutSuite/providers/do/resources/database/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/resources/database/base.py b/ScoutSuite/providers/do/resources/database/base.py new file mode 100644 index 000000000..6baec57a5 --- /dev/null +++ b/ScoutSuite/providers/do/resources/database/base.py @@ -0,0 +1,14 @@ +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.providers.do.resources.base import DoCompositeResources +from ScoutSuite.providers.do.resources.database.databases import Databases + + +class Databases(DoCompositeResources): + _children = [(Databases, "databases")] + + def __init__(self, facade: DoFacade): + super().__init__(facade) + self.service = "database" + + async def fetch_all(self, **kwargs): + await self._fetch_children(resource_parent=self) diff --git a/ScoutSuite/providers/do/resources/database/databases.py b/ScoutSuite/providers/do/resources/database/databases.py new file mode 100644 index 000000000..4a7677e06 --- /dev/null +++ b/ScoutSuite/providers/do/resources/database/databases.py @@ -0,0 +1,66 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade + + +class Databases(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + clusters = await self.facade.database.get_databases() + if clusters: + for cluster in clusters: + id, cluster = await self._parse_cluster(cluster) + self[id] = cluster + + async def _parse_cluster(self, raw_cluster): + cluster_dict = {} + + cluster_dict["id"] = raw_cluster["id"] + cluster_dict["cluster_name"] = raw_cluster["name"] + cluster_dict["engine"] = raw_cluster["engine"] + cluster_dict["version"] = raw_cluster["version"] + cluster_dict["semantic_version"] = raw_cluster["semantic_version"] + cluster_dict["tags"] = raw_cluster["tags"] + cluster_dict["databases"] = str(raw_cluster["db_names"]) + + trusted_sources = 
set() + cluster_databases = await self.facade.database.get_firewalls(raw_cluster["id"]) + if cluster_databases: + for cluster_rule in cluster_databases["rules"]: + trusted_sources.add(f"{cluster_rule['type']}s:{cluster_rule['value']}") + + cluster_dict["trusted_sources"] = ( + trusted_sources if trusted_sources else "False" + ) + + if raw_cluster["engine"] == "mysql": + legacy_encryption_users = set() + db_users = await self.facade.database.get_databaseusers(raw_cluster["id"]) + if db_users: + for db_user in db_users: + if ( + db_user["mysql_settings"]["auth_plugin"] + == "mysql_native_password" + ): + legacy_encryption_users.add(db_user["name"]) + + if legacy_encryption_users: + cluster_dict["legacy_encryption_users"] = ( + str(legacy_encryption_users) if legacy_encryption_users else "False" + ) + + elif raw_cluster["engine"] == "redis": + cluster_dict["eviction_policy"] = ( + await self.facade.database.get_eviction_policy(raw_cluster["id"]) + ) + + elif raw_cluster["engine"] == "pg": + connection_pools = await self.facade.database.get_connection_pools( + raw_cluster["id"] + ) + cluster_dict["connection_pools"] = ( + connection_pools if connection_pools else "False" + ) + + return cluster_dict["id"], cluster_dict diff --git a/ScoutSuite/providers/do/resources/droplet/__init__.py b/ScoutSuite/providers/do/resources/droplet/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/resources/droplet/base.py b/ScoutSuite/providers/do/resources/droplet/base.py new file mode 100644 index 000000000..5a5ebdf1a --- /dev/null +++ b/ScoutSuite/providers/do/resources/droplet/base.py @@ -0,0 +1,14 @@ +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.providers.do.resources.base import DoCompositeResources +from ScoutSuite.providers.do.resources.droplet.droplets import Droplets + + +class Droplets(DoCompositeResources): + _children = [(Droplets, "droplets")] + + def __init__(self, facade: DoFacade): + super().__init__(facade) + self.service = "droplet" + + async def fetch_all(self, **kwargs): + await self._fetch_children(resource_parent=self) diff --git a/ScoutSuite/providers/do/resources/droplet/droplets.py b/ScoutSuite/providers/do/resources/droplet/droplets.py new file mode 100644 index 000000000..32198ad43 --- /dev/null +++ b/ScoutSuite/providers/do/resources/droplet/droplets.py @@ -0,0 +1,83 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade + + +class Droplets(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + + droplets = await self.facade.droplet.get_droplets() + if droplets: + for droplet in droplets: + id, droplet = await self._parse_droplet(droplet) + self[id] = droplet + + async def _parse_droplet(self, raw_droplet): + droplet_dict = {} + + droplet_dict["id"] = raw_droplet["id"] + droplet_dict["name"] = raw_droplet["name"] + droplet_dict["memory"] = raw_droplet["memory"] + droplet_dict["vcpus"] = raw_droplet["vcpus"] + droplet_dict["disk"] = raw_droplet["disk"] + droplet_dict["locked"] = raw_droplet["locked"] + droplet_dict["status"] = raw_droplet["status"] + droplet_dict["kernel"] = raw_droplet["kernel"] + droplet_dict["created_at"] = raw_droplet["created_at"] + droplet_dict["features"] = raw_droplet["features"] + droplet_dict["backup_ids"] = str(raw_droplet["backup_ids"]) + droplet_dict["next_backup_window"] = raw_droplet["next_backup_window"] + droplet_dict["snapshot_ids"] = 
str(raw_droplet["snapshot_ids"]) + droplet_dict["image"] = raw_droplet["image"]["slug"] + droplet_dict["volume_ids"] = str(raw_droplet["volume_ids"]) + droplet_dict["size"] = raw_droplet["size"]["slug"] + droplet_dict["size_slug"] = raw_droplet["size_slug"] + droplet_dict["networks"] = str(raw_droplet["networks"]) + droplet_dict["region"] = raw_droplet["region"]["slug"] + droplet_dict["tags"] = raw_droplet["tags"] + droplet_dict["vpc_uuid"] = raw_droplet["vpc_uuid"] + droplet_dict["firewalls"] = None + + droplet_fwconfig = await self.facade.droplet.get_droplet_fwconfig( + raw_droplet["id"] + ) + public_ports = {} + + if droplet_fwconfig: + if droplet_fwconfig["firewalls"]: + droplet_dict["firewalls"] = "" + for firewall in droplet_fwconfig["firewalls"]: + droplet_dict["firewalls"] = ( + droplet_dict["firewalls"] + " , " + firewall["id"] + if droplet_dict["firewalls"] + else firewall["id"] + ) + + for rules in firewall["inbound_rules"]: + if ( + "0.0.0.0/0" in rules["sources"]["addresses"] + or "::/0" in rules["sources"]["addresses"] + ): + public_ports[rules["ports"]] = rules["sources"]["addresses"] + + droplet_dict["all_ports_exposed"] = ( + "True" + if ("0" in public_ports.keys() or not droplet_fwconfig["firewalls"]) + else "False" + ) + droplet_dict["port_22_exposed"] = ( + "True" + if ("22" in public_ports.keys() or droplet_dict["all_ports_exposed"]) + else "False" + ) + + droplet_dict["public_ports_enabled"] = "True" if public_ports else "False" + droplet_dict["public_port_detail"] = ( + f"Port {','.join(public_ports.keys())} exposed to public internet due to this configuration {str(public_ports)}" + if public_ports + else "" + ) + + return droplet_dict["id"], droplet_dict diff --git a/ScoutSuite/providers/do/resources/networking/__init__.py b/ScoutSuite/providers/do/resources/networking/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/resources/networking/base.py b/ScoutSuite/providers/do/resources/networking/base.py new file mode 100644 index 000000000..f9079148c --- /dev/null +++ b/ScoutSuite/providers/do/resources/networking/base.py @@ -0,0 +1,20 @@ +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.providers.do.resources.base import DoCompositeResources +from ScoutSuite.providers.do.resources.networking.firewalls import Firewalls +from ScoutSuite.providers.do.resources.networking.domains import Domains +from ScoutSuite.providers.do.resources.networking.load_balancers import LoadBalancers + + +class Networking(DoCompositeResources): + _children = [ + (Firewalls, "firewalls"), + (Domains, "domains"), + (LoadBalancers, "load_balancers"), + ] + + def __init__(self, facade: DoFacade): + super().__init__(facade) + self.service = "networking" + + async def fetch_all(self, **kwargs): + await self._fetch_children(resource_parent=self) diff --git a/ScoutSuite/providers/do/resources/networking/domains.py b/ScoutSuite/providers/do/resources/networking/domains.py new file mode 100644 index 000000000..c0598df5c --- /dev/null +++ b/ScoutSuite/providers/do/resources/networking/domains.py @@ -0,0 +1,76 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade +import zonefile_parser +import re + + +class Domains(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + domains = await self.facade.networking.get_domains() + if domains: + for domain in 
domains: + id, domain = await self._parse_domain(domain) + if domain: + self[id] = domain + + async def _parse_domain(self, raw_domain): + domain_dict = {} + + domain_dict["id"] = raw_domain["name"] + zone_file = raw_domain["zone_file"] + + try: + records = zonefile_parser.parse(zone_file) + except Exception as e: + print_exception( + f"Failed to parse DNS records check your TXT records for {e}" + ) + return None, None + + record_types = {} + highttl_records = set() + for record in records: + if record.rtype == "TXT": + if record.rdata["value"].startswith("v=spf"): + record_types.update({"SPF": record}) + elif record.rdata["value"].startswith("v=DKIM"): + record_types.update({"DKIM": record}) + elif record.rdata["value"].startswith("v=DMARC"): + record_types.update({"DMARC": record}) + if record.ttl and int(record.ttl) > 3600: + highttl_records.add(record) + record_types.update({record.rtype: record}) + + if "SPF" in record_types: + spf_value = record_types["SPF"].rdata["value"] + + domain_dict["spf_record"] = spf_value if "SPF" in record_types else "False" + domain_dict["dmarc_record"] = ( + record_types["DMARC"].rdata["value"] if "DMARC" in record_types else "False" + ) + domain_dict["dkim_record"] = ( + record_types["DKIM"].rdata["value"] if "DKIM" in record_types else "False" + ) + + domain_dict["highttl_records"] = ( + str( + [ + f"Type[{record.rtype}]::Name[{record.name}]::ttl[{record.ttl}]" + for record in highttl_records + ] + ) + if highttl_records + else "False" + ) + + domain_dict["spf_record_all"] = ( + spf_value + if ("SPF" in record_types and ("+all" in spf_value or "~all" in spf_value)) + else "False" + ) + + return domain_dict["id"], domain_dict diff --git a/ScoutSuite/providers/do/resources/networking/firewalls.py b/ScoutSuite/providers/do/resources/networking/firewalls.py new file mode 100644 index 000000000..566b3f59a --- /dev/null +++ b/ScoutSuite/providers/do/resources/networking/firewalls.py @@ -0,0 +1,47 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade + + +class Firewalls(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + + firewalls = await self.facade.networking.get_firewalls() + if firewalls: + for firewall in firewalls: + id, firewall = await self._parse_firewall(firewall) + self[id] = firewall + + async def _parse_firewall(self, raw_firewall): + firewall_dict = {} + + firewall_dict["id"] = raw_firewall["id"] + firewall_dict["name"] = raw_firewall["name"] + firewall_dict["status"] = raw_firewall["status"] + firewall_dict["inbound_rules"] = raw_firewall["inbound_rules"] + firewall_dict["outbound_rules"] = raw_firewall["outbound_rules"] + firewall_dict["created_at"] = raw_firewall["created_at"] + firewall_dict["droplet_ids"] = str(raw_firewall["droplet_ids"]) + firewall_dict["tags"] = str(raw_firewall["tags"]) + firewall_dict["pending_changes"] = str(raw_firewall["pending_changes"]) + public_ports = {} + for rules in raw_firewall["inbound_rules"]: + if ( + "0.0.0.0/0" in rules["sources"]["addresses"] + or "::/0" in rules["sources"]["addresses"] + ): + public_ports[rules["ports"]] = rules["sources"]["addresses"] + + firewall_dict["all_ports_exposed"] = ( + "True" if ("0" in public_ports.keys()) else "False" + ) + firewall_dict["public_ports_enabled"] = "True" if public_ports else "False" + firewall_dict["public_port_detail"] = ( + f"Port {','.join(public_ports.keys())} exposed to public internet due to this configuration 
{str(public_ports)}" + if public_ports + else "" + ) + + return firewall_dict["id"], firewall_dict diff --git a/ScoutSuite/providers/do/resources/networking/load_balancers.py b/ScoutSuite/providers/do/resources/networking/load_balancers.py new file mode 100644 index 000000000..ba8109036 --- /dev/null +++ b/ScoutSuite/providers/do/resources/networking/load_balancers.py @@ -0,0 +1,29 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade + + +class LoadBalancers(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + load_balancers = await self.facade.networking.get_load_balancers() + if load_balancers: + for load_balancer in load_balancers: + id, load_balancer = await self._parse_load_balancer(load_balancer) + self[id] = load_balancer + + async def _parse_load_balancer(self, raw_load_balancer): + load_balancer_dict = {} + + load_balancer_dict["id"] = raw_load_balancer["id"] + load_balancer_dict["name"] = raw_load_balancer["name"] + load_balancer_dict["name"] = raw_load_balancer["name"] + load_balancer_dict["redirect_http_to_https"] = str( + raw_load_balancer["redirect_http_to_https"] + ) + load_balancer_dict["enable_backend_keepalive"] = str( + raw_load_balancer["enable_backend_keepalive"] + ) + + return load_balancer_dict["id"], load_balancer_dict diff --git a/ScoutSuite/providers/do/resources/spaces/__init__.py b/ScoutSuite/providers/do/resources/spaces/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/resources/spaces/base.py b/ScoutSuite/providers/do/resources/spaces/base.py new file mode 100644 index 000000000..2f3d4fec5 --- /dev/null +++ b/ScoutSuite/providers/do/resources/spaces/base.py @@ -0,0 +1,14 @@ +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.providers.do.resources.base import DoCompositeResources +from ScoutSuite.providers.do.resources.spaces.buckets import Buckets + + +class Spaces(DoCompositeResources): + _children = [(Buckets, "buckets")] + + def __init__(self, facade: DoFacade): + super().__init__(facade) + self.service = "buckets" + + async def fetch_all(self, **kwargs): + await self._fetch_children(resource_parent=self) diff --git a/ScoutSuite/providers/do/resources/spaces/buckets.py b/ScoutSuite/providers/do/resources/spaces/buckets.py new file mode 100644 index 000000000..935df88b8 --- /dev/null +++ b/ScoutSuite/providers/do/resources/spaces/buckets.py @@ -0,0 +1,43 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade +import json + + +class Buckets(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + + buckets = await self.facade.spaces.get_all_buckets() + if buckets: + for bucket in buckets: + id, bucket = await self._parse_buckets(bucket) + self[id] = bucket + + async def _parse_buckets(self, raw_buckets): + buckets_dict = {} + + buckets_dict["name"] = raw_buckets["Name"] + buckets_dict["public_read"] = ( + str(raw_buckets["grantees"]["AllUsers"]["permissions"]["read"]) + if raw_buckets["grantees"] + else None + ) + buckets_dict["public_write"] = ( + raw_buckets["grantees"]["AllUsers"]["permissions"]["write"] + if raw_buckets["grantees"] + else None + ) + buckets_dict["read_acp"] = ( + raw_buckets["grantees"]["AllUsers"]["permissions"]["read_acp"] + if raw_buckets["grantees"] + else None + ) + buckets_dict["write_acp"] = ( + 
raw_buckets["grantees"]["AllUsers"]["permissions"]["write_acp"] + if raw_buckets["grantees"] + else None + ) + + return buckets_dict["name"], buckets_dict diff --git a/ScoutSuite/providers/do/rules/filters/.gitkeep b/ScoutSuite/providers/do/rules/filters/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/rules/findings/database-databases-mysql-publically-exposed.json b/ScoutSuite/providers/do/rules/findings/database-databases-mysql-publically-exposed.json new file mode 100644 index 000000000..28b350e02 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/database-databases-mysql-publically-exposed.json @@ -0,0 +1,20 @@ +{ + "description": "Mysql Database cluster publically exposed", + "rationale": "Typically, only the application servers should be allowed to connect to the database cluster.", + "dashboard_name": "Databases", + "path": "database.databases.id", + "conditions": [ + "and", + [ + "database.databases.id.trusted_sources", + "equal", + "False" + ], + [ + "database.databases.id.engine", + "equal", + "mysql" + ] + ], + "id_suffix": "trusted_sources" +} diff --git a/ScoutSuite/providers/do/rules/findings/database-databases-mysql-user-legacy-encryption.json b/ScoutSuite/providers/do/rules/findings/database-databases-mysql-user-legacy-encryption.json new file mode 100644 index 000000000..1138f2b28 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/database-databases-mysql-user-legacy-encryption.json @@ -0,0 +1,20 @@ +{ + "description": "Mysql Database user with Legacy MySQL 5.x encryption", + "rationale": "DigitalOcean Managed Databases using MySQL 8+ are automatically configured to use caching_sha2_password authentication by default. caching_sha2_password uses a stronger password encryption than prior versions of MySQL.", + "dashboard_name": "Databases", + "path": "database.databases.id", + "conditions": [ + "and", + [ + "database.databases.id.legacy_encryption_users", + "notEqual", + "False" + ], + [ + "database.databases.id.engine", + "equal", + "mysql" + ] + ], + "id_suffix": "legacy_encryption_users" +} diff --git a/ScoutSuite/providers/do/rules/findings/database-databases-postgres-connection-pools.json b/ScoutSuite/providers/do/rules/findings/database-databases-postgres-connection-pools.json new file mode 100644 index 000000000..cd6eb64e5 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/database-databases-postgres-connection-pools.json @@ -0,0 +1,20 @@ +{ + "description": "No connection pools found for Postgres database", + "rationale": "When you use PostgreSQL without a connection pool, each client request creates a new connection to the database. This can lead to a high number of connections, which can cause performance issues and slow down your application. 
Connection pooling can help mitigate these issues by reusing existing connections instead of creating new ones for each request", + "dashboard_name": "Databases", + "path": "database.databases.id", + "conditions": [ + "and", + [ + "database.databases.id.connection_pools", + "equal", + "False" + ], + [ + "database.databases.id.engine", + "equal", + "pg" + ] + ], + "id_suffix": "connection_pools" +} diff --git a/ScoutSuite/providers/do/rules/findings/database-databases-redis-evicition-policy.json b/ScoutSuite/providers/do/rules/findings/database-databases-redis-evicition-policy.json new file mode 100644 index 000000000..c862f5d27 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/database-databases-redis-evicition-policy.json @@ -0,0 +1,20 @@ +{ + "description": "Eviction policy for Redis database cluster not set to 'allkeys-lru'", + "rationale": "When Redis is used as a cache, it is often convenient to let it automatically evict old data as you add new data. Redis provides several eviction policies to choose from, including allkeys-lru, allkeys-lfu, volatile-lru, volatile-lfu, allkeys-random, volatile-random, and volatile-ttl 1. If you do not set an eviction policy, Redis will use the noeviction policy by default. This means that Redis will not evict any keys when the memory limit is reached, and any new values will not be saved 1. If you do not set an eviction policy and Redis runs out of memory, it will start to return errors for commands that could result in more memory being used 1. In general, it is recommended to use the allkeys-lru policy when you expect a power-law distribution in the popularity of your requests. That is, you expect a subset of elements will be accessed far more often than the rest", + "dashboard_name": "Databases", + "path": "database.databases.id", + "conditions": [ + "and", + [ + "database.databases.id.eviction_policy", + "notEqual", + "allkeys-lru" + ], + [ + "database.databases.id.engine", + "equal", + "redis" + ] + ], + "id_suffix": "eviction_policy" +} diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-all-ports-exposed.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-all-ports-exposed.json new file mode 100644 index 000000000..a491c5164 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-all-ports-exposed.json @@ -0,0 +1,15 @@ +{ + "description": "Droplets with all ports exposed to public", + "rationale": "Droplets should expose only required/intented ports to public internet", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.all_ports_exposed", + "equal", + "True" + ] + ], + "id_suffix": "all_ports_exposed" +} diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-enabled.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-enabled.json new file mode 100644 index 000000000..0ba65c386 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-enabled.json @@ -0,0 +1,17 @@ +{ + "description": "Droplets with auto backups disabled", + "rationale": "Droplet backups feature should be enabled for disaster recovery.", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.next_backup_window", + "null", + "" + ] + ], + "id_suffix": "next_backup_window" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-present.json 
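
These findings compare against the values the PATCH 14 resource parsers emit, which are mostly strings ("True"/"False" rather than booleans). A small, engine-independent illustration of the three operators the DigitalOcean rules use; the triggers() helper is purely illustrative and is not part of ScoutSuite's rules engine:

def triggers(resource, attribute, operator, value):
    # Only the operators used by the DigitalOcean findings in this patch.
    if operator == "equal":
        return resource.get(attribute) == value
    if operator == "notEqual":
        return resource.get(attribute) != value
    if operator == "null":
        # e.g. droplets with no next_backup_window or no firewall attached
        return resource.get(attribute) is None
    raise ValueError(f"unsupported operator: {operator}")

droplet = {"all_ports_exposed": "True", "next_backup_window": None}
assert triggers(droplet, "all_ports_exposed", "equal", "True")
assert triggers(droplet, "next_backup_window", "null", "")
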
b/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-present.json new file mode 100644 index 000000000..3ee3f6868 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-present.json @@ -0,0 +1,17 @@ +{ + "description": "Droplets having no backups present", + "rationale": "Droplets should have atleast 1 backup present for disaster recovery.", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.backup_ids", + "equal", + "[]" + ] + ], + "id_suffix": "backup_ids" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-firewall-not-attached.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-firewall-not-attached.json new file mode 100644 index 000000000..e8f1937c7 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-firewall-not-attached.json @@ -0,0 +1,17 @@ +{ + "description": "Droplets with no firewall attached", + "rationale": "Droplet should have a firewall atatched for enabling secure network configuration", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.firewalls", + "null", + "" + ] + ], + "id_suffix": "firewalls" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-port-22-exposed.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-port-22-exposed.json new file mode 100644 index 000000000..cc9d7a86a --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-port-22-exposed.json @@ -0,0 +1,15 @@ +{ + "description": "Droplets with port 22 exposed to public", + "rationale": "Droplets should have port 22 restricted to trusted networks", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.port_22_exposed", + "equal", + "True" + ] + ], + "id_suffix": "port_22_exposed" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-high-ttl.json b/ScoutSuite/providers/do/rules/findings/networking-domains-high-ttl.json new file mode 100644 index 000000000..9a8a42487 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-high-ttl.json @@ -0,0 +1,15 @@ +{ + "description": "Domain has a high TTL record", + "rationale": "Long TTLs delay the propagation of changes. 
For instance, if you update an IP address or switch services, clients will continue using old cached data until the TTL expires", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.highttl_records", + "notEqual", + "False" + ] + ], + "id_suffix": "highttl_records" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dkim.json b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dkim.json new file mode 100644 index 000000000..d9c64dd38 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dkim.json @@ -0,0 +1,15 @@ +{ + "description": "Domain is missing DKIM record", + "rationale": "DKIM helps prevent email spoofing by adding cryptographic signatures to your outgoing emails", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.dkim_record", + "equal", + "False" + ] + ], + "id_suffix": "dkim_record" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dmarc.json b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dmarc.json new file mode 100644 index 000000000..350d18a28 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dmarc.json @@ -0,0 +1,15 @@ +{ + "description": "Domain is missing DMARC record", + "rationale": "A DMARC policy tells a receiving email server what to do after checking a domain's Sender Policy Framework (SPF) and DomainKeys Identified Mail (DKIM) records, which are additional email authentication methods. Addtionally without DMARC, you won't be able receive reports about legitimate and unauthorized emails sent on behalf of your domain", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.dmarc_record", + "equal", + "False" + ] + ], + "id_suffix": "dmarc_record" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-missing-spf.json b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-spf.json new file mode 100644 index 000000000..d4f540b0b --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-spf.json @@ -0,0 +1,15 @@ +{ + "description": "Domain is missing SPF record", + "rationale": "Without an SPF record, attackers can spoof your domain by sending emails that appear to originate from your legitimate domain", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.spf_record", + "equal", + "False" + ] + ], + "id_suffix": "spf_record" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-spf-overly-permissive.json b/ScoutSuite/providers/do/rules/findings/networking-domains-spf-overly-permissive.json new file mode 100644 index 000000000..2b997e1aa --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-spf-overly-permissive.json @@ -0,0 +1,15 @@ +{ + "description": "Domain has a overly permissive SPF record", + "rationale": "Overly permissive SPF record allows the anyone to send emails on your domain's behalf", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.spf_record_all", + "notEqual", + "False" + ] + ], + "id_suffix": "spf_record_all" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-firewalls-public-ports.json 
b/ScoutSuite/providers/do/rules/findings/networking-firewalls-public-ports.json new file mode 100644 index 000000000..fc037a57d --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-firewalls-public-ports.json @@ -0,0 +1,15 @@ +{ + "description": "Firewalls with publically exposed ports", + "rationale": "Firewalls should not expose sensitive exposed to public internet.", + "dashboard_name": "Networking", + "path": "networking.firewalls.id", + "conditions": [ + "and", + [ + "networking.firewalls.id.public_ports_enabled", + "equal", + "True" + ] + ], + "id_suffix": "public_ports_enabled" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-firewalls-quad-zero.json b/ScoutSuite/providers/do/rules/findings/networking-firewalls-quad-zero.json new file mode 100644 index 000000000..3087e3a14 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-firewalls-quad-zero.json @@ -0,0 +1,15 @@ +{ + "description": "Firewalls with quad zero configuration", + "rationale": "Firewalls with quad zero configuration expose all ports to public internet", + "dashboard_name": "Networking", + "path": "networking.firewalls.id", + "conditions": [ + "and", + [ + "networking.firewalls.id.all_ports_exposed", + "equal", + "True" + ] + ], + "id_suffix": "all_ports_exposed" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-load-balancer-backend-keepalive-disabled.json b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-backend-keepalive-disabled.json new file mode 100644 index 000000000..c32919003 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-backend-keepalive-disabled.json @@ -0,0 +1,15 @@ +{ + "description": "Load Balancer with backend Keepalive disabled", + "rationale": "Consider enabling Keep-Alive to improve performance, reduce latency and load", + "dashboard_name": "Networking", + "path": "networking.load_balancers.id", + "conditions": [ + "and", + [ + "networking.load_balancers.id.enable_backend_keepalive", + "equal", + "False" + ] + ], + "id_suffix": "enable_backend_keepalive" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-load-balancer-ssl-redirect-disabled.json b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-ssl-redirect-disabled.json new file mode 100644 index 000000000..8473227c7 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-ssl-redirect-disabled.json @@ -0,0 +1,15 @@ +{ + "description": "Load Balancer with SSL redirects disabled", + "rationale": "SSL redirects should be enabled to enforce https connection", + "dashboard_name": "Networking", + "path": "networking.load_balancers.id", + "conditions": [ + "and", + [ + "networking.load_balancers.id.redirect_http_to_https", + "equal", + "False" + ] + ], + "id_suffix": "redirect_http_to_https" +} diff --git a/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-read.json b/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-read.json new file mode 100644 index 000000000..500a641b2 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-read.json @@ -0,0 +1,17 @@ +{ + "description": "Bucket with public read access", + "rationale": "Buckets with sensitive data must be private only.", + "dashboard_name": "Spaces", + "path": "spaces.buckets.id", + "conditions": [ + "and", + [ + "spaces.buckets.id.public_read", + "equal", + "True" + ] + ], + "id_suffix": "public_read" +} + + diff --git 
a/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-write.json b/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-write.json new file mode 100644 index 000000000..ed3005fd9 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-write.json @@ -0,0 +1,17 @@ +{ + "description": "Bucket with public write access", + "rationale": "Buckets with sensitive data must be private only.", + "dashboard_name": "Spaces", + "path": "spaces.buckets.id", + "conditions": [ + "and", + [ + "spaces.buckets.id.public_write", + "equal", + "true" + ] + ], + "id_suffix": "public_write" +} + + diff --git a/ScoutSuite/providers/do/rules/rulesets/default.json b/ScoutSuite/providers/do/rules/rulesets/default.json new file mode 100644 index 000000000..02d471e4a --- /dev/null +++ b/ScoutSuite/providers/do/rules/rulesets/default.json @@ -0,0 +1,119 @@ +{ + "about": "Default ruleset for DigitalOcean.", + "rules": { + "droplet-droplets-backup-not-enabled.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "droplet-droplets-backup-not-present.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "droplet-droplets-firewall-not-attached.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "droplet-droplets-port-22-exposed.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "droplet-droplets-all-ports-exposed.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "spaces-buckets-public-read.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "networking-firewalls-public-ports.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-firewalls-quad-zero.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "networking-load-balancer-ssl-redirect-disabled.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "networking-load-balancer-backend-keepalive-disabled.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-domains-missing-spf.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "networking-domains-missing-dkim.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-domains-missing-dmarc.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-domains-spf-overly-permissive.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "networking-domains-high-ttl.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "database-databases-mysql-publically-exposed.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "database-databases-mysql-user-legacy-encryption.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "database-databases-redis-evicition-policy.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "database-databases-postgres-connection-pools.json": [ + { + "enabled": true, + "level": "warning" + } + ] + } +} diff --git a/ScoutSuite/providers/do/rules/rulesets/filters.json b/ScoutSuite/providers/do/rules/rulesets/filters.json new file mode 100644 index 000000000..d6a73a987 --- /dev/null +++ b/ScoutSuite/providers/do/rules/rulesets/filters.json @@ -0,0 +1,4 @@ +{ + "about": "Default set of filters for Scout", + "rules": {} +} diff --git a/ScoutSuite/providers/do/services.py b/ScoutSuite/providers/do/services.py new file mode 100644 index 000000000..d3523e4cf --- /dev/null +++ b/ScoutSuite/providers/do/services.py @@ -0,0 +1,23 @@ +from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.do.resources.droplet.base import 
Droplets +from ScoutSuite.providers.do.resources.spaces.base import Spaces +from ScoutSuite.providers.do.resources.networking.base import Networking +from ScoutSuite.providers.do.resources.database.base import Databases +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.providers.base.services import BaseServicesConfig + + +class DigitalOceanServicesConfig(BaseServicesConfig): + def __init__(self, credentials: DoCredentials = None, **kwargs): + super().__init__(credentials) + + facade = DoFacade(credentials) + + self.droplet = Droplets(facade) + self.networking = Networking(facade) + self.database = Databases(facade) + if self.credentials.session: + self.spaces = Spaces(facade) + + def _is_provider(self, provider_name): + return provider_name == "do" diff --git a/ScoutSuite/providers/do/utils.py b/ScoutSuite/providers/do/utils.py new file mode 100644 index 000000000..c08c8becb --- /dev/null +++ b/ScoutSuite/providers/do/utils.py @@ -0,0 +1,23 @@ +import boto3 +from ScoutSuite.core.console import print_exception, print_debug, print_warning + + +def get_client(service: str, session: boto3.session.Session, region: str = None): + """ + Instantiates an DO Spaces API client + + """ + + try: + return ( + session.client( + service, + region_name=region, + endpoint_url="https://" + region + ".digitaloceanspaces.com", + ) + if region + else session.client(service) + ) + except Exception as e: + print_exception(f"Failed to create client for the {service} service: {e}") + return None diff --git a/requirements.txt b/requirements.txt index 7f53a4dfc..3e95690bc 100755 --- a/requirements.txt +++ b/requirements.txt @@ -65,4 +65,10 @@ oss2>=2.8.0 oci>=2.2.4 # Kubernetes SDK -kubernetes \ No newline at end of file +kubernetes + +# DigitalOcean Cloud Provider +pydo >=0.2.0 + +#zone file parser for DigitalOcean domains service +zonefile_parser >=0.1.14 \ No newline at end of file diff --git a/tools/process_raw_response.py b/tools/process_raw_response.py index eec1a5c51..c2044f69d 100755 --- a/tools/process_raw_response.py +++ b/tools/process_raw_response.py @@ -52,7 +52,7 @@ def camel_to_snake(name, upper=False): parser.add_argument('-v', '--value', required=True, help="The raw response") args = parser.parse_args() - if args.provider not in ['aws', 'azure', 'aliyun', 'gcp', 'oci', 'kubernetes']: + if args.provider not in ['aws', 'azure', 'aliyun', 'gcp', 'oci', 'do', 'kubernetes']: # TODO support more providers print('Provider not implemented') exit() @@ -79,6 +79,9 @@ def camel_to_snake(name, upper=False): elif args.provider == 'oci': object_format = 'raw_{}.{}' object_value_dict = json.loads(args.value) + elif args.provider == 'do': + object_format = 'raw_{}.{}' + object_value_dict = json.loads(args.value) elif args.provider == 'kubernetes': object_format = 'raw_{}.{}' object_value_dict = json.loads(args.value) From 63b1b4036d16e29e708bd8ac705c1ca572a48e0b Mon Sep 17 00:00:00 2001 From: Asif Wani Date: Fri, 9 Feb 2024 20:32:50 +0530 Subject: [PATCH 15/32] added-pagination-support --- ScoutSuite/providers/do/facade/droplet.py | 18 ++++++++---- ScoutSuite/providers/do/facade/networking.py | 27 +++++++++++------ ScoutSuite/providers/do/facade/utils.py | 31 ++++++++++++++++++++ 3 files changed, 62 insertions(+), 14 deletions(-) create mode 100644 ScoutSuite/providers/do/facade/utils.py diff --git a/ScoutSuite/providers/do/facade/droplet.py b/ScoutSuite/providers/do/facade/droplet.py index 03b47049f..32944a7db 100644 --- a/ScoutSuite/providers/do/facade/droplet.py +++ 
b/ScoutSuite/providers/do/facade/droplet.py @@ -1,27 +1,35 @@ from ScoutSuite.core.console import print_exception from ScoutSuite.providers.do.authentication_strategy import DoCredentials from ScoutSuite.providers.utils import run_concurrently +from ScoutSuite.providers.do.facade.utils import DOFacadeUtils class DropletFacade: def __init__(self, credentials: DoCredentials): self._credentials = credentials self._client = credentials.client + self.current_page = 1 + self.per_page = 50 async def get_droplets(self): try: - droplets = await run_concurrently( - lambda: self._client.droplets.list()["droplets"] + droplets = await DOFacadeUtils.get_all_from_pagination( + self._client.droplets.list, self.current_page, self.per_page, "droplets" ) - return droplets + return droplets["droplets"] except Exception as e: print_exception(f"Failed to get droplets: {e}") return [] async def get_droplet_fwconfig(self, id): try: - droplet_fwconfig = await run_concurrently( - lambda: self._client.droplets.list_firewalls(id) + filters = {"droplet_id": id} + droplet_fwconfig = await DOFacadeUtils.get_all_from_pagination( + self._client.droplets.list_firewalls, + self.current_page, + self.per_page, + "firewalls", + filters, ) return droplet_fwconfig except Exception as e: diff --git a/ScoutSuite/providers/do/facade/networking.py b/ScoutSuite/providers/do/facade/networking.py index fc5e2ae83..1b5b760b6 100644 --- a/ScoutSuite/providers/do/facade/networking.py +++ b/ScoutSuite/providers/do/facade/networking.py @@ -1,5 +1,6 @@ from ScoutSuite.core.console import print_exception from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.do.facade.utils import DOFacadeUtils from ScoutSuite.providers.utils import run_concurrently @@ -7,33 +8,41 @@ class Networkingfacade: def __init__(self, credentials: DoCredentials): self._credentials = credentials self._client = credentials.client + self.current_page = 1 + self.per_page = 50 async def get_firewalls(self): try: - firewalls = await run_concurrently( - lambda: self._client.firewalls.list()["firewalls"] + firewalls = await DOFacadeUtils.get_all_from_pagination( + self._client.firewalls.list, + self.current_page, + self.per_page, + "firewalls", ) - return firewalls + return firewalls["firewalls"] except Exception as e: print_exception(f"Failed to get firewalls: {e}") return [] async def get_domains(self): try: - domains = await run_concurrently( - lambda: self._client.domains.list()["domains"] + domains = await DOFacadeUtils.get_all_from_pagination( + self._client.domains.list, self.current_page, self.per_page, "domains" ) - return domains + return domains["domains"] except Exception as e: print_exception(f"Failed to get domains: {e}") return [] async def get_load_balancers(self): try: - load_balancers = await run_concurrently( - lambda: self._client.load_balancers.list()["load_balancers"] + load_balancers = await DOFacadeUtils.get_all_from_pagination( + self._client.load_balancers.list, + self.current_page, + self.per_page, + "load_balancers", ) - return load_balancers + return load_balancers["load_balancers"] except Exception as e: print_exception(f"Failed to get load balancers: {e}") return [] diff --git a/ScoutSuite/providers/do/facade/utils.py b/ScoutSuite/providers/do/facade/utils.py new file mode 100644 index 000000000..b377bc7a7 --- /dev/null +++ b/ScoutSuite/providers/do/facade/utils.py @@ -0,0 +1,31 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.authentication_strategy import 
DoCredentials +from ScoutSuite.providers.utils import run_concurrently + + +class DOFacadeUtils: + + @staticmethod + async def get_all_from_pagination( + list_client, current_page, per_page, object_name, filters=None + ): + final_output = {} + next_page = True + while next_page: + if filters: + resp = await run_concurrently( + lambda: list_client(**filters, per_page=per_page, page=current_page) + ) + else: + resp = await run_concurrently( + lambda: list_client(per_page=per_page, page=current_page) + ) + if object_name in final_output.keys(): + final_output[object_name].extend(resp[object_name]) + else: + final_output[object_name] = resp[object_name] + + pages = resp.get("links").get("pages", {}) + next_page = "next" in pages.keys() + current_page += 1 + return final_output From f7350ba2b40d88b7a26533153d34f18c0f9034f4 Mon Sep 17 00:00:00 2001 From: Jakob Rieck Date: Mon, 8 Jan 2024 17:09:32 +0100 Subject: [PATCH 16/32] Fixes 'Key Vault Not Recoverable' check ScoutSuite previously did not flag key vaults for which the API returned enable_soft_delete = null. Such key vaults have neither soft-delete nor purge protecton enabled and are also not recoverable. The check would only flag key vaults for which enable_soft_delete = false. --- ScoutSuite/providers/azure/resources/keyvault/vaults.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ScoutSuite/providers/azure/resources/keyvault/vaults.py b/ScoutSuite/providers/azure/resources/keyvault/vaults.py index 4d5005898..e4be072ca 100755 --- a/ScoutSuite/providers/azure/resources/keyvault/vaults.py +++ b/ScoutSuite/providers/azure/resources/keyvault/vaults.py @@ -30,7 +30,7 @@ def _parse_key_vault(self, raw_vault): vault['resource_group_name'] = get_resource_group_name(raw_vault.id) vault['properties'] = raw_vault.properties vault[ - 'recovery_protection_enabled'] = raw_vault.properties.enable_soft_delete and \ + 'recovery_protection_enabled'] = bool(raw_vault.properties.enable_soft_delete) and \ bool(raw_vault.properties.enable_purge_protection) vault['public_access_allowed'] = self._is_public_access_allowed(raw_vault) vault['rbac_authorization_enabled'] = raw_vault.properties.enable_rbac_authorization From 137228e3030288c51be891bfc16e9ff53108e9e7 Mon Sep 17 00:00:00 2001 From: Jakob Rieck Date: Tue, 9 Jan 2024 10:31:20 +0100 Subject: [PATCH 17/32] Fixes 'Key Vault Role Based Access Control Disabled' check ScoutSuite failed to flag key vaults where the enable_rbac_authorization field was set to null. Through manual configuration in the Azure portal I confirmed that RBAC Access Control is disabled if this field is set to null. 
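For reference, a minimal sketch of why the bool() wrapper matters, assuming the Azure SDK surfaces the unset field as Python None (values below are illustrative only):

    # Value as returned by the API when RBAC authorization was never configured
    enable_rbac_authorization = None

    # Before: the raw None was stored, and the finding did not fire
    rbac_authorization_enabled = enable_rbac_authorization        # -> None

    # After: null and an explicit False are both reported as "disabled"
    rbac_authorization_enabled = bool(enable_rbac_authorization)  # -> False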
--- ScoutSuite/providers/azure/resources/keyvault/vaults.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ScoutSuite/providers/azure/resources/keyvault/vaults.py b/ScoutSuite/providers/azure/resources/keyvault/vaults.py index 4d5005898..8e030bdb8 100755 --- a/ScoutSuite/providers/azure/resources/keyvault/vaults.py +++ b/ScoutSuite/providers/azure/resources/keyvault/vaults.py @@ -33,7 +33,7 @@ def _parse_key_vault(self, raw_vault): 'recovery_protection_enabled'] = raw_vault.properties.enable_soft_delete and \ bool(raw_vault.properties.enable_purge_protection) vault['public_access_allowed'] = self._is_public_access_allowed(raw_vault) - vault['rbac_authorization_enabled'] = raw_vault.properties.enable_rbac_authorization + vault['rbac_authorization_enabled'] = bool(raw_vault.properties.enable_rbac_authorization) return vault['id'], vault def _is_public_access_allowed(self, raw_vault): From e011d48e9b46d50dc974790a49a44d30fce40a48 Mon Sep 17 00:00:00 2001 From: Jakob Rieck Date: Tue, 9 Jan 2024 14:42:21 +0100 Subject: [PATCH 18/32] Corrected display name for 'Blob Containers Allowing Public Access' The test scans Blob Containers, not Storage Accounts. There is a 1:n relationship between Storage Accounts and Blob Containers. --- .../rules/findings/storageaccount-public-blob-container.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ScoutSuite/providers/azure/rules/findings/storageaccount-public-blob-container.json b/ScoutSuite/providers/azure/rules/findings/storageaccount-public-blob-container.json index ab3e6bd5b..e40ede450 100755 --- a/ScoutSuite/providers/azure/rules/findings/storageaccount-public-blob-container.json +++ b/ScoutSuite/providers/azure/rules/findings/storageaccount-public-blob-container.json @@ -18,7 +18,7 @@ "https://learn.microsoft.com/en-us/azure/storage/blobs/storage-manage-access-to-resources", "https://learn.microsoft.com/en-us/azure/security/benchmarks/security-controls-v2-privileged-access" ], - "dashboard_name": "Storage Accounts", + "dashboard_name": "Blob Containers", "display_path": "storageaccounts.subscriptions.id.storage_accounts.id", "path": "storageaccounts.subscriptions.id.storage_accounts.id.blob_containers.id", "conditions": [ From 22c5bf6344f34791432c294115e7f483f0400c98 Mon Sep 17 00:00:00 2001 From: Jakob Rieck Date: Wed, 10 Jan 2024 17:42:30 +0100 Subject: [PATCH 19/32] Improves 'Access Keys Not Rotated' check - Updates azure-mgmt-storage to 17.0.0 - Only consider storage accounts that allow access key access for the check - Display the access key status in the results --- ...nts.subscriptions.id.storage_accounts.html | 1 + .../storageaccounts/storage_accounts.py | 2 ++ ...torageaccount-access-keys-not-rotated.json | 24 ++++++++++++------- requirements.txt | 2 +- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/ScoutSuite/output/data/html/partials/azure/services.storageaccounts.subscriptions.id.storage_accounts.html b/ScoutSuite/output/data/html/partials/azure/services.storageaccounts.subscriptions.id.storage_accounts.html index 4d1352845..d39754fa7 100755 --- a/ScoutSuite/output/data/html/partials/azure/services.storageaccounts.subscriptions.id.storage_accounts.html +++ b/ScoutSuite/output/data/html/partials/azure/services.storageaccounts.subscriptions.id.storage_accounts.html @@ -10,6 +10,7 @@

Information

Public Traffic: {{convert_bool_to_enabled public_traffic_allowed }}
HTTPS Required: {{convert_bool_to_enabled https_traffic_enabled}}
Microsoft Trusted Services: {{convert_bool_to_enabled trusted_microsoft_services_enabled }}
+
Access Key Usage: {{convert_bool_to_enabled shared_key_access_allowed}}
Last Access Key Rotation: {{#if access_keys_last_rotation_date }} diff --git a/ScoutSuite/providers/azure/resources/storageaccounts/storage_accounts.py b/ScoutSuite/providers/azure/resources/storageaccounts/storage_accounts.py index 83812511c..855c535df 100755 --- a/ScoutSuite/providers/azure/resources/storageaccounts/storage_accounts.py +++ b/ScoutSuite/providers/azure/resources/storageaccounts/storage_accounts.py @@ -45,6 +45,8 @@ def _parse_storage_account(self, raw_storage_account): storage_account['trusted_microsoft_services_enabled'] = \ self._is_trusted_microsoft_services_enabled(raw_storage_account) storage_account['bypass'] = raw_storage_account.network_rule_set.bypass + # The default value (null) is equivalent to True + storage_account['shared_key_access_allowed'] = raw_storage_account.allow_shared_key_access != False storage_account['access_keys_last_rotation_date'] = \ self._parse_access_keys_last_rotation_date(raw_storage_account.activity_logs) storage_account['encryption_key_source'] = raw_storage_account.encryption.key_source diff --git a/ScoutSuite/providers/azure/rules/findings/storageaccount-access-keys-not-rotated.json b/ScoutSuite/providers/azure/rules/findings/storageaccount-access-keys-not-rotated.json index da580f77e..c35755525 100755 --- a/ScoutSuite/providers/azure/rules/findings/storageaccount-access-keys-not-rotated.json +++ b/ScoutSuite/providers/azure/rules/findings/storageaccount-access-keys-not-rotated.json @@ -21,18 +21,26 @@ "dashboard_name": "Storage Accounts", "path": "storageaccounts.subscriptions.id.storage_accounts.id", "conditions": [ - "or", + "and", [ - "storageaccounts.subscriptions.id.storage_accounts.id.access_keys_last_rotation_date", - "equal", - "None" + "storageaccounts.subscriptions.id.storage_accounts.id.shared_key_access_allowed", + "true", + "" ], [ - "storageaccounts.subscriptions.id.storage_accounts.id.access_keys_last_rotation_date", - "olderThan", + "or", [ - "_ARG_0_", - "days" + "storageaccounts.subscriptions.id.storage_accounts.id.access_keys_last_rotation_date", + "equal", + "None" + ], + [ + "storageaccounts.subscriptions.id.storage_accounts.id.access_keys_last_rotation_date", + "olderThan", + [ + "_ARG_0_", + "days" + ] ] ] ], diff --git a/requirements.txt b/requirements.txt index 7f53a4dfc..691eb6fc0 100755 --- a/requirements.txt +++ b/requirements.txt @@ -35,7 +35,7 @@ azure-identity==1.5.0 ## for resources azure-mgmt-resource==15.0.0 -azure-mgmt-storage==16.0.0 +azure-mgmt-storage==17.0.0 azure-mgmt-monitor==2.0.0 azure-mgmt-sql==1.0.0 azure-mgmt-security==1.0.0 From d7485d2d0e1123272132362ac954727fbb8a3840 Mon Sep 17 00:00:00 2001 From: Rennie deGraaf Date: Mon, 4 Mar 2024 15:09:23 -0800 Subject: [PATCH 20/32] AWS EBS default encryption: fixed display problems. Apparently ScoutSuite makes the implicit assumption that all settings are associated with resources, rather than directly to the region + service. So we move the regional EBS settings into a fake resource. This means that paths now need to include an ID for the "resource". 
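As a rough sketch of the resulting shape (illustrative values only, mirroring the test fixtures added later in this series), the fake resource keyed by 0 makes the settings look like any other resource collection:

    regional_settings = {
        '0': {
            'ebs_encryption_default': False,
            'ebs_default_encryption_key_id': 'alias/aws/ebs',
        }
    }

so the rule path "ec2.regions.id.regional_settings.id" can resolve its trailing "id" to that key, exactly as it does for real resources.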
--- .../aws/services.ec2.regions.id.regional_settings.html | 7 ++----- .../providers/aws/resources/ec2/regional_settings.py | 8 ++++++-- .../findings/ec2_ebs_default_encryption_disabled.json | 7 ++++--- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html index b4eec1f69..86b0f687d 100644 --- a/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html +++ b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html @@ -4,13 +4,10 @@

{{region}}

- - -

Regional settings

    -
  • Encryption enabled for EBS Volumes by default: {{getValueAt 'services.ec2.regions' region 'regional_settings.ebs_encryption_default'}}
  • -
  • Default encryption key: {{getValueAt 'services.ec2.regions' region 'regional_settings.ebs_default_encryption_key_id'}}
  • +
  • Encryption enabled for EBS Volumes by default: {{ebs_encryption_default}}
  • +
  • Default encryption key: {{ebs_default_encryption_key_id}}
diff --git a/ScoutSuite/providers/aws/resources/ec2/regional_settings.py b/ScoutSuite/providers/aws/resources/ec2/regional_settings.py index 6aae176fb..73ac10b62 100644 --- a/ScoutSuite/providers/aws/resources/ec2/regional_settings.py +++ b/ScoutSuite/providers/aws/resources/ec2/regional_settings.py @@ -12,5 +12,9 @@ def __init__(self, facade: AWSFacade, region: str): self.resource_type = 'regional_setting' async def fetch_all(self): - self['ebs_encryption_default'] = await self.facade.ec2.get_ebs_encryption(self.region) - self['ebs_default_encryption_key_id'] = await self.facade.ec2.get_ebs_default_encryption_key(self.region) + # These settings are associated directly with the service+region, not with any resource. + # However, ScoutSuite seems to assume that every setting is tied to a resource so we make + # up a fake resource to hold them. + self[0] = {} + self[0]['ebs_encryption_default'] = await self.facade.ec2.get_ebs_encryption(self.region) + self[0]['ebs_default_encryption_key_id'] = await self.facade.ec2.get_ebs_default_encryption_key(self.region) diff --git a/ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json b/ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json index cc623520f..a37acef3f 100644 --- a/ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json +++ b/ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json @@ -6,13 +6,14 @@ "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html#encryption-by-default" ], "dashboard_name": "Regions", - "path": "ec2.regions.id.regional_settings.ebs_encryption_default", + "path": "ec2.regions.id.regional_settings.id", "conditions": [ "and", [ - "ec2.regions.id.regional_settings.ebs_encryption_default", + "ebs_encryption_default", "false", "" ] - ] + ], + "id_suffix": "NoDefaultEBSEncryption" } \ No newline at end of file From f90bcd04d96a1f4db4394049693b38e2c0ac3c13 Mon Sep 17 00:00:00 2001 From: Rennie deGraaf Date: Mon, 4 Mar 2024 16:12:17 -0800 Subject: [PATCH 21/32] AWS EBS default encryption: renamed files for consistency. 
--- ...n_disabled.json => ec2-ebs-default-encryption-disabled.json} | 0 ScoutSuite/providers/aws/rules/rulesets/default.json | 2 +- ScoutSuite/providers/aws/rules/rulesets/detailed.json | 2 +- 3 files changed, 2 insertions(+), 2 deletions(-) rename ScoutSuite/providers/aws/rules/findings/{ec2_ebs_default_encryption_disabled.json => ec2-ebs-default-encryption-disabled.json} (100%) diff --git a/ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json b/ScoutSuite/providers/aws/rules/findings/ec2-ebs-default-encryption-disabled.json similarity index 100% rename from ScoutSuite/providers/aws/rules/findings/ec2_ebs_default_encryption_disabled.json rename to ScoutSuite/providers/aws/rules/findings/ec2-ebs-default-encryption-disabled.json diff --git a/ScoutSuite/providers/aws/rules/rulesets/default.json b/ScoutSuite/providers/aws/rules/rulesets/default.json index 255c08138..61dfeb5d7 100755 --- a/ScoutSuite/providers/aws/rules/rulesets/default.json +++ b/ScoutSuite/providers/aws/rules/rulesets/default.json @@ -142,7 +142,7 @@ "level": "danger" } ], - "ec2_ebs_default_encryption_disabled.json": [ + "ec2-ebs-default-encryption-disabled.json": [ { "enabled": true, "level": "warning" diff --git a/ScoutSuite/providers/aws/rules/rulesets/detailed.json b/ScoutSuite/providers/aws/rules/rulesets/detailed.json index 004b7a5f6..38d6f366f 100755 --- a/ScoutSuite/providers/aws/rules/rulesets/detailed.json +++ b/ScoutSuite/providers/aws/rules/rulesets/detailed.json @@ -142,7 +142,7 @@ "level": "danger" } ], - "ec2_ebs_default_encryption_disabled.json": [ + "ec2-ebs-default-encryption-disabled.json": [ { "enabled": true, "level": "warning" From 01de9d0f1146f130cd27425345676f41658a093f Mon Sep 17 00:00:00 2001 From: Rennie deGraaf Date: Mon, 4 Mar 2024 16:13:03 -0800 Subject: [PATCH 22/32] AWS EBS default encryption: added tests. 
--- .../ec2-ebs-default-encryption-disabled.json | 1 + tests/data/rule-configs/ec2.json | 30 +++++++++++++++++++ .../ec2-ebs-default-encryption-disabled.json | 5 ++++ 3 files changed, 36 insertions(+) create mode 120000 tests/data/rule-configs/ec2-ebs-default-encryption-disabled.json create mode 100644 tests/data/rule-results/ec2-ebs-default-encryption-disabled.json diff --git a/tests/data/rule-configs/ec2-ebs-default-encryption-disabled.json b/tests/data/rule-configs/ec2-ebs-default-encryption-disabled.json new file mode 120000 index 000000000..667f7b250 --- /dev/null +++ b/tests/data/rule-configs/ec2-ebs-default-encryption-disabled.json @@ -0,0 +1 @@ +ec2.json \ No newline at end of file diff --git a/tests/data/rule-configs/ec2.json b/tests/data/rule-configs/ec2.json index 79e7173b2..ac8d2a3df 100755 --- a/tests/data/rule-configs/ec2.json +++ b/tests/data/rule-configs/ec2.json @@ -7,6 +7,12 @@ "ap-northeast-2": { "instances_count": 0, "region": "ap-northeast-2", + "regional_settings": { + "0": { + "ebs_default_encryption_key_id": "alias/aws/ebs", + "ebs_encryption_default": false + } + }, "security_groups_count": 1, "snapshots": {}, "snapshots_count": 0, @@ -59,6 +65,12 @@ "ap-south-1": { "instances_count": 0, "region": "ap-south-1", + "regional_settings": { + "0": { + "ebs_default_encryption_key_id": "alias/aws/ebs", + "ebs_encryption_default": false + } + }, "security_groups_count": 1, "snapshots": {}, "snapshots_count": 0, @@ -108,6 +120,12 @@ "eu-central-1": { "instances_count": 0, "region": "eu-central-1", + "regional_settings": { + "0": { + "ebs_default_encryption_key_id": "alias/aws/ebs", + "ebs_encryption_default": false + } + }, "security_groups_count": 1, "snapshots": {}, "snapshots_count": 0, @@ -146,6 +164,12 @@ "eu-west-1": { "instances_count": 35, "region": "eu-west-1", + "regional_settings": { + "0": { + "ebs_default_encryption_key_id": "arn:aws:kms:us-east-1:123456789012:key/12345678-90ab-cdef-1234-567890abcdef", + "ebs_encryption_default": true + } + }, "security_groups_count": 30, "vpcs": { "vpc-eu111111": { @@ -248,6 +272,12 @@ "sa-east-1": { "instances_count": 0, "region": "sa-east-1", + "regional_settings": { + "0": { + "ebs_default_encryption_key_id": "arn:aws:kms:us-east-1:123456789012:key/12345678-90ab-cdef-1234-567890abcdef", + "ebs_encryption_default": true + } + }, "security_groups_count": 1, "snapshots": {}, "snapshots_count": 0, diff --git a/tests/data/rule-results/ec2-ebs-default-encryption-disabled.json b/tests/data/rule-results/ec2-ebs-default-encryption-disabled.json new file mode 100644 index 000000000..9192b13d5 --- /dev/null +++ b/tests/data/rule-results/ec2-ebs-default-encryption-disabled.json @@ -0,0 +1,5 @@ +[ + "ec2.regions.ap-northeast-2.regional_settings.0.NoDefaultEBSEncryption", + "ec2.regions.ap-south-1.regional_settings.0.NoDefaultEBSEncryption", + "ec2.regions.eu-central-1.regional_settings.0.NoDefaultEBSEncryption" +] From d640d66d5d10a5a49cf79497ce8535de4b48f3f5 Mon Sep 17 00:00:00 2001 From: Rennie deGraaf Date: Tue, 5 Mar 2024 09:40:14 -0800 Subject: [PATCH 23/32] AWS EBS default encryption: enabled the single region template. 
--- .../aws/services.ec2.regions.id.regional_settings.html | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html index 86b0f687d..93e039e81 100644 --- a/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html +++ b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.regional_settings.html @@ -16,12 +16,11 @@

Regional settings

Handlebars.registerPartial("services.ec2.regions.id.regional_settings", $("#services\\.ec2\\.regions\\.id\\.regional_settings\\.partial").html()); - - + - --> + From 68e919900cbf29673ed8a187e68c82bffced1660 Mon Sep 17 00:00:00 2001 From: Rennie deGraaf Date: Tue, 5 Mar 2024 09:41:24 -0800 Subject: [PATCH 24/32] AWS EBS default encryption: moved parsing logic out of the facade. --- ScoutSuite/providers/aws/facade/ec2.py | 4 ++-- ScoutSuite/providers/aws/resources/ec2/regional_settings.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/ScoutSuite/providers/aws/facade/ec2.py b/ScoutSuite/providers/aws/facade/ec2.py index 2557d3573..cca940c6a 100755 --- a/ScoutSuite/providers/aws/facade/ec2.py +++ b/ScoutSuite/providers/aws/facade/ec2.py @@ -223,7 +223,7 @@ async def get_route_tables(self, region): async def get_ebs_encryption(self, region): ec2_client = AWSFacadeUtils.get_client('ec2', self.session, region) try: - encryption_settings = await run_concurrently(lambda: ec2_client.get_ebs_encryption_by_default()['EbsEncryptionByDefault']) + encryption_settings = await run_concurrently(lambda: ec2_client.get_ebs_encryption_by_default()) return encryption_settings except Exception as e: print_exception(f'Failed to retrieve EBS encryption settings: {e}') @@ -231,7 +231,7 @@ async def get_ebs_encryption(self, region): async def get_ebs_default_encryption_key(self, region): ec2_client = AWSFacadeUtils.get_client('ec2', self.session, region) try: - encryption_key = await run_concurrently(lambda: ec2_client.get_ebs_default_kms_key_id()['KmsKeyId']) + encryption_key = await run_concurrently(lambda: ec2_client.get_ebs_default_kms_key_id()) return encryption_key except Exception as e: print_exception(f'Failed to retrieve EBS encryption key ID: {e}') diff --git a/ScoutSuite/providers/aws/resources/ec2/regional_settings.py b/ScoutSuite/providers/aws/resources/ec2/regional_settings.py index 73ac10b62..3e00cb40e 100644 --- a/ScoutSuite/providers/aws/resources/ec2/regional_settings.py +++ b/ScoutSuite/providers/aws/resources/ec2/regional_settings.py @@ -16,5 +16,5 @@ async def fetch_all(self): # However, ScoutSuite seems to assume that every setting is tied to a resource so we make # up a fake resource to hold them. self[0] = {} - self[0]['ebs_encryption_default'] = await self.facade.ec2.get_ebs_encryption(self.region) - self[0]['ebs_default_encryption_key_id'] = await self.facade.ec2.get_ebs_default_encryption_key(self.region) + self[0]['ebs_encryption_default'] = (await self.facade.ec2.get_ebs_encryption(self.region))['EbsEncryptionByDefault'] + self[0]['ebs_default_encryption_key_id'] = (await self.facade.ec2.get_ebs_default_encryption_key(self.region))['KmsKeyId'] From e500930dd1faf742735aaffbf48ca3a907cf4b0c Mon Sep 17 00:00:00 2001 From: Jakob Rieck Date: Fri, 12 Apr 2024 14:16:29 +0200 Subject: [PATCH 25/32] Updates credential report to not highlight inactive credentials --- .../aws/services.iam.credential_reports.html | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ScoutSuite/output/data/html/partials/aws/services.iam.credential_reports.html b/ScoutSuite/output/data/html/partials/aws/services.iam.credential_reports.html index 2a33dc8ed..0d49c3bce 100755 --- a/ScoutSuite/output/data/html/partials/aws/services.iam.credential_reports.html +++ b/ScoutSuite/output/data/html/partials/aws/services.iam.credential_reports.html @@ -8,16 +8,16 @@

{{name}}

Credentials Report

Creation Date: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'user_creation_time')}}
Last Used Date: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'last_used')}}
-
Password Enabled: {{getValueAt 'services' 'iam' 'credential_reports' @key 'password_enabled'}}
-
Password Last Used: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'password_last_used')}}
+
Password Enabled: {{getValueAt 'services' 'iam' 'credential_reports' @key 'password_enabled'}}
+
Password Last Used: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'password_last_used')}}
Password Last Changed: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'password_last_changed')}}
MFA Active: {{getValueAt 'services' 'iam' 'credential_reports' @key 'mfa_active'}}
Hardware MFA Active: {{getValueAt 'services' 'iam' 'credential_reports' @key 'mfa_active_hardware'}}
-
Access Key 1 Active: {{getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_1_active'}}
-
Access Key 1 Last Used: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_1_last_used_date')}}
+
Access Key 1 Active: {{getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_1_active'}}
+
Access Key 1 Last Used: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_1_last_used_date')}}
Access Key 1 Last Rotated: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_1_last_rotated')}}
-
Access Key 2 Active: {{getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_2_active'}}
-
Access Key 2 Last Used: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_2_last_used_date')}}
+
Access Key 2 Active: {{getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_2_active'}}
+
Access Key 2 Last Used: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_2_last_used_date')}}
Access Key 2 Last Rotated: {{ format_date (getValueAt 'services' 'iam' 'credential_reports' @key 'access_key_2_last_rotated')}}
Signing Cert 1 Active: {{getValueAt 'services' 'iam' 'credential_reports' @key 'cert_1_active'}}
Signing Cert 2 Active: {{getValueAt 'services' 'iam' 'credential_reports' @key 'cert_2_active'}}
From 6bd204a589e2f7a825dc101b17430e4dd50c296f Mon Sep 17 00:00:00 2001 From: Jakob Rieck Date: Tue, 16 Apr 2024 11:08:15 +0200 Subject: [PATCH 26/32] Improves iam-user-no-key-rotation rule --- .../providers/aws/rules/findings/iam-user-no-key-rotation.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ScoutSuite/providers/aws/rules/findings/iam-user-no-key-rotation.json b/ScoutSuite/providers/aws/rules/findings/iam-user-no-key-rotation.json index e26c0cde0..984b7f317 100755 --- a/ScoutSuite/providers/aws/rules/findings/iam-user-no-key-rotation.json +++ b/ScoutSuite/providers/aws/rules/findings/iam-user-no-key-rotation.json @@ -1,5 +1,5 @@ { - "description": "Lack of Key Rotation for (_ARG_0_) Days", + "description": "Lack of Key Rotation for _ARG_1_ Days (Key Status: _ARG_0_)", "rationale": "In case of access key compromise, the lack of credential rotation increases the period during which an attacker has access to the AWS account.", "remediation": "Rotate access keys that have not been changed recently", "compliance": [ From 869919ce5c0973348686d779473527d80b71bc7b Mon Sep 17 00:00:00 2001 From: Jakob Rieck Date: Wed, 17 Apr 2024 10:14:01 +0200 Subject: [PATCH 27/32] Adds highlighting for "EBS Volume Not Encrypted" detail view --- .../partials/aws/services.ec2.regions.id.volumes.html | 11 +++++++++-- .../rules/findings/ec2-ebs-volume-not-encrypted.json | 3 ++- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.volumes.html b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.volumes.html index c1e6a113c..4da0bf016 100755 --- a/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.volumes.html +++ b/ScoutSuite/output/data/html/partials/aws/services.ec2.regions.id.volumes.html @@ -4,8 +4,15 @@

{{name}}

-

Attributes

- {{> generic_object this}} +

Information

+
ID: {{value_or_none id}}
+
ARN: {{value_or_none arn}}
+
Name: {{value_or_none name}}
+
State: {{value_or_none State}}
+
Size: {{value_or_none Size}} GiB
+
Volume Type: {{value_or_none VolumeType}}
+
Create Time: {{value_or_none CreateTime}}
+
Encryption: {{convert_bool_to_enabled Encrypted}}
diff --git a/ScoutSuite/providers/aws/rules/findings/ec2-ebs-volume-not-encrypted.json b/ScoutSuite/providers/aws/rules/findings/ec2-ebs-volume-not-encrypted.json index f2c721730..224c49eb1 100755 --- a/ScoutSuite/providers/aws/rules/findings/ec2-ebs-volume-not-encrypted.json +++ b/ScoutSuite/providers/aws/rules/findings/ec2-ebs-volume-not-encrypted.json @@ -13,5 +13,6 @@ "false", "" ] - ] + ], + "id_suffix": "encrypted" } From f06df79fe0934d8601a334cbd2017e266a624e54 Mon Sep 17 00:00:00 2001 From: ltoroncc Date: Tue, 7 May 2024 09:37:11 -0400 Subject: [PATCH 28/32] Digital Ocean Integration --- README.md | 1 + ScoutSuite/__main__.py | 17 ++ ScoutSuite/core/cli_parser.py | 32 +++ .../do/services.database.databases.html | 33 +++ .../do/services.droplet.droplets.html | 42 ++++ .../do/services.kubernetes.cluster.html | 26 +++ .../do/services.networking.domains.html | 27 +++ .../do/services.networking.firewalls.html | 35 +++ .../services.networking.load_balancers.html | 26 +++ .../partials/do/services.spaces.buckets.html | 27 +++ .../output/data/html/summaries/do/.gitkeep | 0 ScoutSuite/providers/__init__.py | 3 +- .../base/authentication_strategy_factory.py | 3 +- .../providers/do/authentication_strategy.py | 49 ++++ ScoutSuite/providers/do/facade/__init__.py | 0 ScoutSuite/providers/do/facade/base.py | 19 ++ ScoutSuite/providers/do/facade/database.py | 71 ++++++ ScoutSuite/providers/do/facade/droplet.py | 46 ++++ ScoutSuite/providers/do/facade/kubernetes.py | 21 ++ ScoutSuite/providers/do/facade/networking.py | 48 ++++ ScoutSuite/providers/do/facade/spaces.py | 212 ++++++++++++++++++ ScoutSuite/providers/do/facade/utils.py | 31 +++ ScoutSuite/providers/do/metadata.json | 60 +++++ ScoutSuite/providers/do/provider.py | 50 +++++ ScoutSuite/providers/do/resources/__init__.py | 0 ScoutSuite/providers/do/resources/base.py | 22 ++ .../do/resources/database/__init__.py | 0 .../providers/do/resources/database/base.py | 14 ++ .../do/resources/database/databases.py | 70 ++++++ .../do/resources/droplet/__init__.py | 0 .../providers/do/resources/droplet/base.py | 14 ++ .../do/resources/droplet/droplets.py | 91 ++++++++ .../do/resources/kubernetes/__init__.py | 0 .../providers/do/resources/kubernetes/base.py | 14 ++ .../do/resources/kubernetes/kubernetes.py | 25 +++ .../do/resources/networking/__init__.py | 0 .../providers/do/resources/networking/base.py | 20 ++ .../do/resources/networking/domains.py | 50 +++++ .../do/resources/networking/firewalls.py | 47 ++++ .../do/resources/networking/load_balancers.py | 30 +++ .../providers/do/resources/spaces/__init__.py | 0 .../providers/do/resources/spaces/base.py | 14 ++ .../providers/do/resources/spaces/buckets.py | 50 +++++ .../providers/do/rules/filters/.gitkeep | 0 ...se-databases-mysql-publically-exposed.json | 20 ++ ...atabases-mysql-user-legacy-encryption.json | 20 ++ ...e-databases-postgres-connection-pools.json | 20 ++ ...se-databases-postgres-trusted-sources.json | 15 ++ ...base-databases-redis-evicition-policy.json | 20 ++ .../droplet-droplets-all-ports-exposed.json | 15 ++ .../droplet-droplets-backup-not-enabled.json | 17 ++ .../droplet-droplets-backup-not-present.json | 17 ++ .../droplet-droplets-custom-image.json | 16 ++ .../droplet-droplets-features-monitoring.json | 16 ++ ...roplet-droplets-firewall-not-attached.json | 17 ++ .../droplet-droplets-port-22-exposed.json | 15 ++ ...droplet-droplets-snapshot-not-present.json | 17 ++ ...bernetes-kubernetes-autoupgrade-minor.json | 17 ++ .../kubernetes-kubernetes-ha-enabled.json | 17 ++ 
.../kubernetes-kubernetes-surge-upgrade.json | 17 ++ .../findings/networking-domains-high-ttl.json | 15 ++ .../networking-domains-missing-dkim.json | 15 ++ .../networking-domains-missing-dmarc.json | 15 ++ .../networking-domains-missing-spf.json | 15 ++ ...working-domains-spf-overly-permissive.json | 15 ++ .../networking-firewalls-public-ports.json | 15 ++ .../networking-firewalls-quad-zero.json | 15 ++ ...d-balancer-backend-keepalive-disabled.json | 15 ++ ...g-load-balancer-ssl-redirect-disabled.json | 15 ++ ...working-load-balancer-without-droplet.json | 15 ++ .../rules/findings/spaces-buckets-cors.json | 17 ++ .../findings/spaces-buckets-public-read.json | 17 ++ .../findings/spaces-buckets-public-write.json | 17 ++ .../providers/do/rules/rulesets/default.json | 167 ++++++++++++++ .../providers/do/rules/rulesets/filters.json | 4 + ScoutSuite/providers/do/services.py | 25 +++ ScoutSuite/providers/do/utils.py | 23 ++ requirements.txt | 5 +- tools/process_raw_response.py | 5 +- 79 files changed, 2042 insertions(+), 4 deletions(-) create mode 100644 ScoutSuite/output/data/html/partials/do/services.database.databases.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.droplet.droplets.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.kubernetes.cluster.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.networking.domains.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.networking.firewalls.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.networking.load_balancers.html create mode 100644 ScoutSuite/output/data/html/partials/do/services.spaces.buckets.html create mode 100644 ScoutSuite/output/data/html/summaries/do/.gitkeep create mode 100644 ScoutSuite/providers/do/authentication_strategy.py create mode 100644 ScoutSuite/providers/do/facade/__init__.py create mode 100644 ScoutSuite/providers/do/facade/base.py create mode 100644 ScoutSuite/providers/do/facade/database.py create mode 100644 ScoutSuite/providers/do/facade/droplet.py create mode 100644 ScoutSuite/providers/do/facade/kubernetes.py create mode 100644 ScoutSuite/providers/do/facade/networking.py create mode 100644 ScoutSuite/providers/do/facade/spaces.py create mode 100644 ScoutSuite/providers/do/facade/utils.py create mode 100644 ScoutSuite/providers/do/metadata.json create mode 100644 ScoutSuite/providers/do/provider.py create mode 100644 ScoutSuite/providers/do/resources/__init__.py create mode 100644 ScoutSuite/providers/do/resources/base.py create mode 100644 ScoutSuite/providers/do/resources/database/__init__.py create mode 100644 ScoutSuite/providers/do/resources/database/base.py create mode 100644 ScoutSuite/providers/do/resources/database/databases.py create mode 100644 ScoutSuite/providers/do/resources/droplet/__init__.py create mode 100644 ScoutSuite/providers/do/resources/droplet/base.py create mode 100644 ScoutSuite/providers/do/resources/droplet/droplets.py create mode 100644 ScoutSuite/providers/do/resources/kubernetes/__init__.py create mode 100644 ScoutSuite/providers/do/resources/kubernetes/base.py create mode 100644 ScoutSuite/providers/do/resources/kubernetes/kubernetes.py create mode 100644 ScoutSuite/providers/do/resources/networking/__init__.py create mode 100644 ScoutSuite/providers/do/resources/networking/base.py create mode 100644 ScoutSuite/providers/do/resources/networking/domains.py create mode 100644 ScoutSuite/providers/do/resources/networking/firewalls.py create mode 100644 
ScoutSuite/providers/do/resources/networking/load_balancers.py create mode 100644 ScoutSuite/providers/do/resources/spaces/__init__.py create mode 100644 ScoutSuite/providers/do/resources/spaces/base.py create mode 100644 ScoutSuite/providers/do/resources/spaces/buckets.py create mode 100644 ScoutSuite/providers/do/rules/filters/.gitkeep create mode 100644 ScoutSuite/providers/do/rules/findings/database-databases-mysql-publically-exposed.json create mode 100644 ScoutSuite/providers/do/rules/findings/database-databases-mysql-user-legacy-encryption.json create mode 100644 ScoutSuite/providers/do/rules/findings/database-databases-postgres-connection-pools.json create mode 100644 ScoutSuite/providers/do/rules/findings/database-databases-postgres-trusted-sources.json create mode 100644 ScoutSuite/providers/do/rules/findings/database-databases-redis-evicition-policy.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-all-ports-exposed.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-enabled.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-present.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-custom-image.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-features-monitoring.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-firewall-not-attached.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-port-22-exposed.json create mode 100644 ScoutSuite/providers/do/rules/findings/droplet-droplets-snapshot-not-present.json create mode 100644 ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-autoupgrade-minor.json create mode 100644 ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-ha-enabled.json create mode 100644 ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-surge-upgrade.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-high-ttl.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-missing-dkim.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-missing-dmarc.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-missing-spf.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-domains-spf-overly-permissive.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-firewalls-public-ports.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-firewalls-quad-zero.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-load-balancer-backend-keepalive-disabled.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-load-balancer-ssl-redirect-disabled.json create mode 100644 ScoutSuite/providers/do/rules/findings/networking-load-balancer-without-droplet.json create mode 100644 ScoutSuite/providers/do/rules/findings/spaces-buckets-cors.json create mode 100644 ScoutSuite/providers/do/rules/findings/spaces-buckets-public-read.json create mode 100644 ScoutSuite/providers/do/rules/findings/spaces-buckets-public-write.json create mode 100644 ScoutSuite/providers/do/rules/rulesets/default.json create mode 100644 ScoutSuite/providers/do/rules/rulesets/filters.json create mode 100644 ScoutSuite/providers/do/services.py create mode 100644 ScoutSuite/providers/do/utils.py diff --git a/README.md b/README.md index 93a3d1763..4d5acb98a 100755 --- 
a/README.md +++ b/README.md @@ -30,6 +30,7 @@ The following cloud providers are currently supported: - Alibaba Cloud (alpha) - Oracle Cloud Infrastructure (alpha) - Kubernetes clusters on a cloud provider (alpha) +- DigitalOcean Cloud (alpha) ## Installation diff --git a/ScoutSuite/__main__.py b/ScoutSuite/__main__.py index 24fe31300..292267097 100755 --- a/ScoutSuite/__main__.py +++ b/ScoutSuite/__main__.py @@ -61,6 +61,10 @@ def run_from_cli(): kubernetes_context=args.get('kubernetes_context'), kubernetes_persist_config=args.get('kubernetes_persist_config'), kubernetes_azure_subscription_id=args.get('kubernetes_azure_subscription_id'), + #DigitalOcean + token=args.get('token'), + access_key=args.get('access_key'), + access_secret=args.get('access_secret'), # General report_name=args.get('report_name'), report_dir=args.get('report_dir'), timestamp=args.get('timestamp'), @@ -113,6 +117,10 @@ def run(provider, kubernetes_context=None, kubernetes_persist_config=True, kubernetes_azure_subscription_id=None, + #DigitalOcean + token=None, + access_key=None, + access_secret=None, # General report_name=None, report_dir=None, timestamp=False, @@ -171,6 +179,10 @@ async def _run(provider, kubernetes_context, kubernetes_persist_config, kubernetes_azure_subscription_id, + #DigitalOcean + token, + access_key, + access_secret, # General report_name, report_dir, timestamp, @@ -221,6 +233,11 @@ async def _run(provider, access_key_id=access_key_id, access_key_secret=access_key_secret, + #DigitalOcean + token=token, + access_key=access_key, + access_secret=access_secret, + # Kubernetes kubernetes_cluster_provider=kubernetes_cluster_provider, kubernetes_config_file=kubernetes_config_file, diff --git a/ScoutSuite/core/cli_parser.py b/ScoutSuite/core/cli_parser.py index 9a2d72fc9..161dd417b 100755 --- a/ScoutSuite/core/cli_parser.py +++ b/ScoutSuite/core/cli_parser.py @@ -30,6 +30,7 @@ def __init__(self): self._init_aliyun_parser() self._init_oci_parser() self._init_kubernetes_parser() + self._init_do_parser() def _init_aws_parser(self): parser = self.subparsers.add_parser("aws", @@ -254,6 +255,32 @@ def _init_oci_parser(self): dest='profile', default=None, help='Name of the profile') + + def _init_do_parser(self): + do_parser = self.subparsers.add_parser("do", + parents=[self.common_providers_args_parser], + help="Run Scout against an DigitalOcean account") + + parser = do_parser.add_argument_group('Authentication parameters') + + parser.add_argument('-t', + '--token', + action='store', + default=None, + dest='token', + help='DO Token') + + parser.add_argument('--access_key', + action='store', + default=None, + dest='access_key', + help='Spaces Access Key ID') + parser.add_argument('--access_secret', + action='store', + default=None, + dest='access_secret', + help='Spaces Secret Access Key') + def _init_kubernetes_parser(self): kubernetes_parser = self.subparsers.add_parser("kubernetes", @@ -436,6 +463,11 @@ def parse_args(self, args=None): if v.get('subscription_ids') and v.get('all_subscriptions'): self.parser.error('--subscription-ids and --all-subscriptions are mutually exclusive options') + # DigitalOcean + if v.get('provider') == 'do': + if (v.get('access_key') or v.get('access_secret')) and not (v.get('access_key') and v.get('access_secret')): + self.parser.error('For DO Spaces service please provide both --access_key and --access_secret') + # Kubernetes elif v.get('provider') == 'kubernetes': cluster_provider = v.get('kubernetes_cluster_provider') diff --git 
a/ScoutSuite/output/data/html/partials/do/services.database.databases.html b/ScoutSuite/output/data/html/partials/do/services.database.databases.html new file mode 100644 index 000000000..c31b818b8 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.database.databases.html @@ -0,0 +1,33 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.droplet.droplets.html b/ScoutSuite/output/data/html/partials/do/services.droplet.droplets.html new file mode 100644 index 000000000..b4987d0f9 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.droplet.droplets.html @@ -0,0 +1,42 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.kubernetes.cluster.html b/ScoutSuite/output/data/html/partials/do/services.kubernetes.cluster.html new file mode 100644 index 000000000..c22dbce8d --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.kubernetes.cluster.html @@ -0,0 +1,26 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.networking.domains.html b/ScoutSuite/output/data/html/partials/do/services.networking.domains.html new file mode 100644 index 000000000..ed04717f7 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.networking.domains.html @@ -0,0 +1,27 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.networking.firewalls.html b/ScoutSuite/output/data/html/partials/do/services.networking.firewalls.html new file mode 100644 index 000000000..f0647f9a6 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.networking.firewalls.html @@ -0,0 +1,35 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.networking.load_balancers.html b/ScoutSuite/output/data/html/partials/do/services.networking.load_balancers.html new file mode 100644 index 000000000..afb2ba988 --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.networking.load_balancers.html @@ -0,0 +1,26 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/partials/do/services.spaces.buckets.html b/ScoutSuite/output/data/html/partials/do/services.spaces.buckets.html new file mode 100644 index 000000000..7b550677d --- /dev/null +++ b/ScoutSuite/output/data/html/partials/do/services.spaces.buckets.html @@ -0,0 +1,27 @@ + + + + + + + + \ No newline at end of file diff --git a/ScoutSuite/output/data/html/summaries/do/.gitkeep b/ScoutSuite/output/data/html/summaries/do/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/__init__.py b/ScoutSuite/providers/__init__.py index a00fe0a63..e92d522b6 100755 --- a/ScoutSuite/providers/__init__.py +++ b/ScoutSuite/providers/__init__.py @@ -3,7 +3,8 @@ 'azure': 'AzureProvider', 'aliyun': 'AliyunProvider', 'oci': 'OracleProvider', - 'kubernetes': 'KubernetesProvider'} + 'kubernetes': 'KubernetesProvider', + 'do': 'DigitalOceanProvider'} def get_provider_object(provider): diff --git a/ScoutSuite/providers/base/authentication_strategy_factory.py b/ScoutSuite/providers/base/authentication_strategy_factory.py index a6eee9bdf..6a55c8881 100755 --- a/ScoutSuite/providers/base/authentication_strategy_factory.py +++ b/ScoutSuite/providers/base/authentication_strategy_factory.py @@ -4,7 +4,8 @@ 'azure': 'AzureAuthenticationStrategy', 'aliyun': 'AliyunAuthenticationStrategy', 'oci': 
'OracleAuthenticationStrategy', - 'kubernetes': 'KubernetesAuthenticationStrategy' + 'kubernetes': 'KubernetesAuthenticationStrategy', + 'do': 'DigitalOceanAuthenticationStrategy' } diff --git a/ScoutSuite/providers/do/authentication_strategy.py b/ScoutSuite/providers/do/authentication_strategy.py new file mode 100644 index 000000000..1d4e17b1f --- /dev/null +++ b/ScoutSuite/providers/do/authentication_strategy.py @@ -0,0 +1,49 @@ +from ScoutSuite.providers.do import utils +from ScoutSuite.providers.base.authentication_strategy import AuthenticationException +from ScoutSuite.providers.base.authentication_strategy import AuthenticationStrategy +from ScoutSuite.core.console import print_warning +from pydo import Client +import logging +import boto3 + + +class DoCredentials: + def __init__(self, client, session=None): + self.client = client + self.session = session + + +class DigitalOceanAuthenticationStrategy(AuthenticationStrategy): + + def authenticate(self, token=None, access_key=None, access_secret=None, **kwargs): + """ + Handles authentication to DigitalOcean. + """ + try: + self.client = Client(token) + # a simple request here to make sure the authentication is successful + self.client.account.get() + + if not (access_key and access_secret): + print_warning( + f"Missing credentials for spaces: Skipping DO Spaces service" + ) + return DoCredentials(client=self.client) + else: + # Set logging level to error for libraries as otherwise generates a lot of warnings + logging.getLogger("botocore").setLevel(logging.ERROR) + logging.getLogger("botocore.auth").setLevel(logging.ERROR) + logging.getLogger("urllib3").setLevel(logging.ERROR) + + session = boto3.Session( + aws_access_key_id=access_key, + aws_secret_access_key=access_secret, + ) + # make sure the DO spaces authentication is successful + region = "blr1" + spaces_client = utils.get_client("s3", session, region) + spaces_client.list_buckets() + return DoCredentials(client=self.client, session=session) + + except Exception as e: + raise AuthenticationException(e) diff --git a/ScoutSuite/providers/do/facade/__init__.py b/ScoutSuite/providers/do/facade/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/facade/base.py b/ScoutSuite/providers/do/facade/base.py new file mode 100644 index 000000000..993df320b --- /dev/null +++ b/ScoutSuite/providers/do/facade/base.py @@ -0,0 +1,19 @@ +from ScoutSuite.providers.do.facade.droplet import DropletFacade +from ScoutSuite.providers.do.facade.networking import Networkingfacade +from ScoutSuite.providers.do.facade.database import DatabasesFacade +from ScoutSuite.providers.do.facade.spaces import SpacesFacade +from ScoutSuite.providers.do.facade.kubernetes import KubernetesDoFacade +from ScoutSuite.providers.do.authentication_strategy import DoCredentials + + +class DoFacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._instantiate_facades() + + def _instantiate_facades(self): + self.droplet = DropletFacade(self._credentials) + self.networking = Networkingfacade(self._credentials) + self.database = DatabasesFacade(self._credentials) + self.spaces = SpacesFacade(self._credentials) + self.kubernetes = KubernetesDoFacade(self._credentials) diff --git a/ScoutSuite/providers/do/facade/database.py b/ScoutSuite/providers/do/facade/database.py new file mode 100644 index 000000000..ea59adce4 --- /dev/null +++ b/ScoutSuite/providers/do/facade/database.py @@ -0,0 +1,71 @@ +from ScoutSuite.core.console import print_exception 
+from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.utils import run_concurrently + + +class DatabasesFacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._client = credentials.client + + async def get_databases(self): + try: + databases = await run_concurrently( + lambda: self._client.databases.list_clusters()["databases"] + ) + return databases + except Exception as e: + print_exception(f"Failed to get databases: {e}") + return [] + + async def get_databaseusers(self, db_uuid): + try: + db_users = await run_concurrently( + lambda: self._client.databases.list_users(db_uuid)["users"] + ) + return db_users + except Exception as e: + print_exception(f"Failed to get db users: {e}") + return [] + + async def get_eviction_policy(self, db_uuid): + try: + eviction_policy = await run_concurrently( + lambda: self._client.databases.get_eviction_policy(db_uuid)[ + "eviction_policy" + ] + ) + return eviction_policy + except Exception as e: + print_exception(f"Failed to get Redis eviction policy: {e}") + return [] + + async def get_connection_pools(self, db_uuid): + try: + connection_pools = await run_concurrently( + lambda: self._client.databases.list_connection_pools(db_uuid)["pools"] + ) + return connection_pools + except Exception as e: + print_exception(f"Failed to get Postgres connection pools: {e}") + return [] + + async def get_firewalls(self, db_uuid): + try: + firewall_rules = await run_concurrently( + lambda: self._client.databases.list_firewall_rules(db_uuid) + ) + return firewall_rules + except Exception as e: + print_exception(f"Failed to get db firewalls: {e}") + return [] + + async def get_resources(self, tag): + try: + resources = await run_concurrently( + lambda: self._client.tags.get(tag)["tag"]["resources"] + ) + return resources + except Exception as e: + print_exception(f"Failed to get tag resources: {e}") + return [] diff --git a/ScoutSuite/providers/do/facade/droplet.py b/ScoutSuite/providers/do/facade/droplet.py new file mode 100644 index 000000000..32944a7db --- /dev/null +++ b/ScoutSuite/providers/do/facade/droplet.py @@ -0,0 +1,46 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.utils import run_concurrently +from ScoutSuite.providers.do.facade.utils import DOFacadeUtils + + +class DropletFacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._client = credentials.client + self.current_page = 1 + self.per_page = 50 + + async def get_droplets(self): + try: + droplets = await DOFacadeUtils.get_all_from_pagination( + self._client.droplets.list, self.current_page, self.per_page, "droplets" + ) + return droplets["droplets"] + except Exception as e: + print_exception(f"Failed to get droplets: {e}") + return [] + + async def get_droplet_fwconfig(self, id): + try: + filters = {"droplet_id": id} + droplet_fwconfig = await DOFacadeUtils.get_all_from_pagination( + self._client.droplets.list_firewalls, + self.current_page, + self.per_page, + "firewalls", + filters, + ) + return droplet_fwconfig + except Exception as e: + print_exception(f"Failed to get droplet firewall config: {e}") + return [] + + # TODO not required for now + # async def get_droplet_details(self, id): + # try: + # droplets = await run_concurrently(lambda: self._client.droplets.list()['droplets']) + # return droplets + # except Exception as e: + # print_exception(f'Failed to 
get do droplets: {e}') + # return [] diff --git a/ScoutSuite/providers/do/facade/kubernetes.py b/ScoutSuite/providers/do/facade/kubernetes.py new file mode 100644 index 000000000..59716e5dd --- /dev/null +++ b/ScoutSuite/providers/do/facade/kubernetes.py @@ -0,0 +1,21 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.utils import run_concurrently + + +class KubernetesDoFacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._client = credentials.client + + async def get_kubernetes(self): + try: + kubernetes = await run_concurrently( + lambda: self._client.kubernetes.list_clusters()["kubernetes_clusters"] + ) + return kubernetes + except Exception as e: + print_exception(f"Failed to get kubernetes clusters: {e}") + return [] + + \ No newline at end of file diff --git a/ScoutSuite/providers/do/facade/networking.py b/ScoutSuite/providers/do/facade/networking.py new file mode 100644 index 000000000..1b5b760b6 --- /dev/null +++ b/ScoutSuite/providers/do/facade/networking.py @@ -0,0 +1,48 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.do.facade.utils import DOFacadeUtils +from ScoutSuite.providers.utils import run_concurrently + + +class Networkingfacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._client = credentials.client + self.current_page = 1 + self.per_page = 50 + + async def get_firewalls(self): + try: + firewalls = await DOFacadeUtils.get_all_from_pagination( + self._client.firewalls.list, + self.current_page, + self.per_page, + "firewalls", + ) + return firewalls["firewalls"] + except Exception as e: + print_exception(f"Failed to get firewalls: {e}") + return [] + + async def get_domains(self): + try: + domains = await DOFacadeUtils.get_all_from_pagination( + self._client.domains.list, self.current_page, self.per_page, "domains" + ) + return domains["domains"] + except Exception as e: + print_exception(f"Failed to get domains: {e}") + return [] + + async def get_load_balancers(self): + try: + load_balancers = await DOFacadeUtils.get_all_from_pagination( + self._client.load_balancers.list, + self.current_page, + self.per_page, + "load_balancers", + ) + return load_balancers["load_balancers"] + except Exception as e: + print_exception(f"Failed to get load balancers: {e}") + return [] diff --git a/ScoutSuite/providers/do/facade/spaces.py b/ScoutSuite/providers/do/facade/spaces.py new file mode 100644 index 000000000..cda4c3ac3 --- /dev/null +++ b/ScoutSuite/providers/do/facade/spaces.py @@ -0,0 +1,212 @@ +from botocore.exceptions import ClientError +import boto3 +from ScoutSuite.core.console import print_exception, print_debug, print_warning +from ScoutSuite.providers.aws.facade.utils import AWSFacadeUtils +from ScoutSuite.providers.utils import run_concurrently, get_and_set_concurrently +from ScoutSuite.providers.do.authentication_strategy import DoCredentials + + +class SpacesFacade: + def __init__(self, credentials: DoCredentials): + self._credentials = credentials + self._client = credentials.client + self.session = credentials.session + + async def get_all_buckets(self): + buckets = [] + # TODO no api avaialible to get do regions that support spaces. 
+ region_list = ["nyc3", "sfo2", "sfo3", "ams3", "fra1", "sgp1", "syd1", "blr1"] + + for region in region_list: + region_buckets = await self.get_buckets(region) + buckets.extend(region_buckets) + return buckets + + async def get_buckets(self, region=None): + try: + buckets = [] + exception = None + try: + client = self.get_client("s3", self.session, region) + buckets = await run_concurrently( + lambda: client.list_buckets()["Buckets"] + ) + except Exception as e: + exception = e + else: + exception = None # Fix for https://github.com/nccgroup/ScoutSuite/issues/916#issuecomment-728783965 + if not buckets: + if exception: + print_exception(f"Failed to list buckets: {exception}") + return [] + except Exception as e: + print_exception(f"Failed to list buckets: {e}") + return [] + else: + # We need first to retrieve bucket locations before retrieving bucket details + await get_and_set_concurrently( + [self._get_and_set_s3_bucket_location], buckets, region=region + ) + + # Then we can retrieve bucket details concurrently + await get_and_set_concurrently( + [ + self._get_and_set_s3_acls, + self._get_CORS + ], + buckets, + ) + return buckets + + async def _get_CORS(self, bucket: {}, region=None): + client = self.get_client("s3", self.session, bucket["region"]) + try: + # Attempt to get the CORS configuration + response = client.get_bucket_cors(Bucket=bucket["Name"]) + if 'CORSRules' in response: + bucket["CORS"] = response['CORSRules'] + else: + print("CORS rules are not set for this bucket.") + except ClientError as e: + if e.response['Error']['Code'] == 'InvalidAccessKeyId': + print("The AWS Access Key Id provided does not exist in our records.") + except Exception as e: + print(f"An unexpected error occurred: {str(e)}") + + async def _get_and_set_s3_bucket_location(self, bucket: {}, region=None): + client = self.get_client("s3", self.session, region) + try: + location = await run_concurrently( + lambda: client.get_bucket_location(Bucket=bucket["Name"]) + ) + except Exception as e: + if "NoSuchBucket" in str(e) or "InvalidToken" in str(e): + print_warning( + "Failed to get bucket location for {}: {}".format(bucket["Name"], e) + ) + else: + print_exception( + "Failed to get bucket location for {}: {}".format(bucket["Name"], e) + ) + location = None + + if location: + region = ( + location["LocationConstraint"] + if location["LocationConstraint"] + else "us-east-1" + ) + + # Fixes issue #59: location constraint can be either EU or eu-west-1 for Ireland... 
+ if region == "EU": + region = "eu-west-1" + else: + region = None + + bucket["region"] = region + + async def _get_and_set_s3_acls(self, bucket: {}, key_name=None): + bucket_name = bucket["Name"] + client = self.get_client("s3", self.session, bucket["region"]) + try: + grantees = {} + if key_name: + grants = await run_concurrently( + lambda: client.get_object_acl(Bucket=bucket_name, Key=key_name) + ) + else: + grants = await run_concurrently( + lambda: client.get_bucket_acl(Bucket=bucket_name) + ) + for grant in grants["Grants"]: + if "ID" in grant["Grantee"]: + grantee = grant["Grantee"]["ID"] + display_name = ( + grant["Grantee"]["DisplayName"] + if "DisplayName" in grant["Grantee"] + else grant["Grantee"]["ID"] + ) + elif "URI" in grant["Grantee"]: + grantee = grant["Grantee"]["URI"].split("/")[-1] + display_name = self._s3_group_to_string(grant["Grantee"]["URI"]) + else: + grantee = display_name = "Unknown" + permission = grant["Permission"] + grantees.setdefault(grantee, {}) + grantees[grantee]["DisplayName"] = display_name + if "URI" in grant["Grantee"]: + grantees[grantee]["URI"] = grant["Grantee"]["URI"] + grantees[grantee].setdefault("permissions", self._init_s3_permissions()) + self._set_s3_permissions(grantees[grantee]["permissions"], permission) + bucket["grantees"] = grantees + except Exception as e: + if "NoSuchBucket" in str(e) or "InvalidToken" in str(e): + print_warning(f"Failed to get ACL configuration for {bucket_name}: {e}") + else: + print_exception( + f"Failed to get ACL configuration for {bucket_name}: {e}" + ) + bucket["grantees"] = {} + + @staticmethod + def get_client(service: str, session: boto3.session.Session, region: str = None): + """ + Instantiates an AWS API client + + :param service: Service targeted, e.g. ec2 + :param session: The aws session + :param region: Region desired, e.g. 
us-east-2 + + :return: + """ + + try: + return ( + session.client( + service, + region_name=region, + endpoint_url="https://" + region + ".digitaloceanspaces.com", + ) + if region + else session.client(service) + ) + except Exception as e: + print_exception(f"Failed to create client for the {service} service: {e}") + return None + + @staticmethod + def _init_s3_permissions(): + permissions = { + "read": False, + "write": False, + "read_acp": False, + "write_acp": False, + } + return permissions + + @staticmethod + def _set_s3_permissions(permissions: str, name: str): + if name == "READ" or name == "FULL_CONTROL": + permissions["read"] = True + if name == "WRITE" or name == "FULL_CONTROL": + permissions["write"] = True + if name == "READ_ACP" or name == "FULL_CONTROL": + permissions["read_acp"] = True + if name == "WRITE_ACP" or name == "FULL_CONTROL": + permissions["write_acp"] = True + + @staticmethod + def _s3_group_to_string(uri: str): + if uri == "http://acs.amazonaws.com/groups/global/AuthenticatedUsers": + return "Authenticated users" + elif uri == "http://acs.amazonaws.com/groups/global/AllUsers": + return "Everyone" + elif uri == "http://acs.amazonaws.com/groups/s3/LogDelivery": + return "Log delivery" + else: + return uri + + @staticmethod + def _status_to_bool(value: str): + """Converts a string to True if it is equal to 'Enabled' or to False otherwise.""" + return value == "Enabled" diff --git a/ScoutSuite/providers/do/facade/utils.py b/ScoutSuite/providers/do/facade/utils.py new file mode 100644 index 000000000..b377bc7a7 --- /dev/null +++ b/ScoutSuite/providers/do/facade/utils.py @@ -0,0 +1,31 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.utils import run_concurrently + + +class DOFacadeUtils: + + @staticmethod + async def get_all_from_pagination( + list_client, current_page, per_page, object_name, filters=None + ): + final_output = {} + next_page = True + while next_page: + if filters: + resp = await run_concurrently( + lambda: list_client(**filters, per_page=per_page, page=current_page) + ) + else: + resp = await run_concurrently( + lambda: list_client(per_page=per_page, page=current_page) + ) + if object_name in final_output.keys(): + final_output[object_name].extend(resp[object_name]) + else: + final_output[object_name] = resp[object_name] + + pages = resp.get("links").get("pages", {}) + next_page = "next" in pages.keys() + current_page += 1 + return final_output diff --git a/ScoutSuite/providers/do/metadata.json b/ScoutSuite/providers/do/metadata.json new file mode 100644 index 000000000..1b78bbfe8 --- /dev/null +++ b/ScoutSuite/providers/do/metadata.json @@ -0,0 +1,60 @@ +{ + "Droplets": { + "droplet": { + "resources": { + "droplets": { + "cols": 2, + "path": "services.droplet.droplets" + } + } + } + }, + "Storage": { + "spaces": { + "resources": { + "buckets": { + "cols": 2, + "path": "services.spaces.buckets" + } + } + } + }, + "Network": { + "networking": { + "resources": { + "firewalls": { + "cols": 2, + "path": "services.networking.firewalls" + }, + "domains": { + "cols": 2, + "path": "services.networking.domains" + }, + "load_balancers": { + "cols": 2, + "path": "services.networking.load_balancers" + } + } + } + }, + "Kubernetes": { + "kubernetes": { + "resources": { + "kubernetes": { + "cols": 2, + "path": "services.kubernetes.kubernetes" + } + } + } + }, + "Databases": { + "database": { + "resources": { + "databases": { + "cols": 2, + "path": 
"services.database.databases" + } + } + } + } +} diff --git a/ScoutSuite/providers/do/provider.py b/ScoutSuite/providers/do/provider.py new file mode 100644 index 000000000..ba2987e68 --- /dev/null +++ b/ScoutSuite/providers/do/provider.py @@ -0,0 +1,50 @@ +import os +from ScoutSuite.providers.do.services import DigitalOceanServicesConfig +from ScoutSuite.providers.base.provider import BaseProvider + + +class DigitalOceanProvider(BaseProvider): + """ + Implements provider for DigitalOcean + """ + + def __init__( + self, + report_dir=None, + timestamp=None, + services=None, + skipped_services=None, + **kwargs, + ): + + services = [] if services is None else services + skipped_services = [] if skipped_services is None else skipped_services + + self.metadata_path = ( + "%s/metadata.json" % os.path.split(os.path.abspath(__file__))[0] + ) + + self.provider_code = "do" + self.provider_name = "DigitalOcean" + self.environment = "default" + + self.services_config = DigitalOceanServicesConfig + + self.credentials = kwargs["credentials"] + self.account_id = self.credentials.client.account.get() + self.account_id = self.account_id["account"]["uuid"] + + super().__init__(report_dir, timestamp, services, skipped_services) + + def get_report_name(self): + """ + Returns the name of the report using the provider's configuration + """ + if self.account_id: + return f"do-{self.account_id}" + else: + return "do" + + def preprocessing(self, ip_ranges=None, ip_ranges_name_key=None): + + super().preprocessing() diff --git a/ScoutSuite/providers/do/resources/__init__.py b/ScoutSuite/providers/do/resources/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/resources/base.py b/ScoutSuite/providers/do/resources/base.py new file mode 100644 index 000000000..eeb13e981 --- /dev/null +++ b/ScoutSuite/providers/do/resources/base.py @@ -0,0 +1,22 @@ +"""This module provides implementations for Resources and CompositeResources for DO.""" + +import abc + +from ScoutSuite.providers.base.resources.base import Resources, CompositeResources + + +class DoResources(Resources, metaclass=abc.ABCMeta): + """This is the base class for DO resources.""" + + pass + + +class DoCompositeResources(DoResources, CompositeResources, metaclass=abc.ABCMeta): + """This class represents a collection of composite Resources (resources that include nested resources referred as + their children). Classes extending DoCompositeResources have to define a '_children' attribute which consists of + a list of tuples describing the children. The tuples are expected to respect the following format: + (, ). 'child_name' is used to indicate the name under which the child resources will be + stored in the parent object. 
+ """ + + pass diff --git a/ScoutSuite/providers/do/resources/database/__init__.py b/ScoutSuite/providers/do/resources/database/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/resources/database/base.py b/ScoutSuite/providers/do/resources/database/base.py new file mode 100644 index 000000000..6baec57a5 --- /dev/null +++ b/ScoutSuite/providers/do/resources/database/base.py @@ -0,0 +1,14 @@ +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.providers.do.resources.base import DoCompositeResources +from ScoutSuite.providers.do.resources.database.databases import Databases + + +class Databases(DoCompositeResources): + _children = [(Databases, "databases")] + + def __init__(self, facade: DoFacade): + super().__init__(facade) + self.service = "database" + + async def fetch_all(self, **kwargs): + await self._fetch_children(resource_parent=self) diff --git a/ScoutSuite/providers/do/resources/database/databases.py b/ScoutSuite/providers/do/resources/database/databases.py new file mode 100644 index 000000000..2a880c342 --- /dev/null +++ b/ScoutSuite/providers/do/resources/database/databases.py @@ -0,0 +1,70 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade + + +class Databases(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + clusters = await self.facade.database.get_databases() + if clusters: + for cluster in clusters: + id, cluster = await self._parse_cluster(cluster) + self[id] = cluster + + async def _parse_cluster(self, raw_cluster): + cluster_dict = {} + + cluster_dict["id"] = raw_cluster["id"] + cluster_dict["name"] = raw_cluster["name"] + cluster_dict["engine"] = raw_cluster["engine"] + cluster_dict["version"] = raw_cluster["version"] + if raw_cluster["engine"] != "mongodb": + cluster_dict["semantic_version"] = raw_cluster["semantic_version"] + cluster_dict["tags"] = raw_cluster["tags"] + cluster_dict["databases"] = str(raw_cluster["db_names"]) + + trusted_sources = set() + cluster_databases = await self.facade.database.get_firewalls(raw_cluster["id"]) + if cluster_databases: + for cluster_rule in cluster_databases["rules"]: + trusted_sources.add(f"{cluster_rule['type']}s:{cluster_rule['value']}") + + cluster_dict["trusted_sources"] = ( + trusted_sources if trusted_sources else "False" + ) + + if raw_cluster["engine"] == "mysql": + legacy_encryption_users = set() + db_users = await self.facade.database.get_databaseusers(raw_cluster["id"]) + if db_users: + for db_user in db_users: + if ( + db_user["mysql_settings"]["auth_plugin"] + == "mysql_native_password" + ): + legacy_encryption_users.add(db_user["name"]) + + if legacy_encryption_users == "None": + cluster_dict["legacy_encryption_users"] = "True" + else: + cluster_dict["legacy_encryption_users"] = ( + str(legacy_encryption_users) if legacy_encryption_users else "False" + ) + + + elif raw_cluster["engine"] == "redis": + cluster_dict["eviction_policy"] = ( + await self.facade.database.get_eviction_policy(raw_cluster["id"]) + ) + + elif raw_cluster["engine"] == "pg": + connection_pools = await self.facade.database.get_connection_pools( + raw_cluster["id"] + ) + cluster_dict["connection_pools"] = ( + connection_pools if connection_pools else "False" + ) + + return cluster_dict["id"], cluster_dict diff --git a/ScoutSuite/providers/do/resources/droplet/__init__.py b/ScoutSuite/providers/do/resources/droplet/__init__.py new file mode 
100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/resources/droplet/base.py b/ScoutSuite/providers/do/resources/droplet/base.py new file mode 100644 index 000000000..5a5ebdf1a --- /dev/null +++ b/ScoutSuite/providers/do/resources/droplet/base.py @@ -0,0 +1,14 @@ +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.providers.do.resources.base import DoCompositeResources +from ScoutSuite.providers.do.resources.droplet.droplets import Droplets + + +class Droplets(DoCompositeResources): + _children = [(Droplets, "droplets")] + + def __init__(self, facade: DoFacade): + super().__init__(facade) + self.service = "droplet" + + async def fetch_all(self, **kwargs): + await self._fetch_children(resource_parent=self) diff --git a/ScoutSuite/providers/do/resources/droplet/droplets.py b/ScoutSuite/providers/do/resources/droplet/droplets.py new file mode 100644 index 000000000..4444af5da --- /dev/null +++ b/ScoutSuite/providers/do/resources/droplet/droplets.py @@ -0,0 +1,91 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade + + +class Droplets(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + + droplets = await self.facade.droplet.get_droplets() + if droplets: + for droplet in droplets: + id, droplet = await self._parse_droplet(droplet) + self[id] = droplet + + async def _parse_droplet(self, raw_droplet): + droplet_dict = {} + + droplet_dict["id"] = raw_droplet["id"] + droplet_dict["name"] = raw_droplet["name"] + droplet_dict["memory"] = raw_droplet["memory"] + droplet_dict["vcpus"] = raw_droplet["vcpus"] + droplet_dict["disk"] = raw_droplet["disk"] + droplet_dict["locked"] = raw_droplet["locked"] + droplet_dict["status"] = raw_droplet["status"] + droplet_dict["kernel"] = raw_droplet["kernel"] + droplet_dict["created_at"] = raw_droplet["created_at"] + droplet_dict["features"] = raw_droplet["features"] + droplet_dict["backup_ids"] = str(raw_droplet["backup_ids"]) + droplet_dict["next_backup_window"] = raw_droplet["next_backup_window"] + droplet_dict["snapshot_ids"] = str(raw_droplet["snapshot_ids"]) + droplet_dict["image"] = raw_droplet["image"]["slug"] + droplet_dict["image_type"] = raw_droplet["image"]["type"] + droplet_dict["volume_ids"] = str(raw_droplet["volume_ids"]) + droplet_dict["size"] = raw_droplet["size"]["slug"] + droplet_dict["size_slug"] = raw_droplet["size_slug"] + droplet_dict["networks"] = str(raw_droplet["networks"]) + droplet_dict["region"] = raw_droplet["region"]["slug"] + droplet_dict["tags"] = raw_droplet["tags"] + droplet_dict["vpc_uuid"] = raw_droplet["vpc_uuid"] + droplet_dict["firewalls"] = None + + droplet_fwconfig = await self.facade.droplet.get_droplet_fwconfig( + raw_droplet["id"] + ) + public_ports = {} + + if droplet_fwconfig: + if droplet_fwconfig["firewalls"]: + droplet_dict["firewalls"] = "" + for firewall in droplet_fwconfig["firewalls"]: + droplet_dict["firewalls"] = ( + droplet_dict["firewalls"] + " , " + firewall["id"] + if droplet_dict["firewalls"] + else firewall["id"] + ) + + for rules in firewall["inbound_rules"]: + if ( + "0.0.0.0/0" in rules["sources"]["addresses"] + or "::/0" in rules["sources"]["addresses"] + ): + public_ports[rules["ports"]] = rules["sources"]["addresses"] + + droplet_dict["all_ports_exposed"] = ( + "True" + if ("0" in public_ports.keys() or not droplet_fwconfig["firewalls"]) + else "False" + ) + droplet_dict["port_22_exposed"] = ( + "True" + if ("22" in 
public_ports.keys() or droplet_dict["all_ports_exposed"] == "True")
+            else "False"
+        )
+
+        droplet_dict["public_ports_enabled"] = "True" if public_ports else "False"
+        droplet_dict["public_port_detail"] = (
+            f"Port {','.join(public_ports.keys())} exposed to public internet due to this configuration {str(public_ports)}"
+            if public_ports
+            else ""
+        )
+
+        droplet_dict["features_monitoring"] = (
+            "True"
+            if ("monitoring" in droplet_dict["features"])
+            else "False"
+        )
+
+
+        return droplet_dict["id"], droplet_dict
diff --git a/ScoutSuite/providers/do/resources/kubernetes/__init__.py b/ScoutSuite/providers/do/resources/kubernetes/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ScoutSuite/providers/do/resources/kubernetes/base.py b/ScoutSuite/providers/do/resources/kubernetes/base.py
new file mode 100644
index 000000000..f48090fbc
--- /dev/null
+++ b/ScoutSuite/providers/do/resources/kubernetes/base.py
@@ -0,0 +1,14 @@
+from ScoutSuite.providers.do.facade.base import DoFacade
+from ScoutSuite.providers.do.resources.base import DoCompositeResources
+from ScoutSuite.providers.do.resources.kubernetes.kubernetes import Kubernetes
+
+
+class Kubernetes(DoCompositeResources):
+    _children = [(Kubernetes, "kubernetes")]
+
+    def __init__(self, facade: DoFacade):
+        super().__init__(facade)
+        self.service = "kubernetes"
+
+    async def fetch_all(self, **kwargs):
+        await self._fetch_children(resource_parent=self)
diff --git a/ScoutSuite/providers/do/resources/kubernetes/kubernetes.py b/ScoutSuite/providers/do/resources/kubernetes/kubernetes.py
new file mode 100644
index 000000000..d83e5ab2d
--- /dev/null
+++ b/ScoutSuite/providers/do/resources/kubernetes/kubernetes.py
@@ -0,0 +1,25 @@
+from ScoutSuite.providers.do.resources.base import DoResources
+from ScoutSuite.providers.do.facade.base import DoFacade
+
+
+class Kubernetes(DoResources):
+    def __init__(self, facade: DoFacade):
+        super().__init__(facade)
+
+    async def fetch_all(self):
+        clusters = await self.facade.kubernetes.get_kubernetes()
+        if clusters:
+            for cluster in clusters:
+                id, cluster = await self._parse_cluster(cluster)
+                self[id] = cluster
+
+    async def _parse_cluster(self, raw_cluster):
+        cluster_dict = {}
+
+        cluster_dict["id"] = raw_cluster["id"]
+        cluster_dict["name"] = raw_cluster["name"]
+        cluster_dict["ha"] = raw_cluster["ha"]
+        cluster_dict["auto_upgrade"] = raw_cluster["auto_upgrade"]
+        cluster_dict["surge_upgrade"] = raw_cluster["surge_upgrade"]
+
+        return cluster_dict["id"], cluster_dict
diff --git a/ScoutSuite/providers/do/resources/networking/__init__.py b/ScoutSuite/providers/do/resources/networking/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ScoutSuite/providers/do/resources/networking/base.py b/ScoutSuite/providers/do/resources/networking/base.py
new file mode 100644
index 000000000..f9079148c
--- /dev/null
+++ b/ScoutSuite/providers/do/resources/networking/base.py
@@ -0,0 +1,20 @@
+from ScoutSuite.providers.do.facade.base import DoFacade
+from ScoutSuite.providers.do.resources.base import DoCompositeResources
+from ScoutSuite.providers.do.resources.networking.firewalls import Firewalls
+from ScoutSuite.providers.do.resources.networking.domains import Domains
+from ScoutSuite.providers.do.resources.networking.load_balancers import LoadBalancers
+
+
+class Networking(DoCompositeResources):
+    _children = [
+        (Firewalls, "firewalls"),
+        (Domains, "domains"),
+        (LoadBalancers, "load_balancers"),
+    ]
+
+    def __init__(self, facade: DoFacade):
+        super().__init__(facade)
+        self.service = 
"networking" + + async def fetch_all(self, **kwargs): + await self._fetch_children(resource_parent=self) diff --git a/ScoutSuite/providers/do/resources/networking/domains.py b/ScoutSuite/providers/do/resources/networking/domains.py new file mode 100644 index 000000000..1413278e6 --- /dev/null +++ b/ScoutSuite/providers/do/resources/networking/domains.py @@ -0,0 +1,50 @@ +from ScoutSuite.core.console import print_exception +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade +import re + + +class Domains(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + domains = await self.facade.networking.get_domains() + if domains: + for domain in domains: + name, domain = await self._parse_domain(domain) + if domain: + self[name] = domain + + async def _parse_domain(self, raw_domain): + domain_dict = {} + + domain_dict["name"] = raw_domain["name"] + zone_file = raw_domain["zone_file"] + + spf_pattern = re.compile(r'.*TXT.*v=spf.*', re.IGNORECASE) + domain_dict["spf_record"] = "True" if bool(re.search(spf_pattern, zone_file)) else "False" + dmarc_pattern = re.compile(r'.*TXT.*v=DMARC.*', re.IGNORECASE) + domain_dict["dmarc_record"] = "True" if bool(re.search(dmarc_pattern, zone_file)) else "False" + dkim_pattern = re.compile(r'.*TXT.*v=DKIM.*', re.IGNORECASE) + domain_dict["dkim_record"] = "True" if bool(re.search(dkim_pattern, zone_file)) else "False" + + ttl_regex = r"\.\s*(\d+)\s*IN" + ttl_matches = re.findall(ttl_regex, zone_file) + numbers = [int(match) for match in ttl_matches] + + domain_dict["highttl_records"] = ( + "True" + if max(numbers) > 3600 + else "False" + ) + + pattern1 = re.compile(r'.*TXT.*v=spf.*~all', re.IGNORECASE) + pattern2 = re.compile(r'.*TXT.*v=spf.*\+all', re.IGNORECASE) + domain_dict["spf_record_all"] = ( + "True" + if bool(re.search(pattern1, zone_file)) or bool(re.search(pattern2, zone_file)) + else "False" + ) + + return domain_dict["name"], domain_dict diff --git a/ScoutSuite/providers/do/resources/networking/firewalls.py b/ScoutSuite/providers/do/resources/networking/firewalls.py new file mode 100644 index 000000000..566b3f59a --- /dev/null +++ b/ScoutSuite/providers/do/resources/networking/firewalls.py @@ -0,0 +1,47 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade + + +class Firewalls(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + + firewalls = await self.facade.networking.get_firewalls() + if firewalls: + for firewall in firewalls: + id, firewall = await self._parse_firewall(firewall) + self[id] = firewall + + async def _parse_firewall(self, raw_firewall): + firewall_dict = {} + + firewall_dict["id"] = raw_firewall["id"] + firewall_dict["name"] = raw_firewall["name"] + firewall_dict["status"] = raw_firewall["status"] + firewall_dict["inbound_rules"] = raw_firewall["inbound_rules"] + firewall_dict["outbound_rules"] = raw_firewall["outbound_rules"] + firewall_dict["created_at"] = raw_firewall["created_at"] + firewall_dict["droplet_ids"] = str(raw_firewall["droplet_ids"]) + firewall_dict["tags"] = str(raw_firewall["tags"]) + firewall_dict["pending_changes"] = str(raw_firewall["pending_changes"]) + public_ports = {} + for rules in raw_firewall["inbound_rules"]: + if ( + "0.0.0.0/0" in rules["sources"]["addresses"] + or "::/0" in rules["sources"]["addresses"] + ): + public_ports[rules["ports"]] = 
rules["sources"]["addresses"] + + firewall_dict["all_ports_exposed"] = ( + "True" if ("0" in public_ports.keys()) else "False" + ) + firewall_dict["public_ports_enabled"] = "True" if public_ports else "False" + firewall_dict["public_port_detail"] = ( + f"Port {','.join(public_ports.keys())} exposed to public internet due to this configuration {str(public_ports)}" + if public_ports + else "" + ) + + return firewall_dict["id"], firewall_dict diff --git a/ScoutSuite/providers/do/resources/networking/load_balancers.py b/ScoutSuite/providers/do/resources/networking/load_balancers.py new file mode 100644 index 000000000..7ce6ca493 --- /dev/null +++ b/ScoutSuite/providers/do/resources/networking/load_balancers.py @@ -0,0 +1,30 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade + + +class LoadBalancers(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + load_balancers = await self.facade.networking.get_load_balancers() + if load_balancers: + for load_balancer in load_balancers: + id, load_balancer = await self._parse_load_balancer(load_balancer) + self[id] = load_balancer + + async def _parse_load_balancer(self, raw_load_balancer): + load_balancer_dict = {} + + load_balancer_dict["id"] = raw_load_balancer["id"] + load_balancer_dict["name"] = raw_load_balancer["name"] + load_balancer_dict["name"] = raw_load_balancer["name"] + load_balancer_dict["redirect_http_to_https"] = str( + raw_load_balancer["redirect_http_to_https"] + ) + load_balancer_dict["enable_backend_keepalive"] = str( + raw_load_balancer["enable_backend_keepalive"] + ) + load_balancer_dict["droplet_ids"] = str(raw_load_balancer["droplet_ids"]) + + return load_balancer_dict["id"], load_balancer_dict diff --git a/ScoutSuite/providers/do/resources/spaces/__init__.py b/ScoutSuite/providers/do/resources/spaces/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/resources/spaces/base.py b/ScoutSuite/providers/do/resources/spaces/base.py new file mode 100644 index 000000000..2f3d4fec5 --- /dev/null +++ b/ScoutSuite/providers/do/resources/spaces/base.py @@ -0,0 +1,14 @@ +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.providers.do.resources.base import DoCompositeResources +from ScoutSuite.providers.do.resources.spaces.buckets import Buckets + + +class Spaces(DoCompositeResources): + _children = [(Buckets, "buckets")] + + def __init__(self, facade: DoFacade): + super().__init__(facade) + self.service = "buckets" + + async def fetch_all(self, **kwargs): + await self._fetch_children(resource_parent=self) diff --git a/ScoutSuite/providers/do/resources/spaces/buckets.py b/ScoutSuite/providers/do/resources/spaces/buckets.py new file mode 100644 index 000000000..7edc2dcb6 --- /dev/null +++ b/ScoutSuite/providers/do/resources/spaces/buckets.py @@ -0,0 +1,50 @@ +from ScoutSuite.providers.do.resources.base import DoResources +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.core.console import print_exception +import json + + +class Buckets(DoResources): + def __init__(self, facade: DoFacade): + super().__init__(facade) + + async def fetch_all(self): + + buckets = await self.facade.spaces.get_all_buckets() + if buckets: + for bucket in buckets: + id, bucket = await self._parse_buckets(bucket) + self[id] = bucket + + async def _parse_buckets(self, raw_buckets): + buckets_dict = {} + + buckets_dict["name"] = 
raw_buckets["Name"] + buckets_dict["public_read"] = ( + str(raw_buckets["grantees"]["AllUsers"]["permissions"]["read"]) + if "AllUsers" in raw_buckets.get("grantees", {}) + else False + ) + buckets_dict["public_write"] = ( + raw_buckets["grantees"]["AllUsers"]["permissions"]["write"] + if "AllUsers" in raw_buckets.get("grantees", {}) + else False + ) + buckets_dict["read_acp"] = ( + raw_buckets["grantees"]["AllUsers"]["permissions"]["read_acp"] + if "AllUsers" in raw_buckets.get("grantees", {}) + else False + ) + buckets_dict["write_acp"] = ( + raw_buckets["grantees"]["AllUsers"]["permissions"]["write_acp"] + if "AllUsers" in raw_buckets.get("grantees", {}) + else False + ) + buckets_dict["CORS"] = ( + True + if "CORS" in raw_buckets and raw_buckets["CORS"] and "AllowedOrigins" in raw_buckets["CORS"][0] + else False + ) + + + return buckets_dict["name"], buckets_dict diff --git a/ScoutSuite/providers/do/rules/filters/.gitkeep b/ScoutSuite/providers/do/rules/filters/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/ScoutSuite/providers/do/rules/findings/database-databases-mysql-publically-exposed.json b/ScoutSuite/providers/do/rules/findings/database-databases-mysql-publically-exposed.json new file mode 100644 index 000000000..28b350e02 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/database-databases-mysql-publically-exposed.json @@ -0,0 +1,20 @@ +{ + "description": "Mysql Database cluster publically exposed", + "rationale": "Typically, only the application servers should be allowed to connect to the database cluster.", + "dashboard_name": "Databases", + "path": "database.databases.id", + "conditions": [ + "and", + [ + "database.databases.id.trusted_sources", + "equal", + "False" + ], + [ + "database.databases.id.engine", + "equal", + "mysql" + ] + ], + "id_suffix": "trusted_sources" +} diff --git a/ScoutSuite/providers/do/rules/findings/database-databases-mysql-user-legacy-encryption.json b/ScoutSuite/providers/do/rules/findings/database-databases-mysql-user-legacy-encryption.json new file mode 100644 index 000000000..1138f2b28 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/database-databases-mysql-user-legacy-encryption.json @@ -0,0 +1,20 @@ +{ + "description": "Mysql Database user with Legacy MySQL 5.x encryption", + "rationale": "DigitalOcean Managed Databases using MySQL 8+ are automatically configured to use caching_sha2_password authentication by default. caching_sha2_password uses a stronger password encryption than prior versions of MySQL.", + "dashboard_name": "Databases", + "path": "database.databases.id", + "conditions": [ + "and", + [ + "database.databases.id.legacy_encryption_users", + "notEqual", + "False" + ], + [ + "database.databases.id.engine", + "equal", + "mysql" + ] + ], + "id_suffix": "legacy_encryption_users" +} diff --git a/ScoutSuite/providers/do/rules/findings/database-databases-postgres-connection-pools.json b/ScoutSuite/providers/do/rules/findings/database-databases-postgres-connection-pools.json new file mode 100644 index 000000000..cd6eb64e5 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/database-databases-postgres-connection-pools.json @@ -0,0 +1,20 @@ +{ + "description": "No connection pools found for Postgres database", + "rationale": "When you use PostgreSQL without a connection pool, each client request creates a new connection to the database. This can lead to a high number of connections, which can cause performance issues and slow down your application. 
Connection pooling can help mitigate these issues by reusing existing connections instead of creating new ones for each request", + "dashboard_name": "Databases", + "path": "database.databases.id", + "conditions": [ + "and", + [ + "database.databases.id.connection_pools", + "equal", + "False" + ], + [ + "database.databases.id.engine", + "equal", + "pg" + ] + ], + "id_suffix": "connection_pools" +} diff --git a/ScoutSuite/providers/do/rules/findings/database-databases-postgres-trusted-sources.json b/ScoutSuite/providers/do/rules/findings/database-databases-postgres-trusted-sources.json new file mode 100644 index 000000000..5d798845e --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/database-databases-postgres-trusted-sources.json @@ -0,0 +1,15 @@ +{ + "description": "Databases publicly exposed", + "rationale": "Database services should restrict incoming requests only from trusted sources.", + "dashboard_name": "Databases", + "path": "database.databases.id", + "conditions": [ + "and", + [ + "database.databases.id.trusted_sources", + "equal", + "False" + ] + ], + "id_suffix": "trusted_sources" +} diff --git a/ScoutSuite/providers/do/rules/findings/database-databases-redis-evicition-policy.json b/ScoutSuite/providers/do/rules/findings/database-databases-redis-evicition-policy.json new file mode 100644 index 000000000..ad4055f5f --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/database-databases-redis-evicition-policy.json @@ -0,0 +1,20 @@ +{ + "description": "Eviction policy for Redis database cluster not set to 'allkeys-lru'", + "rationale": "When Redis is used as a cache, it is often convenient to let it automatically evict old data as you add new data. Redis provides several eviction policies to choose from, including allkeys-lru, allkeys-lfu, volatile-lru, volatile-lfu, allkeys-random, volatile-random, and volatile-ttl 1. If you do not set an eviction policy, Redis will use the noeviction policy by default. This means that Redis will not evict any keys when the memory limit is reached, and any new values will not be saved 1. If you do not set an eviction policy and Redis runs out of memory, it will start to return errors for commands that could result in more memory being used 1. In general, it is recommended to use the allkeys-lru policy when you expect a power-law distribution in the popularity of your requests. 
That is, you expect a subset of elements will be accessed far more often than the rest", + "dashboard_name": "Databases", + "path": "database.databases.id", + "conditions": [ + "and", + [ + "database.databases.id.eviction_policy", + "notEqual", + "allkeys_lru" + ], + [ + "database.databases.id.engine", + "equal", + "redis" + ] + ], + "id_suffix": "eviction_policy" +} diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-all-ports-exposed.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-all-ports-exposed.json new file mode 100644 index 000000000..a491c5164 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-all-ports-exposed.json @@ -0,0 +1,15 @@ +{ + "description": "Droplets with all ports exposed to public", + "rationale": "Droplets should expose only required/intented ports to public internet", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.all_ports_exposed", + "equal", + "True" + ] + ], + "id_suffix": "all_ports_exposed" +} diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-enabled.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-enabled.json new file mode 100644 index 000000000..0ba65c386 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-enabled.json @@ -0,0 +1,17 @@ +{ + "description": "Droplets with auto backups disabled", + "rationale": "Droplet backups feature should be enabled for disaster recovery.", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.next_backup_window", + "null", + "" + ] + ], + "id_suffix": "next_backup_window" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-present.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-present.json new file mode 100644 index 000000000..3ee3f6868 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-backup-not-present.json @@ -0,0 +1,17 @@ +{ + "description": "Droplets having no backups present", + "rationale": "Droplets should have atleast 1 backup present for disaster recovery.", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.backup_ids", + "equal", + "[]" + ] + ], + "id_suffix": "backup_ids" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-custom-image.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-custom-image.json new file mode 100644 index 000000000..123e68e1f --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-custom-image.json @@ -0,0 +1,16 @@ +{ + "description": "Droplets with custom image", + "rationale": "Using custom images instead of those provided by Digital Ocean may result in reduced security control, as user-created images may not include the latest security patches and configurations that are routinely maintained and updated in provider-supplied images.", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.image_type", + "equal", + "custom" + ] + ], + "id_suffix": "image_type" +} + diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-features-monitoring.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-features-monitoring.json new file mode 100644 index 000000000..477c6740d --- /dev/null +++ 
b/ScoutSuite/providers/do/rules/findings/droplet-droplets-features-monitoring.json @@ -0,0 +1,16 @@ +{ + "description": "Droplets without improved metrics monitoring enabled", + "rationale": "Droplets without improved metrics monitoring enabled", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.features_monitoring", + "equal", + "False" + ] + ], + "id_suffix": "features_monitoring" +} + diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-firewall-not-attached.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-firewall-not-attached.json new file mode 100644 index 000000000..e8f1937c7 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-firewall-not-attached.json @@ -0,0 +1,17 @@ +{ + "description": "Droplets with no firewall attached", + "rationale": "Droplet should have a firewall atatched for enabling secure network configuration", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.firewalls", + "null", + "" + ] + ], + "id_suffix": "firewalls" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-port-22-exposed.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-port-22-exposed.json new file mode 100644 index 000000000..cc9d7a86a --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-port-22-exposed.json @@ -0,0 +1,15 @@ +{ + "description": "Droplets with port 22 exposed to public", + "rationale": "Droplets should have port 22 restricted to trusted networks", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.port_22_exposed", + "equal", + "True" + ] + ], + "id_suffix": "port_22_exposed" +} diff --git a/ScoutSuite/providers/do/rules/findings/droplet-droplets-snapshot-not-present.json b/ScoutSuite/providers/do/rules/findings/droplet-droplets-snapshot-not-present.json new file mode 100644 index 000000000..62b956eff --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/droplet-droplets-snapshot-not-present.json @@ -0,0 +1,17 @@ +{ + "description": "Droplets having no snapshots present", + "rationale": "Droplets should have at least 1 snapshot present for strategic points of recovery, for instance, before a major change or update.", + "dashboard_name": "Droplets", + "path": "droplet.droplets.id", + "conditions": [ + "and", + [ + "droplet.droplets.id.snapshot_ids", + "equal", + "[]" + ] + ], + "id_suffix": "snapshot_ids" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-autoupgrade-minor.json b/ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-autoupgrade-minor.json new file mode 100644 index 000000000..fec18603a --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-autoupgrade-minor.json @@ -0,0 +1,17 @@ +{ + "description": "Auto-Upgrade Minor Version Patches Disabled", + "rationale": "Enabling auto-upgrade for minor version patches in Kubernetes on Digital Ocean enhances security by automatically applying the latest security patches and bug fixes, ensuring that the system is protected against vulnerabilities without upgrading to a new minor version.", + "dashboard_name": "Kubernetes", + "path": "kubernetes.kubernetes.id", + "conditions": [ + "and", + [ + "kubernetes.kubernetes.id.auto_upgrade", + "equal", + "False" + ] + ], + "id_suffix": "auto_upgrade" +} + + diff --git 
a/ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-ha-enabled.json b/ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-ha-enabled.json new file mode 100644 index 000000000..c6bb1f8f3 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-ha-enabled.json @@ -0,0 +1,17 @@ +{ + "description": "High Availability for Control Plane", + "rationale": "Enabling the High Availability Control Plane feature in Kubernetes on Digital Ocean ensures that the cluster remains operational even if one or more control nodes fail, enhancing the reliability and resilience of your applications. Please note that this feature cannot be disabled once it has been enabled.", + "dashboard_name": "Kubernetes", + "path": "kubernetes.kubernetes.id", + "conditions": [ + "and", + [ + "kubernetes.kubernetes.id.ha", + "equal", + "False" + ] + ], + "id_suffix": "ha" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-surge-upgrade.json b/ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-surge-upgrade.json new file mode 100644 index 000000000..d041b4f1d --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/kubernetes-kubernetes-surge-upgrade.json @@ -0,0 +1,17 @@ +{ + "description": "Surge Upgrade Disabled", + "rationale": "Enabling surge updates in Digital Ocean allows for the creation of additional nodes during updates before pods draining, ensuring that new versions are fully operational before old ones are terminated, thus avoiding downtime.", + "dashboard_name": "Kubernetes", + "path": "kubernetes.kubernetes.id", + "conditions": [ + "and", + [ + "kubernetes.kubernetes.id.surge_upgrade", + "equal", + "False" + ] + ], + "id_suffix": "surge_upgrade" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-high-ttl.json b/ScoutSuite/providers/do/rules/findings/networking-domains-high-ttl.json new file mode 100644 index 000000000..9a8a42487 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-high-ttl.json @@ -0,0 +1,15 @@ +{ + "description": "Domain has a high TTL record", + "rationale": "Long TTLs delay the propagation of changes. 
For instance, if you update an IP address or switch services, clients will continue using old cached data until the TTL expires", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.highttl_records", + "notEqual", + "False" + ] + ], + "id_suffix": "highttl_records" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dkim.json b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dkim.json new file mode 100644 index 000000000..d9c64dd38 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dkim.json @@ -0,0 +1,15 @@ +{ + "description": "Domain is missing DKIM record", + "rationale": "DKIM helps prevent email spoofing by adding cryptographic signatures to your outgoing emails", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.dkim_record", + "equal", + "False" + ] + ], + "id_suffix": "dkim_record" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dmarc.json b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dmarc.json new file mode 100644 index 000000000..350d18a28 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-dmarc.json @@ -0,0 +1,15 @@ +{ + "description": "Domain is missing DMARC record", + "rationale": "A DMARC policy tells a receiving email server what to do after checking a domain's Sender Policy Framework (SPF) and DomainKeys Identified Mail (DKIM) records, which are additional email authentication methods. Addtionally without DMARC, you won't be able receive reports about legitimate and unauthorized emails sent on behalf of your domain", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.dmarc_record", + "equal", + "False" + ] + ], + "id_suffix": "dmarc_record" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-missing-spf.json b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-spf.json new file mode 100644 index 000000000..d4f540b0b --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-missing-spf.json @@ -0,0 +1,15 @@ +{ + "description": "Domain is missing SPF record", + "rationale": "Without an SPF record, attackers can spoof your domain by sending emails that appear to originate from your legitimate domain", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.spf_record", + "equal", + "False" + ] + ], + "id_suffix": "spf_record" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-domains-spf-overly-permissive.json b/ScoutSuite/providers/do/rules/findings/networking-domains-spf-overly-permissive.json new file mode 100644 index 000000000..2b997e1aa --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-domains-spf-overly-permissive.json @@ -0,0 +1,15 @@ +{ + "description": "Domain has a overly permissive SPF record", + "rationale": "Overly permissive SPF record allows the anyone to send emails on your domain's behalf", + "dashboard_name": "Networking", + "path": "networking.domains.id", + "conditions": [ + "and", + [ + "networking.domains.id.spf_record_all", + "notEqual", + "False" + ] + ], + "id_suffix": "spf_record_all" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-firewalls-public-ports.json 
b/ScoutSuite/providers/do/rules/findings/networking-firewalls-public-ports.json new file mode 100644 index 000000000..fc037a57d --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-firewalls-public-ports.json @@ -0,0 +1,15 @@ +{ + "description": "Firewalls with publically exposed ports", + "rationale": "Firewalls should not expose sensitive exposed to public internet.", + "dashboard_name": "Networking", + "path": "networking.firewalls.id", + "conditions": [ + "and", + [ + "networking.firewalls.id.public_ports_enabled", + "equal", + "True" + ] + ], + "id_suffix": "public_ports_enabled" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-firewalls-quad-zero.json b/ScoutSuite/providers/do/rules/findings/networking-firewalls-quad-zero.json new file mode 100644 index 000000000..3087e3a14 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-firewalls-quad-zero.json @@ -0,0 +1,15 @@ +{ + "description": "Firewalls with quad zero configuration", + "rationale": "Firewalls with quad zero configuration expose all ports to public internet", + "dashboard_name": "Networking", + "path": "networking.firewalls.id", + "conditions": [ + "and", + [ + "networking.firewalls.id.all_ports_exposed", + "equal", + "True" + ] + ], + "id_suffix": "all_ports_exposed" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-load-balancer-backend-keepalive-disabled.json b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-backend-keepalive-disabled.json new file mode 100644 index 000000000..c32919003 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-backend-keepalive-disabled.json @@ -0,0 +1,15 @@ +{ + "description": "Load Balancer with backend Keepalive disabled", + "rationale": "Consider enabling Keep-Alive to improve performance, reduce latency and load", + "dashboard_name": "Networking", + "path": "networking.load_balancers.id", + "conditions": [ + "and", + [ + "networking.load_balancers.id.enable_backend_keepalive", + "equal", + "False" + ] + ], + "id_suffix": "enable_backend_keepalive" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-load-balancer-ssl-redirect-disabled.json b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-ssl-redirect-disabled.json new file mode 100644 index 000000000..8473227c7 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-ssl-redirect-disabled.json @@ -0,0 +1,15 @@ +{ + "description": "Load Balancer with SSL redirects disabled", + "rationale": "SSL redirects should be enabled to enforce https connection", + "dashboard_name": "Networking", + "path": "networking.load_balancers.id", + "conditions": [ + "and", + [ + "networking.load_balancers.id.redirect_http_to_https", + "equal", + "False" + ] + ], + "id_suffix": "redirect_http_to_https" +} diff --git a/ScoutSuite/providers/do/rules/findings/networking-load-balancer-without-droplet.json b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-without-droplet.json new file mode 100644 index 000000000..38b6cdd8c --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/networking-load-balancer-without-droplet.json @@ -0,0 +1,15 @@ +{ + "description": "Load Balancer without attached Droplets", + "rationale": "Load Balancer without attached Droplets", + "dashboard_name": "Networking", + "path": "networking.load_balancers.id", + "conditions": [ + "and", + [ + "networking.load_balancers.id.droplet_ids", + "equal", + "[]" + ] + ], + "id_suffix": "droplet_ids" +} diff --git 
a/ScoutSuite/providers/do/rules/findings/spaces-buckets-cors.json b/ScoutSuite/providers/do/rules/findings/spaces-buckets-cors.json new file mode 100644 index 000000000..72bfca120 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/spaces-buckets-cors.json @@ -0,0 +1,17 @@ +{ + "description": "CORS not configured in bucket", + "rationale": "Without a CORS configuration, web-based applications may be significantly restricted from accessing the bucket's resources across different domains, potentially limiting the bucket's usefulness for content delivery and integration with external web services.", + "dashboard_name": "Spaces", + "path": "spaces.buckets.id", + "conditions": [ + "and", + [ + "spaces.buckets.id.CORS", + "equal", + "False" + ] + ], + "id_suffix": "CORS" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-read.json b/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-read.json new file mode 100644 index 000000000..500a641b2 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-read.json @@ -0,0 +1,17 @@ +{ + "description": "Bucket with public read access", + "rationale": "Buckets containing sensitive data must not be publicly readable.", + "dashboard_name": "Spaces", + "path": "spaces.buckets.id", + "conditions": [ + "and", + [ + "spaces.buckets.id.public_read", + "equal", + "True" + ] + ], + "id_suffix": "public_read" +} + + diff --git a/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-write.json b/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-write.json new file mode 100644 index 000000000..ed3005fd9 --- /dev/null +++ b/ScoutSuite/providers/do/rules/findings/spaces-buckets-public-write.json @@ -0,0 +1,17 @@ +{ + "description": "Bucket with public write access", + "rationale": "Buckets must not allow public write access, as it lets anyone modify existing objects or upload new content.", + "dashboard_name": "Spaces", + "path": "spaces.buckets.id", + "conditions": [ + "and", + [ + "spaces.buckets.id.public_write", + "equal", + "True" + ] + ], + "id_suffix": "public_write" +} + + diff --git a/ScoutSuite/providers/do/rules/rulesets/default.json b/ScoutSuite/providers/do/rules/rulesets/default.json new file mode 100644 index 000000000..a56c2e8e0 --- /dev/null +++ b/ScoutSuite/providers/do/rules/rulesets/default.json @@ -0,0 +1,167 @@ +{ + "about": "Default ruleset for DigitalOcean.", + "rules": { + "droplet-droplets-backup-not-enabled.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "droplet-droplets-snapshot-not-present.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-load-balancer-without-droplet.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "droplet-droplets-custom-image.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "droplet-droplets-backup-not-present.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "droplet-droplets-firewall-not-attached.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "droplet-droplets-port-22-exposed.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "droplet-droplets-all-ports-exposed.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "spaces-buckets-public-read.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "spaces-buckets-cors.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-firewalls-public-ports.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-firewalls-quad-zero.json": [ + { + "enabled": true, + "level": "danger" + } + ],
"networking-load-balancer-ssl-redirect-disabled.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "networking-load-balancer-backend-keepalive-disabled.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-domains-missing-spf.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "networking-domains-missing-dkim.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-domains-missing-dmarc.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "networking-domains-spf-overly-permissive.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "networking-domains-high-ttl.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "database-databases-mysql-user-legacy-encryption.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "database-databases-redis-evicition-policy.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "database-databases-postgres-connection-pools.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "database-databases-postgres-trusted-sources.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "droplet-droplets-features-monitoring.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "kubernetes-kubernetes-ha-enabled.json": [ + { + "enabled": true, + "level": "warning" + } + ], + "kubernetes-kubernetes-surge-upgrade.json": [ + { + "enabled": true, + "level": "danger" + } + ], + "kubernetes-kubernetes-autoupgrade-minor.json": [ + { + "enabled": true, + "level": "warning" + } + ] + } +} diff --git a/ScoutSuite/providers/do/rules/rulesets/filters.json b/ScoutSuite/providers/do/rules/rulesets/filters.json new file mode 100644 index 000000000..d6a73a987 --- /dev/null +++ b/ScoutSuite/providers/do/rules/rulesets/filters.json @@ -0,0 +1,4 @@ +{ + "about": "Default set of filters for Scout", + "rules": {} +} diff --git a/ScoutSuite/providers/do/services.py b/ScoutSuite/providers/do/services.py new file mode 100644 index 000000000..80eab0ae3 --- /dev/null +++ b/ScoutSuite/providers/do/services.py @@ -0,0 +1,25 @@ +from ScoutSuite.providers.do.authentication_strategy import DoCredentials +from ScoutSuite.providers.do.resources.droplet.base import Droplets +from ScoutSuite.providers.do.resources.spaces.base import Spaces +from ScoutSuite.providers.do.resources.networking.base import Networking +from ScoutSuite.providers.do.resources.database.base import Databases +from ScoutSuite.providers.do.resources.kubernetes.base import Kubernetes +from ScoutSuite.providers.do.facade.base import DoFacade +from ScoutSuite.providers.base.services import BaseServicesConfig + + +class DigitalOceanServicesConfig(BaseServicesConfig): + def __init__(self, credentials: DoCredentials = None, **kwargs): + super().__init__(credentials) + + facade = DoFacade(credentials) + + self.droplet = Droplets(facade) + self.networking = Networking(facade) + self.database = Databases(facade) + self.kubernetes = Kubernetes(facade) + if self.credentials.session: + self.spaces = Spaces(facade) + + def _is_provider(self, provider_name): + return provider_name == "do" diff --git a/ScoutSuite/providers/do/utils.py b/ScoutSuite/providers/do/utils.py new file mode 100644 index 000000000..c08c8becb --- /dev/null +++ b/ScoutSuite/providers/do/utils.py @@ -0,0 +1,23 @@ +import boto3 +from ScoutSuite.core.console import print_exception, print_debug, print_warning + + +def get_client(service: str, session: boto3.session.Session, region: str = None): + """ + Instantiates an DO Spaces API client + + 
""" + + try: + return ( + session.client( + service, + region_name=region, + endpoint_url="https://" + region + ".digitaloceanspaces.com", + ) + if region + else session.client(service) + ) + except Exception as e: + print_exception(f"Failed to create client for the {service} service: {e}") + return None diff --git a/requirements.txt b/requirements.txt index 7f53a4dfc..eb8c8cdda 100755 --- a/requirements.txt +++ b/requirements.txt @@ -65,4 +65,7 @@ oss2>=2.8.0 oci>=2.2.4 # Kubernetes SDK -kubernetes \ No newline at end of file +kubernetes + +# DigitalOcean Cloud Provider +pydo >=0.2.0 diff --git a/tools/process_raw_response.py b/tools/process_raw_response.py index eec1a5c51..c2044f69d 100755 --- a/tools/process_raw_response.py +++ b/tools/process_raw_response.py @@ -52,7 +52,7 @@ def camel_to_snake(name, upper=False): parser.add_argument('-v', '--value', required=True, help="The raw response") args = parser.parse_args() - if args.provider not in ['aws', 'azure', 'aliyun', 'gcp', 'oci', 'kubernetes']: + if args.provider not in ['aws', 'azure', 'aliyun', 'gcp', 'oci', 'do', 'kubernetes']: # TODO support more providers print('Provider not implemented') exit() @@ -79,6 +79,9 @@ def camel_to_snake(name, upper=False): elif args.provider == 'oci': object_format = 'raw_{}.{}' object_value_dict = json.loads(args.value) + elif args.provider == 'do': + object_format = 'raw_{}.{}' + object_value_dict = json.loads(args.value) elif args.provider == 'kubernetes': object_format = 'raw_{}.{}' object_value_dict = json.loads(args.value) From 8dbdf2f27616625d04a35679088b8339ea3adac2 Mon Sep 17 00:00:00 2001 From: fernando-gallego <102300106+fernando-gallego@users.noreply.github.com> Date: Wed, 8 May 2024 12:16:13 +0200 Subject: [PATCH 29/32] Update requirements.txt Update some GCP dependencies as in https://github.com/nccgroup/ScoutSuite/pull/1589 and pin protobuf 3.20.1 since more recent versions break GCP libs --- requirements.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/requirements.txt b/requirements.txt index b43a0e8b8..d43d62487 100755 --- a/requirements.txt +++ b/requirements.txt @@ -6,6 +6,7 @@ cherrypy>=18.1.0 cherrypy-cors>=1.6 coloredlogs<=10.0 asyncio-throttle==0.1.1 +protobuf==3.20.1 # AWS Provider botocore>=1.20.21 @@ -19,10 +20,10 @@ google-cloud-container>=2.1.0 google-cloud-core>=0.29.1 google-cloud-iam>=0.1.0 google-cloud-logging>=2.2.0 -google-cloud-monitoring==1.1.0 +google-cloud-monitoring==1.1.1 google-cloud-resource-manager>=0.28.3 google-cloud-storage>=1.13.2 -google-cloud-kms==1.3.0 +google-cloud-kms==1.4.1 ## API Client Libraries google-api-python-client>=2.47.0 oauth2client>=4.1.3 @@ -68,4 +69,4 @@ oci>=2.2.4 kubernetes # DigitalOcean Cloud Provider -pydo >=0.2.0 \ No newline at end of file +pydo >=0.2.0 From 869455694b55f04efd93150741d7122d364ef439 Mon Sep 17 00:00:00 2001 From: fernando-gallego <102300106+fernando-gallego@users.noreply.github.com> Date: Wed, 8 May 2024 12:51:58 +0200 Subject: [PATCH 30/32] Update requirements.txt --- requirements.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index d43d62487..e014a86c6 100755 --- a/requirements.txt +++ b/requirements.txt @@ -20,10 +20,10 @@ google-cloud-container>=2.1.0 google-cloud-core>=0.29.1 google-cloud-iam>=0.1.0 google-cloud-logging>=2.2.0 -google-cloud-monitoring==1.1.1 +google-cloud-monitoring==1.1.0 google-cloud-resource-manager>=0.28.3 google-cloud-storage>=1.13.2 -google-cloud-kms==1.4.1 +google-cloud-kms==1.3.0 ## API Client 
Libraries google-api-python-client>=2.47.0 oauth2client>=4.1.3 From 3654d547b44ed01fe826106cf0937729ad0139db Mon Sep 17 00:00:00 2001 From: fernando-gallego <102300106+fernando-gallego@users.noreply.github.com> Date: Wed, 8 May 2024 12:52:08 +0200 Subject: [PATCH 31/32] Update requirements.txt --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e014a86c6..eb8c8cdda 100755 --- a/requirements.txt +++ b/requirements.txt @@ -6,7 +6,6 @@ cherrypy>=18.1.0 cherrypy-cors>=1.6 coloredlogs<=10.0 asyncio-throttle==0.1.1 -protobuf==3.20.1 # AWS Provider botocore>=1.20.21 From 4194142c0433c1211901e08365b3722e173788c0 Mon Sep 17 00:00:00 2001 From: fernando-gallego <102300106+fernando-gallego@users.noreply.github.com> Date: Fri, 10 May 2024 11:09:31 +0200 Subject: [PATCH 32/32] Update __init__.py Update to v5.14.0 --- ScoutSuite/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ScoutSuite/__init__.py b/ScoutSuite/__init__.py index b791ff203..7a45051d3 100755 --- a/ScoutSuite/__init__.py +++ b/ScoutSuite/__init__.py @@ -1,5 +1,5 @@ __author__ = 'NCC Group' -__version__ = '5.13.0' +__version__ = '5.14.0' ERRORS_LIST = []
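For reference, a minimal usage sketch of the new Spaces helper introduced in ScoutSuite/providers/do/utils.py, not part of the patches above. The access key, secret, and region below are placeholder values, and in practice the boto3 session is expected to be built by the DigitalOcean authentication strategy rather than by hand.

import boto3
from ScoutSuite.providers.do.utils import get_client

# Spaces is S3-compatible, so the helper reuses boto3 with a Spaces endpoint.
session = boto3.session.Session(
    aws_access_key_id="SPACES_KEY_PLACEHOLDER",          # placeholder value
    aws_secret_access_key="SPACES_SECRET_PLACEHOLDER",   # placeholder value
)

# get_client("s3", session, region) returns an S3 client pointed at
# https://<region>.digitaloceanspaces.com, or None if client creation fails.
client = get_client("s3", session, region="nyc3")
if client:
    buckets = client.list_buckets().get("Buckets", [])
    print([bucket["Name"] for bucket in buckets])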