add new --viewer-prefix-list create parameter #165

Merged: 7 commits, Mar 27, 2024
Changes from 3 commits
1 change: 1 addition & 0 deletions cdk-lib/cloud-demo.ts
@@ -100,6 +100,7 @@ case 'ClusterMgmtParams': {
ssmParamNameViewerConfig: params.nameViewerConfigSsmParam,
ssmParamNameViewerDetails: params.nameViewerDetailsSsmParam,
planCluster: params.planCluster,
userConfig: params.userConfig,
});
viewerNodesStack.addDependency(captureBucketStack);
viewerNodesStack.addDependency(vpcStackToUse);
1 change: 1 addition & 0 deletions cdk-lib/core/context-types.ts
@@ -97,6 +97,7 @@ export interface UserConfig {
historyDays: number;
replicas: number;
pcapDays: number;
viewerPrefixList: string;
}

/**
28 changes: 21 additions & 7 deletions cdk-lib/viewer-stacks/viewer-nodes-stack.ts
@@ -12,7 +12,7 @@ import * as ssm from 'aws-cdk-lib/aws-ssm';
import * as path from 'path';
import { Construct } from 'constructs';
import * as ssmwrangling from '../core/ssm-wrangling';
import * as types from '../core/context-types';

export interface ViewerNodesStackProps extends cdk.StackProps {
readonly arnViewerCert: string;
@@ -24,7 +24,8 @@ export interface ViewerNodesStackProps extends cdk.StackProps {
readonly osPassword: secretsmanager.Secret;
readonly ssmParamNameViewerConfig: string;
readonly ssmParamNameViewerDetails: string;
    readonly planCluster: types.ClusterPlan;
readonly userConfig: types.UserConfig;
}

export class ViewerNodesStack extends cdk.Stack {
@@ -113,11 +114,16 @@ export class ViewerNodesStack extends cdk.Stack {
internetFacing: true,
loadBalancerName: `${props.clusterName}-Viewer`.toLowerCase() // Receives a random suffix, which minimizes DNS collisions
});

        // If the user supplied a prefix list, create a security group for the LB that only allows
        // traffic originating from that prefix list
        if (props.userConfig.viewerPrefixList) {
            const sg = new ec2.SecurityGroup(this, 'ALBSG', {
                vpc: props.viewerVpc,
                description: 'Control access to the viewer ALB',
            });
            sg.addIngressRule(ec2.Peer.prefixList(props.userConfig.viewerPrefixList), ec2.Port.tcp(443), 'Allow HTTPS traffic from the prefix list');
            lb.addSecurityGroup(sg);
        }

        // Our Arkime Viewer container
const container = taskDefinition.addContainer('ViewerContainer', {
@@ -141,6 +147,13 @@ export class ViewerNodesStack extends cdk.Stack {
hostPort: viewerPort
});

/*
const listener = lb.addListener('Listener', {
protocol: elbv2.ApplicationProtocol.HTTP,
port: 80,
open: true
});

listener.addTargets('TargetGroup', {
protocol: elbv2.ApplicationProtocol.HTTP,
port: viewerPort,
@@ -156,6 +169,7 @@ export class ViewerNodesStack extends cdk.Stack {
interval: cdk.Duration.seconds(30),
},
});
*/

const certificate = acm.Certificate.fromCertificateArn(this, 'ViewerCert', props.arnViewerCert);
const httpsListener = lb.addListener('HttpsListener', {
10 changes: 8 additions & 2 deletions manage_arkime.py
@@ -124,13 +124,19 @@ def demo_traffic_destroy(ctx):
default=None,
type=click.STRING,
required=False)
@click.option(
"--viewer-prefix-list",
    help=("The ID of an existing Prefix List used to restrict access to the Viewer LB."),
default=None,
type=click.STRING,
required=False)
@click.pass_context
def cluster_create(ctx, name, expected_traffic, spi_days, history_days, replicas, pcap_days, preconfirm_usage,
                   just_print_cfn, capture_cidr, viewer_cidr, viewer_prefix_list):
profile = ctx.obj.get("profile")
region = ctx.obj.get("region")
cmd_cluster_create(profile, region, name, expected_traffic, spi_days, history_days, replicas, pcap_days,
                       preconfirm_usage, just_print_cfn, capture_cidr, viewer_cidr, viewer_prefix_list)
cli.add_command(cluster_create)
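Note: the new flag expects the ID of an existing VPC managed prefix list (pl-...). A hedged sketch of creating one with boto3 follows; the name, CIDR, and invocation below are illustrative, not part of this PR:

```python
import boto3

ec2 = boto3.client("ec2")

# Hypothetical allowlist; use the CIDR ranges that should be able to reach the Viewer LB.
response = ec2.create_managed_prefix_list(
    PrefixListName="arkime-viewer-allowlist",  # illustrative name
    AddressFamily="IPv4",
    MaxEntries=10,
    Entries=[{"Cidr": "203.0.113.0/24", "Description": "Office egress range"}],
)

# The resulting pl-... ID is what gets passed to the new flag, e.g.:
#   ./manage_arkime.py cluster-create --name MyCluster --viewer-prefix-list pl-0123456789abcdef0
print(response["PrefixList"]["PrefixListId"])
```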

@click.command(help="Tears down the Arkime Cluster in your account; by default, leaves your data intact")
Expand Down
28 changes: 15 additions & 13 deletions manage_arkime/commands/cluster_create.py
@@ -32,7 +32,7 @@
logger = logging.getLogger(__name__)

def cmd_cluster_create(profile: str, region: str, name: str, expected_traffic: float, spi_days: int, history_days: int, replicas: int,
                       pcap_days: int, preconfirm_usage: bool, just_print_cfn: bool, capture_cidr: str, viewer_cidr: str, viewer_prefix_list: str):
logger.debug(f"Invoking cluster-create with profile '{profile}' and region '{region}'")

aws_provider = AwsClientProvider(aws_profile=profile, aws_region=region)
@@ -51,7 +51,7 @@ def cmd_cluster_create(profile: str, region: str, name: str, expected_traffic: f

# Generate our capacity plan, then confirm it's what the user expected and it's safe to proceed with the operation
previous_user_config = _get_previous_user_config(name, aws_provider)
    next_user_config = _get_next_user_config(name, expected_traffic, spi_days, history_days, replicas, pcap_days, viewer_prefix_list, aws_provider)
previous_capacity_plan = _get_previous_capacity_plan(name, aws_provider)
next_capacity_plan = _get_next_capacity_plan(next_user_config, previous_capacity_plan, capture_cidr, viewer_cidr, aws_provider)

@@ -111,19 +111,19 @@ def _is_initial_invocation(cluster_name: str, aws_provider: AwsClientProvider) -
def _should_proceed_with_operation(initial_invocation: bool, previous_capacity_plan: ClusterPlan, next_capacity_plan: ClusterPlan,
previous_user_config: UserConfig, next_user_config: UserConfig, preconfirm_usage: bool,
capture_cidr_block: str, viewer_cidr_block: str) -> bool:

if (not initial_invocation) and (capture_cidr_block or viewer_cidr_block):
        # We can't change the CIDR without tearing down the VPC, which effectively means tearing down the entire
        # Cluster and re-creating it. Instead of attempting that, we make the CIDR settable only on creation.
logger.error("You can only specify the VPC CIDR(s) when you initially create the Cluster, as changing it"
" requires tearing down the entire Cluster. Aborting...")
return False

if next_capacity_plan.viewerVpc:
# Ensure the Viewer VPC's CIDR, if it exists, doesn't overlap with the Capture VPC's CIDR
viewer_network = ipaddress.ip_network(next_capacity_plan.viewerVpc.cidr.block, strict=False)
        capture_network = ipaddress.ip_network(next_capacity_plan.captureVpc.cidr.block, strict=False)

if viewer_network.overlaps(capture_network):
logger.error(f"Your specified Viewer VPC CIDR ({str(viewer_network)}) overlaps with your Capture VPC"
f" CIDR ({str(capture_network)}). Please ensure these two CIDRs do not overlap.")
@@ -139,7 +139,7 @@ def _should_proceed_with_operation(initial_invocation: bool, previous_capacity_p
logger.error(f"Your specified Capture capacity plan does not fit in the VPC; there are {available_ips} usable IPs in your VPC"
f" and your plan requires {required_ips} IPs. Aborting...")
return False

return True
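For reference, the overlap check above leans on the standard library's ipaddress semantics; a quick illustration (not code from this PR):

```python
import ipaddress

viewer = ipaddress.ip_network("10.0.0.0/16", strict=False)
capture = ipaddress.ip_network("10.0.128.0/17", strict=False)

print(viewer.overlaps(capture))  # True: the /17 sits inside the /16, so creation would abort
print(viewer.overlaps(ipaddress.ip_network("192.168.0.0/24")))  # False: disjoint ranges are fine
```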

def _get_previous_user_config(cluster_name: str, aws_provider: AwsClientProvider) -> UserConfig:
@@ -157,9 +157,9 @@ def _get_previous_user_config(cluster_name: str, aws_provider: AwsClientProvider
return UserConfig(None, None, None, None, None)

def _get_next_user_config(cluster_name: str, expected_traffic: float, spi_days: int, history_days: int, replicas: int,
                          pcap_days: int, viewer_prefix_list: str, aws_provider: AwsClientProvider) -> UserConfig:
# At least one parameter isn't defined
    if None in [expected_traffic, spi_days, replicas, pcap_days, history_days, viewer_prefix_list]:
# Re-use the existing configuration if it exists
try:
stored_config_json = ssm_ops.get_ssm_param_json_value(
@@ -179,12 +179,14 @@ def _get_next_user_config(cluster_name: str, expected_traffic: float, spi_days:
user_config.replicas = replicas
if pcap_days is not None:
user_config.pcapDays = pcap_days
if viewer_prefix_list is not None:
user_config.viewerPrefixList = viewer_prefix_list

return user_config

        # No existing configuration exists, so use the defaults
except ssm_ops.ParamDoesNotExist:
            return UserConfig(MINIMUM_TRAFFIC, DEFAULT_SPI_DAYS, DEFAULT_HISTORY_DAYS, DEFAULT_REPLICAS, DEFAULT_S3_STORAGE_DAYS, None)
    # All of the parameters are defined
    else:
        return UserConfig(expected_traffic, spi_days, history_days, replicas, pcap_days, viewer_prefix_list)
@@ -361,7 +363,7 @@ def _tag_domain(cluster_name: str, aws_provider: AwsClientProvider):
"domainArn",
aws_provider
)

opensearch_client = aws_provider.get_opensearch()
opensearch_client.add_tags(
ARN=os_domain_Arn,
@@ -391,7 +393,7 @@ def _get_stacks_to_deploy(cluster_name: str, next_user_config: UserConfig, next_

def _get_cdk_context(cluster_name: str, next_user_config: UserConfig, next_capacity_plan: ClusterPlan, cert_arn: str,
aws_env: AwsEnvironment):

# We might not deploy all these, but we need to tell the CDK that they exist as something we might deploy in order
# for its auto-wiring to work.
stack_names = context.ClusterStackNames(
@@ -410,4 +412,4 @@ def _get_cdk_context(cluster_name: str, next_user_config: UserConfig, next_capac
next_user_config,
constants.get_config_bucket_name(aws_env.aws_account, aws_env.aws_region, cluster_name),
stack_names
    )
30 changes: 15 additions & 15 deletions manage_arkime/core/capacity_planning.py
@@ -110,14 +110,14 @@ def get_capture_node_capacity_plan(expected_traffic: float, azs: List[str]) -> C

if expected_traffic > MAX_TRAFFIC:
raise TooMuchTraffic(expected_traffic)

chosen_instance = next(instance for instance in CAPTURE_INSTANCES if expected_traffic <= instance.maxTraffic)

desired_instances = max(
chosen_instance.minNodes,
math.ceil(expected_traffic/chosen_instance.trafficPer)
)

return CaptureNodesPlan(
chosen_instance.instanceType,
desired_instances,
@@ -373,20 +373,20 @@ def _validate_cidr(self, cidr_str: str ):
overall_form = re.compile("^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}/[0-9]{1,2}$")
if not overall_form.match(cidr_str):
raise InvalidCidr(cidr_str)

prefix_portion = cidr_str.split("/")[0]
prefix_portions_correct = list(map(lambda x: int(x) >=0 and int(x) <= 255, prefix_portion.split(".")))
if not all(prefix_portions_correct):
raise InvalidCidr(cidr_str)

mask_portion = cidr_str.split("/")[1]
mask_portion_correct = int(mask_portion) >= 0 and int(mask_portion) <= 32
if not mask_portion_correct:
raise InvalidCidr(cidr_str)
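Assuming the Cidr constructor runs _validate_cidr (as the DEFAULT_VPC_CIDR usage below suggests), the validation behaves roughly like this sketch:

```python
Cidr("10.0.0.0/16")    # accepted
Cidr("10.0.0.300/16")  # raises InvalidCidr: octet outside 0-255
Cidr("10.0.0.0/33")    # raises InvalidCidr: mask outside 0-32
Cidr("10.0.0.0")       # raises InvalidCidr: missing the /mask portion
```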

def __eq__(self, other) -> bool:
return (self.block == other.block and self.prefix == other.prefix and self.mask == other.mask)

def __str__(self) -> str:
return self.block

@@ -396,7 +396,7 @@ def to_dict(self) -> Dict[str, any]:
"prefix": self.prefix,
"mask": self.mask,
}

DEFAULT_VPC_CIDR = Cidr("10.0.0.0/16") # What AWS VPC gives by default
DEFAULT_CAPTURE_PUBLIC_MASK = 28 # minimum subnet size; we don't need much in the Capture VPC public subnets
DEFAULT_VIEWER_PUBLIC_MASK = 28 # minimum subnet size; we don't need much in the Viewer VPC public subnets either
@@ -433,23 +433,23 @@ def from_dict(cls: Type[T_VpcPlan], input: Dict[str, any]) -> T_VpcPlan:
publicSubnetMask = input["publicSubnetMask"]

return cls(cidr, azs, publicSubnetMask)

def get_usable_ips(self) -> int:
total_ips = 2 ** (32 - int(self.cidr.mask))
public_ips = 2 ** (32 - int(self.publicSubnetMask)) * len(self.azs)
reserved_ips_per_subnet = 2 # The first (local gateway) and last (broadcast) IP are often reserved
reserved_private_ips = reserved_ips_per_subnet * len(self.azs)

return total_ips - public_ips - reserved_private_ips
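A worked check of this arithmetic, using the defaults above (a 10.0.0.0/16 Capture VPC with /28 public subnets; the two-AZ count is illustrative):

```python
total_ips = 2 ** (32 - 16)       # 65536 addresses in the /16
public_ips = 2 ** (32 - 28) * 2  # 16 addresses per /28 public subnet x 2 AZs = 32
reserved_ips = 2 * 2             # first/last address per subnet x 2 AZs = 4

assert total_ips - public_ips - reserved_ips == 65500  # usable private IPs
```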

def get_capture_vpc_plan(previous_plan: VpcPlan, capture_cidr_block: str, azs: List[str]) -> VpcPlan:
if previous_plan and all(value is not None for value in vars(previous_plan).values()):
return previous_plan
elif not capture_cidr_block:
return VpcPlan(DEFAULT_VPC_CIDR, azs, DEFAULT_CAPTURE_PUBLIC_MASK)
else:
return VpcPlan(Cidr(capture_cidr_block), azs, DEFAULT_CAPTURE_PUBLIC_MASK)

def get_viewer_vpc_plan(previous_plan: VpcPlan, viewer_cidr_block: str, azs: List[str]) -> VpcPlan:
if previous_plan and all(value is not None for value in vars(previous_plan).values()):
return previous_plan
@@ -488,7 +488,7 @@ class ClusterPlan:
viewerVpc: VpcPlan

def __eq__(self, other) -> bool:
        return (self.captureNodes == other.captureNodes and self.captureVpc == other.captureVpc
and self.ecsResources == other.ecsResources and self.osDomain == other.osDomain and self.s3 == other.s3
and self.viewerNodes == other.viewerNodes and self.viewerVpc == other.viewerVpc)

@@ -522,15 +522,15 @@ def from_dict(cls: Type[T_ClusterPlan], input: Dict[str, any]) -> T_ClusterPlan:
viewer_vpc = None

return cls(capture_nodes, capture_vpc, ecs_resources, os_domain, s3, viewer_nodes, viewer_vpc)

def get_required_capture_ips(self) -> int:
required_capture_ips = self.captureNodes.maxCount + self.osDomain.dataNodes.count + self.osDomain.masterNodes.count
required_viewer_ips = self.viewerNodes.maxCount if not self.viewerVpc else 0

return required_capture_ips + required_viewer_ips

def will_capture_plan_fit(self) -> bool:
usable_ips = self.captureVpc.get_usable_ips()
required_ips = self.get_required_capture_ips()
required_ips = self.get_required_capture_ips()

        return usable_ips >= required_ips
4 changes: 3 additions & 1 deletion manage_arkime/core/user_config.py
@@ -11,6 +11,7 @@ class UserConfig:
historyDays: int
replicas: int
pcapDays: int
viewerPrefixList: str = None

""" Only process fields we still need, this allows us to ignore config no longer used """
@classmethod
@@ -29,6 +30,7 @@ def to_dict(self) -> Dict[str, any]:
'spiDays': self.spiDays,
'replicas': self.replicas,
'pcapDays': self.pcapDays,
            'historyDays': self.historyDays,
'viewerPrefixList': self.viewerPrefixList,
}
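Since clusters created before this change will have a stored config without the new key, the fallback presumably looks like this sketch (assuming from_dict drops unknown keys and leaves absent fields at their dataclass defaults, per the docstring above):

```python
# Pre-upgrade config pulled from SSM: no 'viewerPrefixList' key.
stored = {"expectedTraffic": 0.01, "spiDays": 30, "historyDays": 120,
          "replicas": 1, "pcapDays": 30}

config = UserConfig.from_dict(stored)
assert config.viewerPrefixList is None  # stays None until --viewer-prefix-list is supplied
```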
