diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 0000000..e69de29 diff --git a/404.html b/404.html new file mode 100644 index 0000000..d6b31bc --- /dev/null +++ b/404.html @@ -0,0 +1,3880 @@ + + + +
+ + + + + + + + + + + + + + +IAM-Role Assume role link
+Youtube Video_link
+I need my AWS Lambda function to assume an AWS Identity and Access Management (IAM) role in another AWS account. How do I set that up?
+Short description + To have your Lambda function assume an IAM role in another AWS account, do the following:
+Configure your Lambda function's execution role to allow the function to assume an IAM role in another AWS account.
+Note: A Lambda function can assume an IAM role in another AWS account to do either of the following:
+Access resources—For example, accessing an Amazon Simple Storage Service (Amazon S3) bucket.
+Do tasks—For example, starting and stopping instances. +Resolution +Note: The following example procedure references two different types of AWS accounts:
+A home account that hosts the Lambda function ( 111111111111).
+A cross-account that includes the IAM role that the Lambda function assumes (222222222222) +The procedure assumes:
+You have created the IAM role that you want to use in the cross-account (222222222222)
+Important: Replace 222222222222 with the AWS account ID of the cross-account role that your function is assuming. Replace role-on-source-account with the assumed role's name.
+Iam
+Add the following policy statement to your cross-account IAM role's trust policy (in account 222222222222) by following the instructions in Modifying a role trust policy (console):
+Important: Replace 111111111111 with the AWS account ID of the account that your Lambda function is in. Replace my-lambda-execution-role with the name of your function's + execution role.
+Example
+ +Note: The AWS STS AssumeRole API call returns credentials that you can use to create a service client. By using this service client, your Lambda function has the permissions granted to it by the assumed role. For more information, see assume_role in the AWS SDK for Python (Boto 3) documentation.
+Important: Replace 222222222222 with the AWS account ID of the cross-account role that your function is assuming. Replace role-on-source-account with the assumed role's name.
+Python
+import boto3
+
+def lambda_handler(event, context):
+
+ sts_connection = boto3.client('sts')
+ acct_b = sts_connection.assume_role(
+ RoleArn="arn:aws:iam::222222222222:role/role-on-source-account",
+ RoleSessionName="cross_acct_lambda"
+ )
+
+ ACCESS_KEY = acct_b['Credentials']['AccessKeyId']
+ SECRET_KEY = acct_b['Credentials']['SecretAccessKey']
+ SESSION_TOKEN = acct_b['Credentials']['SessionToken']
+
+ # create service client using the assumed role credentials, e.g. S3
+ client = boto3.client(
+ 's3',
+ aws_access_key_id=ACCESS_KEY,
+ aws_secret_access_key=SECRET_KEY,
+ aws_session_token=SESSION_TOKEN,
+ )
+
+ return "Hello from Lambda"
+
Troubleshoot
+IAM-Assume-Role-Errorblog_link
+Youtube video_link
+ + + + + + + + + + + + + +Assign Private_IP-Address AWS_Link
+EC2-Using-Boto3-Python Boto-3-Python
+Using-Bot-3 BOTO-3
+Json Formatter @ CuriousConcept
+Security_Group_Boto-3 SG_BOTO-3
+Read Office document for amazon==> Choose Document
+https://aws.amazon.com/kinesis/getting-started/?nc=sn&loc=3
+
sdk for in github
+S3 bucket prefix :-
+Note
+shyaway-WAF-CDN-Global-logs/Access_logs/!{timestamp:yyyy}/!{timestamp:MM}/!{timestamp:dd}/!{timestamp:HH}/
+S3 bucket error prefix :-
+shyaway-WAF-CDN-Global-logs/Error-logs/year=!{firehose:error-output-type}/!{timestamp:yyyy'-'MM'-'dd}/"
+Note
+Amazon Kinesis Data Firehose custom prefixes for Amazon S3 aws.com.
+ + + +[ecs batch processing](https://github.com/aws-samples/ecs-refarch-batch-processing)
+ + + + + + + + + + + + + +To count number of request url path per ip's +SELECT DISTINCT client_ip, + request_url, + count() AS count +FROM alb_logs +WHERE parse_datetime(time,'yyyy-MM-dd''T''HH:mm:ss.SSSSSS''Z') + BETWEEN parse_datetime('2021-09-08-00:00:00','yyyy-MM-dd-HH:mm:ss') + AND parse_datetime('2021-09-08-23:59:00','yyyy-MM-dd-HH:mm:ss') +GROUP BY client_ip,request_url +ORDER BY count() DESC
+ +WITH dataset as +( +SELECT action as waf_action, +terminatingRuleType waf_rule_type, +terminatingruleid waf_rule_id,timestamp, +httprequest.clientip, +httprequest.country, +headeritems AS header, +httprequest.uri, +httprequest.args +FROM "shyaway_logs"."waf_global_alb_logs" waf +CROSS JOIN UNNEST(httprequest.headers) AS t(headeritems) +) +select count(*),waf_action, waf_rule_type,waf_rule_id, clientip, country, header.value, uri, args +from dataset +WHERE waf_action='BLOCK' AND timestamp between 1606483800000 and 1606548600000 +GROUP BY waf_action,waf_rule_id,waf_rule_type,clientip,country,header.value, uri,args
+ +WITH dataset as +( +SELECT action as waf_action, +terminatingRuleType waf_rule_type, +terminatingruleid test, +httprequest.clientip, +httprequest.country, +headeritems AS header, +httprequest.uri, +httprequest.args +FROM "shyaway_logs"."waf_global_alb_logs" waf +CROSS JOIN UNNEST(httprequest.headers) AS t(headeritems) +) +select waf_action, waf_rule_type,test, clientip, country, header.value, uri, args +from dataset +where header.name='user-agent' and header.value='facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)' and waf_action='BLOCK' limit 10
+ +SELECT COUNT(*) AS +count,httpRequest.country, +terminatingruleid, +httprequest.clientip, +action, +httprequest.uri,timestamp +FROM shyaway_waf_cdn_global_logs +WHERE action='BLOCK' +GROUP BY timestamp,httpRequest.country,terminatingruleid, httprequest.clientip, httprequest.uri, action +ORDER BY count DESC,timestamp DESC +LIMIT 1000;
+ +SELECT * +FROM alb_logs +WHERE ("request_url" = 'https://www.shyaway.com:443/bra-online/?bra_offers=buy-2-get-3-free&sku=S28035-Red&utm_source=fblk1p&utm_medium=bra&utm_campaign=999off') limit 10
+ + +CREATE OR REPLACE VIEW count-view-alb-logs AS +SELECT + "elb" +, "count"(*) "count" +FROM + alb_logs +WHERE ("parse_datetime"("time", 'yyyy-MM-dd''T''HH:mm:ss.SSSSSS''Z') BETWEEN "parse_datetime"('2020-10-11-00:00:00', 'yyyy-MM-dd-HH:mm:ss') AND "parse_datetime"('2020-10-11-03:00:00', 'yyyy-MM-dd-HH:mm:ss')) +GROUP BY "elb" +LIMIT 100
+ +SELECT elb,count(*) FROM "alb_logs" WHERE parse_datetime(time,'yyyy-MM-dd''T''HH:mm:ss.SSSSSS''Z') + BETWEEN parse_datetime('2020-11-26-00:00:00','yyyy-MM-dd-HH:mm:ss') + AND parse_datetime('2020-11-27-23:59:00','yyyy-MM-dd-HH:mm:ss') +GROUP BY elb +limit 10
+ +SELECT COUNT(*) AS +count,elb_status_code,request_url,user_agent +FROM alb_logs +WHERE request_url='https://www.shyaway.com:443/bra-online/?bra_offers=buy-2-get-3-free&sku=S28035-Red&utm_source=fblk1p&utm_medium=bra&utm_campaign=999off' +GROUP BY elb_status_code,request_url,user_agent +ORDER BY count DESC +LIMIT 10;
+ +SELECT COUNT(*) AS +count,elb_status_code,request_url,user_agent +FROM alb_logs +WHERE user_agent='facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)' AND elb_status_code='403' +GROUP BY request_url,elb_status_code,user_agent +ORDER BY count DESC +LIMIT 10;
+ +SELECT COUNT(*) AS +count,httpRequest.country, +terminatingruleid, +httprequest.clientip, +action, +httprequest.uri,timestamp +FROM waf_global_alb_logs +WHERE action='BLOCK' AND timestamp between 1606503600000 and 1606863600000 +GROUP BY timestamp,httpRequest.country,terminatingruleid, httprequest.clientip, httprequest.uri, action +ORDER BY count DESC,timestamp DESC +limit 10;
+ +WITH dataset as +( +SELECT action as waf_action, +terminatingRuleType waf_rule_type, +terminatingruleid waf_rule_id,timestamp, +httprequest.clientip, +httprequest.country, +headeritems AS header, +httprequest.uri, +httprequest.args +FROM "shyaway_logs"."waf_global_alb_logs" waf +CROSS JOIN UNNEST(httprequest.headers) AS t(headeritems) +) +select count(*),waf_action, waf_rule_type,waf_rule_id, clientip, country, header.value, uri, args,timestamp,uri +from dataset +WHERE waf_action='BLOCK' AND timestamp between 1606503600000 and 1606863600000 +GROUP BY waf_action,timestamp,waf_rule_id,waf_rule_type,clientip,country,header.value, uri,args,timestamp,uri
+ +SELECT elb_status_code,client_ip,request_url,user_agent, + count(*) AS count +FROM "shyaway_logs"."alb_log" +WHERE elb_status_code LIKE '%400%' AND parse_datetime(time,'yyyy-MM-dd''T''HH:mm:ss.SSSSSS''Z') + BETWEEN parse_datetime('2021-01-04-01:01:00','yyyy-MM-dd-HH:mm:ss') + AND parse_datetime('2021-01-04-23:59:00','yyyy-MM-dd-HH:mm:ss') +GROUP BY elb_status_code,client_ip,request_url,user_agent +ORDER BY count DESC,request_url DESC,user_agent DESC
+ +WITH dataset as +( +SELECT action as waf_action, +terminatingRuleType waf_rule_type, +terminatingruleid waf_rule_id,timestamp, +httprequest.clientip, +httprequest.country, +headeritems AS header, +httprequest.uri, +httprequest.args +FROM "shyaway_logs"."waf_global_alb_logs" waf +CROSS JOIN UNNEST(httprequest.headers) AS t(headeritems) +) +select count(*),waf_action, waf_rule_type,waf_rule_id, clientip, country, header.value, uri, args,timestamp,uri +from dataset +WHERE waf_action='BLOCK' AND timestamp > 1608187410000 +GROUP BY waf_action,timestamp,waf_rule_id,waf_rule_type,clientip,country,header.value, uri,args,uri
+ + + + + + + + + + + + + + +https://www.wellarchitectedlabs.com/
+ + + + + + + + + + + + + +The Big Dev Theory +cicd sample coode +https://github.com/cycode-aws-demo/demo
+ + + + + + + + + + + + + +For DMS TASK
+Provide the Following Aws customer Details To the Magento or Service provider
+Aws account number +Region +Vpc +Subnet +Specify the port number to be opened by the service provider
+Get the "Endpoint" from Adobe or whoever is providing the service & Look out for "Endpoint" under Vpc in aws
+Paste the Provided "Endpoint" in "endpoint" in vpc & choose "Find service by name" and paste the endpoint in same vpc
+After Creation use the "DNS names" in endpoint to connect to appropriate service and port number
+Example:- +curl -v telnet://"DNS names":80 -vvv
+curl -v telnet://vpce-007ffnb9qkcnjgult-yfhmywqh.vpce-svc-083cqvm2ta3rxqat5v.us-east-1.vpce.amazonaws.com:80 -vvv
+Create the service or instance in same region,vpc,& subnet's
+Permission should be done In Master Db
+GRANT REPLICATION CLIENT, REPLICATION SLAVE ON Db.* TO 'passwd'@'%';
+It should be done by the provider or the root in master
+Permission should be done In SLAVE DB
+GRANT ALTER, CREATE, DROP, INDEX, INSERT, UPDATE, DELETE, SELECT ON . TO'passwd'@'%'; +GRANT ALL PRIVILEGES ON awsdms_control.* TO ''@'%';
+CREATE USER 'username'@'%' IDENTIFIED BY 'passwd';
+GRANT ALL PRIVILEGES ON * . * TO 'username'@'%';
+FLUSH PRIVILEGES;
+And create the target endpoint for master and slave and test the connection.
+ + + + + + + + + + + + + +https://docs.aws.amazon.com/aws-sdk-php/v2/guide/feature-dynamodb-session-handler.html
+ + + + + + + + + + + + + +<<<<<<< HEAD
+[ec2 ssh error](https://aws.amazon.com/premiumsupport/knowledge-center/ec2-linux-resolve-ssh-connection-errors/)
+++ + + + + + + + + + + + + +++++++++++++1a23421 (Rebase_Edited_Update)
+
Allow only Specific user to access branch
+ +{
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "VisualEditor0",
+ "Effect": "Allow",
+ "Action": "codecommit:*",
+ "Resource": "repoarn",
+ "Condition": {
+ "StringEqualsIfExists": {
+ "codecommit:References": [
+ "refs/heads/Branch-name"
+ ]
+ }
+ }
+ }
+ ]
+}
+
Git command to clone
git clone --single-branch -b Branch-name repo-url
+https://medium.com/@it.melnichenko/invoke-a-lambda-across-multiple-aws-accounts-8c094b2e70be
+ + + + + + + + + + + + + + +Points to note
+* Allow each subnet's CIDR entries in the security groups of both the EKS cluster and the EC2 instance, so the two security groups permit communication between them
+* Note the Ethernet (ENI) interface name, e.g. "eth0", before running the command; it may change depending on the AWS image used.
+
+
+----
+
This section describes how to create and work with NAT instances to enable resources in a private subnet to communicate outside the virtual private cloud.
+Use the following procedure to create a VPC with a public subnet and a private subnet.
+Open the Amazon VPC console at https://console.aws.amazon.com/vpc/.
+Choose Create VPC.
+For Resources to create, choose VPC and more.
+For Name tag auto-generation, enter a name for the VPC.
+To configure the subnets, do the following:
+For Number of Availability Zones, choose 1 or 2, depending on your needs.
+For Number of public subnets, ensure that you have one public subnet per Availability Zone.
+For Number of private subnets, ensure that you have one private subnet per Availability Zone.
+Choose Create VPC.
+Create a security group with the rules described in the following table. These rules enable your NAT instance to receive internet-bound traffic from instances in the private subnet, as well as SSH traffic from your network. The NAT instance can also send traffic to the internet, which enables the instances in the private subnet to get software updates.
+The following are the inbound recommended rules.
+Source | +Protocol | +Port range | +Comments | +
---|---|---|---|
Private subnet CIDR | +TCP | +80 | +Allow inbound HTTP traffic from servers in the private subnet | +
Private subnet CIDR | +TCP | +443 | +Allow inbound HTTPS traffic from servers in the private subnet | +
Public IP address range of your network | +TCP | +22 | +Allow inbound SSH access to the NAT instance from your network (over the internet gateway) | +
The following are the recommended outbound rules.
+Destination | +Protocol | +Port range | +Comments | +
---|---|---|---|
0.0.0.0/0 | +TCP | +80 | +Allow outbound HTTP access to the internet | +
0.0.0.0/0 | +TCP | +443 | +Allow outbound HTTPS access to the internet | +
Open the Amazon VPC console at https://console.aws.amazon.com/vpc/.
+In the navigation pane, choose Security groups.
+Choose Create security group.
+Enter a name and description for the security group.
+For VPC, select the ID of the VPC for your NAT instance.
+Add rules for inbound traffic under Inbound rules as follows:
+Choose Add rule. Choose HTTP for Type and enter the IP address range of your private subnet for Source.
+Choose Add rule. Choose HTTPS for Type and enter the IP address range of your private subnet for Source.
+Choose Add rule. Choose SSH for Type and enter the IP address range of your network for Source.
+Add rules for outbound traffic under Outbound rules as follows:
+Choose Add rule. Choose HTTP for Type and enter 0.0.0.0/0 for Destination.
+Choose Add rule. Choose HTTPS for Type and enter 0.0.0.0/0 for Destination.
+Choose Create security group.
+For more information, see Security groups.
+A NAT AMI is configured to run NAT on an EC2 instance. You must create a NAT AMI and then launch your NAT instance using your NAT AMI.
+If you plan to use an operating system other than Amazon Linux for your NAT AMI, refer to the documentation for this operating system to learn how to configure NAT. Be sure to save these settings so that they persist even after an instance reboot.
+Launch an EC2 instance running AL2023 or Amazon Linux 2. Be sure to specify the security group that you created for the NAT instance.
+Connect to your instance and run the following commands on the instance to enable iptables.
+sudo yum install iptables-services -y
+sudo systemctl enable iptables
+sudo systemctl start iptables
+
Do the following on the instance to enable IP forwarding such that it persists after reboot:
+Using a text editor, such as nano or vim, create the following configuration file: /etc/sysctl.d/custom-ip-forwarding.conf
.
Add the following line to the configuration file.
+3. Save the configuration file and exit the text editor.
+
+4. Run the following command to apply the configuration file.
+
In the following example output, docker0
is a network interface created by docker, eth0
is the primary network interface, and lo
is the loopback interface.
Iface MTU RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP TX-OVR Flg
+docker0 1500 0 0 0 0 0 0 0 0 BMU
+eth0 9001 7276052 0 0 0 5364991 0 0 0 BMRU
+lo 65536 538857 0 0 0 538857 0 0 0 LRU
+
In the following example output, the primary network interface is enX0
.
Iface MTU RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP TX-OVR Flg
+enX0 9001 1076 0 0 0 1247 0 0 0 BMRU
+lo 65536 24 0 0 0 24 0 0 0 LRU
+
In the following example output, the primary network interface is ens5
.
Iface MTU RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP TX-OVR Flg
+ens5 9001 14036 0 0 0 2116 0 0 0 BMRU
+lo 65536 12 0 0 0 12 0 0 0 LRU
+
eth0
, replace eth0
with the primary network interface that you noted in the previous step.sudo /sbin/iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
+sudo /sbin/iptables -F FORWARD
+sudo service iptables save
+
Use the following procedure to launch a NAT instance using the VPC, security group, and NAT AMI that you created.
+Open the Amazon EC2 console at https://console.aws.amazon.com/ec2/.
+On the dashboard, choose Launch instance.
+For Name, enter a name for your NAT instance.
+For Application and OS Images, select your NAT AMI (choose Browse more AMIs, My AMIs).
+For Instance type, choose an instance type that provides the compute, memory, and storage resources that your NAT instance needs.
+For Key pair, select an existing key pair or choose Create new key pair.
+For Network settings, do the following:
+Choose Edit.
+For VPC, choose the VPC that you created.
+For Subnet, choose the public subnet that you created.
+For Auto-assign public IP, choose Enable. Alternatively, after you launch the NAT instance, allocate an Elastic IP address and assign it to the NAT instance.
+For Firewall, choose Select existing security group and then choose the security group that you created.
+Choose Launch instance. Choose the instance ID to open the instance details page. Wait for the instance state to change to Running and for the status checks to succeed.
+Disable source/destination checks for the NAT instance (see 5. Disable source/destination checks).
+Update the route table to send traffic to the NAT instance (see 6. Update the route table).
+Each EC2 instance performs source/destination checks by default. This means that the instance must be the source or destination of any traffic it sends or receives. However, a NAT instance must be able to send and receive traffic when the source or destination is not itself. Therefore, you must disable source/destination checks on the NAT instance.
+Open the Amazon EC2 console at https://console.aws.amazon.com/ec2/.
+In the navigation pane, choose Instances.
+Select the NAT instance.
+Choose Actions, Networking, Change source/destination check.
+For Source/destination checking, select Stop.
+Choose Save.
+If the NAT instance has a secondary network interface, choose it from Network interfaces on the Networking tab. Choose the interface ID to go to the network interfaces page. Choose Actions, Change source/dest. check, clear Enable, and choose Save.
+The route table for the private subnet must have a route that sends internet traffic to the NAT instance.
+Open the Amazon VPC console at https://console.aws.amazon.com/vpc/.
+In the navigation pane, choose Route tables.
+Select the route table for the private subnet.
+On the Routes tab, choose Edit routes and then choose Add route.
+Enter 0.0.0.0/0 for Destination and the instance ID of the NAT instance for Target.
+Choose Save changes.
+For more information, see Configure route tables.
+After you have launched a NAT instance and completed the configuration steps above, you can test whether an instance in your private subnet can access the internet through the NAT instance by using the NAT instance as a bastion server.
+To allow instances in your private subnet to send ping traffic to the NAT instance, add a rule to allow inbound and outbound ICMP traffic. To allow the NAT instance to serve as a bastion server, add a rule to allow outbound SSH traffic to the private subnet.
+Open the Amazon VPC console at https://console.aws.amazon.com/vpc/.
+In the navigation pane, choose Security groups.
+Select the check box for the security group associated with your NAT instance.
+On the Inbound rules tab, choose Edit inbound rules.
+Choose Add rule. Choose All ICMP - IPv4 for Type. Choose Custom for Source and enter the IP address range of your private subnet. Choose Save rules.
+On the Outbound rules tab, choose Edit outbound rules.
+Choose Add rule. Choose SSH for Type. Choose Custom for Destination and enter the IP address range of your private subnet.
+Choose Add rule. Choose All ICMP - IPv4 for Type. Choose Anywhere - IPv4 for Destination. Choose Save rules.
+Launch an instance into your private subnet. You must allow SSH access from the NAT instance, and you must use the same key pair that you used for the NAT instance.
+Open the Amazon EC2 console at https://console.aws.amazon.com/ec2/.
+On the dashboard, choose Launch instance.
+Select your private subnet.
+Do not assign a public IP address to this instance.
+Ensure that the security group for this instance allows inbound SSH access from your NAT instance, or from the IP address range of your public subnet, and outbound ICMP traffic.
+Select the same key pair that you used for the NAT instance.
+To verify that the test instance in your private subnet can use your NAT instance to communicate with the internet, run the ping command.
+From your local computer, configure SSH agent forwarding, so that you can use the NAT instance as a bastion server. +
+From your local computer, connect to your NAT instance. +
+From the NAT instance, run the ping command, specifying a website that is enabled for ICMP.
+To confirm that your NAT instance has internet access, verify that you received output such as the following, and then press Ctrl+C to cancel the ping command. Otherwise, verify that the NAT instance is in a public subnet (its route table has a route to an internet gateway).
+PING ietf.org (104.16.45.99) 56(84) bytes of data.
+64 bytes from 104.16.45.99 (104.16.45.99): icmp_seq=1 ttl=33 time=7.88 ms
+64 bytes from 104.16.45.99 (104.16.45.99): icmp_seq=2 ttl=33 time=8.09 ms
+64 bytes from 104.16.45.99 (104.16.45.99): icmp_seq=3 ttl=33 time=7.97 ms
+...
+
To confirm that your private instance has internet access through the NAT instance verify that you received output such as the following, and then press Ctrl+C to cancel the ping command.
+PING ietf.org (104.16.45.99) 56(84) bytes of data.
+64 bytes from 104.16.45.99 (104.16.45.99): icmp_seq=1 ttl=33 time=8.76 ms
+64 bytes from 104.16.45.99 (104.16.45.99): icmp_seq=2 ttl=33 time=8.26 ms
+64 bytes from 104.16.45.99 (104.16.45.99): icmp_seq=3 ttl=33 time=8.27 ms
+...
+
If the ping command fails from the server in the private subnet, use the following steps to troubleshoot the issue:
+Verify that you pinged a website that has ICMP enabled. Otherwise, your server can't receive reply packets. To test this, run the same ping command from a command line terminal on your own computer.
+Verify that the security group for your NAT instance allows inbound ICMP traffic from your private subnet. Otherwise, your NAT instance can't receive the ping command from your private instance.
+Verify that you disabled source/destination checking for your NAT instance. For more information, see 5. Disable source/destination checks.
+Verify that you configured your route tables correctly. For more information, see 6. Update the route table.
+If you no longer require the test server in the private subnet, terminate the instance so that you are no longer billed for it. For more information, see Terminate your instance in the Amazon EC2 User Guide.
+If you no longer require the NAT instance, you can stop or terminate it, so that you are no longer billed for it. If you created a NAT AMI, you can create a new NAT instance whenever you need one.
+https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-comparison.html
+ + + + + + + + + + + + + +RDS Performance Insights is a database performance tuning and monitoring feature that helps you quickly assess the load on your database and determine when and where to take action.
+ + + + + + + + + + + + + +TLS 1.2 to become the minimum TLS protocol level for all AWS API endpoints
+https://aws.amazon.com/blogs/security/tls-1-2-required-for-aws-endpoints/
+ + + + + + + + + + + + + +https://ahmedahamid.com/which-is-better/#:~:text=The%20graph%20shows%20that%20gp3,allows%20a%20much%20higher%20throughput.
+ + + + + + + + + + + + + + + + +use locate command to find the files for starting ""service php56-php-fpm status""
+cat /etc/os-release +<<<<<<< HEAD +<<<<<<< HEAD +yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +======= +yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpmyum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+++>>>>>> 1a23421 (Rebase_Edited_Update)
+yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm +yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+++++++++++++33a4921 (AWS) +yum install http://rpms.remirepo.net/enterprise/remi-release-7.rpm -y +yum install yum-utils -y +yum-config-manager --enable remi-php56 +yum install php56 php56-mcrypt php56-cli php56-gd php56-curl php56-mysql php56-ldap php56-zip php56-fileinfo php56-php-fpm install php56-php-pecl-mysqlnd-ms php56-php-pdo -y +cd /usr/bin +ln -sf php56 php
+
ln -sf php56 php +php -v
+wget https://cdn.mysql.com/archives/mysql-5.5/MySQL-5.5.55-1.el7.x86_64.rpm-bundle.tar +tar -xvf MySQL-5.5. +rm -rf .tar +yum install * +cat /etc/my.conf
+Check my.cnf and set correct access log +PLEASE REMEMBER TO SET A PASSWORD FOR THE MySQL root USER ! +To do so, start the server, then issue the following commands: + /usr/bin/mysqladmin -u root password '*' + /usr/bin/mysqladmin -u root -h ip-172-31-92-146.ec2.internal password '***'
+Alternatively you can run:
+/usr/bin/mysql_secure_installation
+
sudo service mysql stop +sudo nano /etc/mysql/my.cnf
+add it in my.cnf +user=mysql
+sudo chown -R mysql:mysql /var/lib/mysql/ +sudo service mysql start
+ + + + + + + + + + + + + +Bot crawler[aws.com]](https://aws.amazon.com/blogs/architecture/field-notes-how-to-identify-and-block-fake-crawler-bots-using-aws-waf/).
+ + + + + + + + + + + + + +https://docs.aws.amazon.com/codepipeline/latest/userguide/tutorials.html
+https://pipelines.devops.aws.dev/
+ + + + + + + + + + + + + +proceed by elimination (rule out the answers that you know for sure are wrong) +remaining answers, understand which one makes the most sense +if a solution seems feasible but highly complicated, probably it is wrong
+white papers : + architecting for the cloud: aws best practices + aws well-architected framework + aws disaster recovery
+read each service's faq: + faq covers a lot of questions asked in exams + example : h
+ + + + + + + + + + + + + +Code Commit
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Sid": "VisualEditor0",
+ "Effect": "Allow",
+ "Action": "codecommit:*",
+ "Resource": [
+ "arn:aws:codecommit:us-east-1:123456789123:repo-name",
+ "arn:aws:codecommit:us-east-1:123456789123:repo-name",
+ ],
+ "Condition": {
+ "StringEqualsIfExists": {
+ "codecommit:References": [
+ "refs/heads/master",
+ "refs/heads/main",
+ "refs/heads/mani",
+ "refs/heads/mack",
+ "refs/heads/eswar"
+ ]
+ },
+ "IpAddress": {
+ "aws:SourceIp": [
+ "192.168.0.100",
+ "192.168.0.101"
+ ]
+ }
+ }
+ }
+ ]
+ }
+
<<<<<<< HEAD
+Master account
+lambda-ec2-fleet-management
+ec2-start-stop-lambda-role
+======= +Add service policy
+AmazonEC2FullAccess +ServiceQuotasFullAccess
+++++++++++++++1a23421 (Rebase_Edited_Update) +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { +<<<<<<< HEAD + "Service": "lambda.amazonaws.com" +======= + "AWS": "arn:aws:iam::root-acccount-number:role/ec2-start-stop-lambda-role" +1a23421 (Rebase_Edited_Update) + }, + "Action": "sts:AssumeRole" + } + ] +<<<<<<< HEAD +}
+
AdministratorAccess +AmazonSNSFullAccess +AWSMarketplaceFullAccess +Tag
+date +19/06/2023
+Project +Ec2-Fleet-Management
+==================================
+Slave account
+ec2-fleet-management-lambda-assume-role
+Description :- Ec2-Fleet-Management By Lambda From Master Account
+{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::362778997593:role/lambda-ec2-fleet-management" + }, + "Action": "sts:AssumeRole" + } + ] +}
+permission
+AmazonEC2FullAccess +ServiceQuotasFullAccess
+Tag
+date +19/06/2023
+Project +Ec2-Fleet-Management +======= +}
+++ + + + + + + + + + + + + +++++++++++++1a23421 (Rebase_Edited_Update)
+
Create a Role
+Put +In Trusted Relationship
+{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::362778997593:root" + }, + "Action": "sts:AssumeRole", + "Condition": {} + } + ] +}
+Add All These In Permission Block
+AdministratorAccess AWS managed +Billing AWS managed +AWSBillingConductorFullAccess
+Allow Access on Behalf of root account
+tag
+ + + + + + + + + + + + + +https://dev.to/mlabouardy/devops-bulletin-86-serverless-testing-61j
+https://dev.to/kumo/learn-serverless-on-aws-step-by-step-databases-kkg
+ + + + + + + + + + + + + +[mysqld]
+port = 3306
+datadir=/var/lib/mysql
+socket=/var/lib/mysql/mysql.sock
+tmpdir = /var/lib/mysql/mysql_temp
+slave-skip-errors=1062,1146,1053,1064,1032,1677
+slow_query_log = ON
+long_query_time = 10
+slow_query_log_file = /var/log/mysqld/slow.log
read_buffer_size = 8M +sort_buffer_size = 8M
+symbolic-links=0 +user=mysql
+innodb-buffer-pool-size = 24M +innodb-flush-method = O_DIRECT +innodb-flush-log-at-trx-commit = 2 +innodb_buffer_pool_instances = 8 +innodb_log_file_size = 50M +innodb_log_buffer_size = 32M +innodb_thread_concurrency = 8
+max-heap-table-size = 32M +query_cache_type = 1 +query_cache_size = 800M +max_connections = 1700 +wait_timeout = 800 +thread_cache_size = 512 +open-files-limit = 65535 +table-definition-cache = 1024 +table-open-cache = 2048
+max-allowed-packet = 1G +max-connect-errors = 1000
+log-bin = /var/lib/mysql/mysql-bin +expire-logs-days = 14 +sync-binlog = 1
+[mysqld_safe] +log-error=/var/log/mysqld.log +pid-file=/var/run/mysqld/mysqld.pid
+[client] +port = 3306 +socket = /var/lib/mysql/mysql.sock
+Tuning mysql
+read_buffer_size = 62M +sort_buffer_size = 62M +innodb_buffer_pool_size = 24M +read_buffer_size = 62M --- 1MB for every 1GB of RAM) +sort_buffer_size = 62M --- 1MB for every 1GB of RAM) +innodb_buffer_pool_size = 24M
+ + + + + + + + + + + + + +Understanding T2 Standard Instance CPU Credits.
+Key concepts and definitions for burstable performance Instances.
+Lower Costs Today by Right-Sizing Your EC2 Instance Amazon EC2 T3 Instances.
+Check with other types of services +https://aws.amazon.com/products/storage/
+This Type of service provides
+Info
+AWS Storage Gateway
+connecting s3 via NFS
AND SMB
service
allow users to download only their own files.
+use presigned URLs to share access to your S3 buckets. When you create a presigned URL, you associate it with a specific action and an expiration date. Anyone who has access to the URL can perform the action embedded in the URL as if they were the original signing user.
+
https://www.youtube.com/watch?v=N2CLxDgMDDo&t=1568s
+ + + + + + + + + + + + + + + + +For implementations aws-waf-security aws.com.
+ + +Athena Query domain.com.
+ + + +waf rate_based rule aws.com.
+ + + + + + + + + + + + + +https://ecsworkshop.com/
+ + + + + + + + + + + + + +Add Required Policies to Roles
+
+Inline Policy
+
+=== "Role Name: Mk-ec2"
+ ```
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Action": "sts:AssumeRole",
+ "Resource": "arn:aws:iam::331911183167:role/Mk-ec2-policy"
+ }
+ ]
+ }
+ ```
+
+Trusted entities
+
+=== "Trusted entities"
+ ```
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "Service": "ec2.amazonaws.com"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+ }
+ ```
+
Add Required Policies to Roles
+
+Trusted entities
+
+=== "Trusted entities"
+ ```
+ {
+ "Version": "2012-10-17",
+ "Statement": [
+ {
+ "Effect": "Allow",
+ "Principal": {
+ "AWS": "arn:aws:iam::362778997593:role/Mk-ec2"
+ },
+ "Action": "sts:AssumeRole"
+ }
+ ]
+ }
+ ```
+
Add content in .aws/config; if the file is not present, create it.
+
+=== "Trusted entities"
+ ```
+ [profile seeding]
+ role_arn = arn:aws:iam::331911183167:role/Mk-ec2-policy
+ credential_source = Ec2InstanceMetadata
+ ```
+
aws sts get-caller-identity --profile seeding
+
![Eks cluster using iam Referenced url](https://antonputra.com/kubernetes/add-iam-user-and-iam-role-to-eks/#add-iam-user-to-eks-cluster)
+
+If You create a Cluster using IAM User You Don't Need to do this,
+If You have accessing using Role without user, use this Below Method.
+
+=== ""
+ ``` bash
+ aws eks update-kubeconfig --region us-east-1 --name seeding --profile seeding
+
+ kubectl edit -n kube-system configmap/aws-auth
+
+ ...
+ mapUsers: |
+ - rolearn: arn:aws:iam::331911183167:role/Mk-ec2-policy
+ username: Mk-ec2-policy
+ groups:
+ - system:masters
+ ...
+
+ ```
+ apiVersion: v1
+ data:
+ mapRoles: |
+ - groups:
+ - system:bootstrappers
+ - system:nodes
+ rolearn: arn:aws:iam::331911183167:role/AmazonEKSNodeRole
+ username: system:node:{{EC2PrivateDNSName}}
+ - groups:
+ - system:bootstrappers
+ - system:nodes
+ - system:node-proxier
+ rolearn: arn:aws:iam::331911183167:role/AmazonEKSFargatePodExecutionRole
+ username: system:node:{{SessionName}}
+ - rolearn: arn:aws:iam::331911183167:role/Mk-ec2-policy
+ username: Mk-ec2-policy
+ groups:
+ - system:masters
+
+ ```
+
+
+ aws eks update-kubeconfig \
+ --region us-east-1 \
+ --name seeding \
+ --profile seeding
+ ```
+
Helm go template function +https://helm-playground.com/cheatsheet.html
+ + + + + + + + + + + + + +kubectl get - list resources +kubectl describe - show detailed information about a resource +kubectl logs - print the logs from a container in a pod +kubectl exec - execute a command on a container in a pod +You can use these commands to see when applications were deployed, what their current statuses are, where they are running and what their configurations are.
+Kubelet, a process responsible for communication between the Kubernetes control plane and the Node; it manages the Pods and the containers running on a machine. +A container runtime (like Docker) responsible for pulling the container image from a registry, unpacking the container, and running the application.
+kubectl exec -ti $POD_NAME -- bash
+kubectl scale deployments/kubernetes-bootcamp --replicas=4
+kubectl get pods -o wide
+ + + + + + + + + + + + + +It is Important In-order to scale pods based on number of request present in selenium-grid queue.
+
install https://keda.sh/ plugin in kube
+ helm repo add kedacore https://kedacore.github.io/charts
+ kubectl create namespace keda # [ Create only if Needed]
+ helm install keda kedacore/keda --namespace keda # [ Dont use Namspace if selenium is running in default workspace]
+ helm install -f values.yaml docker-selenium
+
https://docs.aws.amazon.com/eks/latest/userguide/metrics-server.html
+
+kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
+
+kubectl get deployment metrics-server -n kube-system
+
https://medium.com/cuddle-ai/auto-scaling-microservices-with-kubernetes-event-driven-autoscaler-keda-8db6c301b18
+For full documentation visit azuredevopslabs.com.
+For full documentation visit amazon.com.
+For full documentation visit amazon.com.
+For full documentation visit amazon.com.
+For full documentation visit amazon.com.
+For full documentation visit amazon.com.
+For full documentation visit amazon.com.
+For full documentation visit amazon.com.
+For full documentation visit amazon.com.
+For full documentation visit miraclemill.com.
+For full documentation visit github.com.
+For full documentation visit anthology.com.
+ + + + + + + + + + + + + + Download helm chart
+ remove other edge&firefox&node's from helm chart
+
+ check this repo for scaling pods {
+ https://github.com/SeleniumHQ/docker-selenium/issues/1688
+ https://github.com/prashanth-volvocars/docker-selenium/blob/auto-scaling/charts/selenium-grid/values.yaml
+ }
+
install https://keda.sh/ plugin in kube
+ helm repo add kedacore https://kedacore.github.io/charts
+ kubectl create namespace keda
+ helm install keda kedacore/keda --namespace keda
+ helm install -f values.yaml docker-selenium
+
try graphql or /status
+
pods are scale-in and scale-out by keda,
+ nodes are scaled out and scaled-in by autoscaling group by aws
+ By how individual grid isolated components scale's by default it have only 1 replica enabled in helm
+ Increase resource size for pod's
+
serviceaccount . +helmcharts +resource limit +readiness liveliness
+archict containzered
+clusterarchitech in
+controller * +workload deamon replica
+internal policy
+storage presitance
+config maps * secrets * how to create secrets
+organize clustr access using kubeconfig
use service account for pod to allow access to s3 & use boto3 to file object get from s3.
+costallocation tag for 24/7 running server and 8 hrs shutdown server's +all tag's for all server
+Wait for 30 days to check the recommendation, for servers which are shutting down daily.
+Share the screenshot of the existing EKS cluster and ClusterIP.
+[shared resources & Links]
+https://github.com/awslabs/amazon-eks-ami/blob/master/files/max-pods-calculator.sh +https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html +https://docs.aws.amazon.com/eks/latest/userguide/fargate-profile.html +https://aws.amazon.com/blogs/containers/exposing-kubernetes-applications-part-1-service-and-ingress-resources/
+ + + + + + + + + + + + + +Mostly used two types
+REST, SOAP
+Representational State Transfer (general-purpose web): supports XML, HTML, JSON, and plain text as easy formats; JSON (JavaScript Object Notation) is the most widely used worldwide because JSON is lightweight.
+simple object access protocol (messaging system) xml format is heavy.
+soap use websocket wsd
+REST APIs use the HTTP protocol and can be used asynchronously.
+REST is synchronous and stateless (REST is a client-server architecture).
+post create +get read +put update +delete
+--- REST --- +limited resouces and bandwidth
+ + + + + + + + + + + + + +## Service Account In Kubernetes + 'https://medium.com/the-programmer/working-with-service-account-in-kubernetes-df129cb4d1cc' + https://www.cncf.io/blog/2019/05/10/kubernetes-core-concepts/
+Service Account: It is used to authenticate machine level processes to get access to our Kubernetes cluster. The API server is responsible for such authentication to the processes running in the pod.
+For Example: + An application like Prometheus accessing the cluster to monitor it is a type of service account
+So,
+
+A service account is an identity that is attached to the processes running within a pod.
+
When you create a pod, if you do not specify a service account, it is automatically assigned the default service account in the same namespace.
+Case 1:
+ My Web Page which has a list of items to be displayed, this data needs to be fetched from an API server hosted in the Kubernetes cluster as shown above in the figure. To do so, we need to a service account that will be enabled by cluster API servers to authenticate and access the data from the cluster servers.
+
the package manager for Kubernetes +Helm allows you to add variables and use functions inside your template files. This makes it perfect for scalable applications that'll eventually need to have their parameters changed.
+A Chart is a Helm package. It contains all of the resource definitions necessary to run an application, tool, or service inside of a Kubernetes cluster. Think of it like the Kubernetes equivalent of a Homebrew formula, an Apt dpkg, or a Yum RPM file.
+A Repository is the place where charts can be collected and shared. It's like Perl's CPAN archive or the Fedora Package Database, but for Kubernetes packages.
+A Release is an instance of a chart running in a Kubernetes cluster. One chart can often be installed many times into the same cluster. And each time it is installed, a new release is created. Consider a MySQL chart. If you want two databases running in your cluster, you can install that chart twice. Each one will have its own release, which will in turn have its own release name.
+With these concepts in mind, we can now explain Helm like this:
+Helm installs charts into Kubernetes, creating a new release for each installation. And to find new charts, you can search Helm chart repositories.
+To install a new package, use the helm install command. At its simplest, it takes two arguments: A release name that you pick, and the name of the chart you want to install.
+https://sysdig.com/blog/kubernetes-limits-requests/
+ + + + + + + + + + + + + + aws eks update-kubeconfig --name <cluster-name> --profile
+ ```
+ ```bash
+ aws eks update-kubeconfig --name seeding --profile seeding --region us-east-1
+
eksctl create cluster --name selenium --region ap-south-1
+ ```
+ ```bash
+ eksctl create cluster "selenium" --zones us-east-1a,us-east-1b,us-east-1c,us-east-1d,us-east-1f
+ ```
+ ```bash
+ eksctl register cluster --name selenium --provider other --region us-east-1
+ ```
+```bash
+ aws eks update-kubeconfig --region us-east-1 --name selenium
+
rm ~/.kube/config
+
+ helm install selenium-grid docker-selenium/selenium-grid
+ kubectl get all -n selenium-grid
+ kubectl get services
+
kubectl delete pod selenium-chrome-node --grace-period=0 --force
+ kubectl delete pod selenium-edge-node --grace-period=0 --force
+ kubectl delete pod selenium-firefox-node --grace-period=0 --force
+ kubectl delete pod selenium-hub-c6c94c6c4-h558k --grace-period=0 --force
+
kubectl scale --replicas=5 replicaset/selenium-chrome-node-7bf4f8dc77
+ kubectl scale deployment.apps/selenium-node-chrome-node --replicas=1
+
kubectl get rs selenium-chrome-node-5f44bffc9b -o jsonpath="{.status.replicas} {.status.availableReplicas}"
+
link-to-delete-crd[https://learn.microsoft.com/en-us/answers/questions/602466/custom-crds-not-getting-deleted-in-aks-cluster-how] +
+ kubectl get scaledobject selenium-chrome-scaledobject -n default -o jsonpath={.status.externalMetricNames}
+
eksctl create nodegroup --cluster=seeding \
+ --name=node2 \
+ --node-type=c5.2xlarge \
+ --nodes=3 \
+ --nodes-min=3 \
+ --nodes-max=3 \
+ --node-volume-size=20 \
+ --ssh-access \
+ --ssh-public-key=6548652153 \
+ --managed \
+ --region ap-south-1
+ ```
+
+
+### To delete node group
+```bash
+ eksctl delete nodegroup --cluster=seeding --region ap-south-1
+ eksctl delete --cluster=seeding --region ap-south-1
+
kubectl --namespace monitoring port-forward svc/prometheus-k8s 9090
+
+ kubectl kubernetes port-forward service/kubernetes 9090
+
kubectl exec --stdin --tty busybox -- /bin/bash
+ kubectl exec --it busybox -n namespace -- /bin/bash
+
k get events -n {namespace} --field-selector involvedobject.name={pod_name} --sort-by='.metadata.creationTimestamp'
+
kubectl delete pod $(kubectl get pods --field-selector=status.phase=Failed -o jsonpath='{.items[?(@.status.reason=="Evicted")].metadata.name}')
+
+Turning off swap is compulsory. +kubeadm init --apiserver-advertise-address {public_ip_or_private_ip_for api server} --pod-network-cidr= {Docker_container_network_subnet}
+Install network flannel +wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml +Change it in netowrk section for custom docker network subnet +kubectl apply -f https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
+kubectl get nodes +kubectl get pods -n kube-system -o wide
+It is Important In-order to scale pods based on number of request present in selenium-grid queue.
+
install https://keda.sh/ plugin in kube
+ helm repo add kedacore https://kedacore.github.io/charts
+ kubectl create namespace keda # [ Create only if Needed]
+ helm install keda kedacore/keda --namespace keda # [ Dont use Namspace if selenium is running in default workspace]
+ helm install -f values.yaml docker-selenium
+
https://docs.aws.amazon.com/eks/latest/userguide/metrics-server.html
+
+kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
+
+kubectl get deployment metrics-server -n kube-system
+
Install cluster prerequisite content into the EKS cluster
+ + + + + + + + + + + + + +To
#!/bin/bash
# Restrict the given users to chrooted, SFTP-only access.
# Usage: ./script.sh user1 user2 ...
echo "Enter Group Name To Create for sftp user"

# read sftpgroup
# sftpgroup=""
sftpgroup="sftpgroup_resticted"
groupadd "$sftpgroup"

echo "Entered User names"
echo "$@"
users=("$@")

echo "Permission Updated for respective User's"

for username in "${users[@]}"
do
    ls -ld "/home/$username"
    # sshd's ChrootDirectory requires the directory to be root-owned and NOT
    # group/world-writable, hence root ownership and mode 755 (the original
    # used an undefined $TOKEN as the group and a group-writable 775).
    chown "root:$sftpgroup" "/home/$username"
    chmod 755 "/home/$username"
    sudo usermod -a -G "$sftpgroup" "$username"
done

echo "Updating SSH config file for sftp users"

# Append the Match block to the real sshd config (the original wrote to the
# bogus path "/5") and match the group we actually created above (the
# original matched the unrelated group name "sftpuser").
cat <<EOF >> /etc/ssh/sshd_config
Match Group $sftpgroup
    ChrootDirectory /home/%u
    ForceCommand internal-sftp
    X11Forwarding no
    AllowTcpForwarding no
EOF

service sshd restart

status="$?"
echo "$status"
if [ "$status" -eq 0 ]
then
    echo "Update Done"
else
    echo "Update not done"
fi
+
Example
+ +mkdocs new [dir-name]
- Create a new project.
Text can be deleted and replacement text added. This can also be
+combined into a single operation. Highlighting is also
+possible and comments can be added inline.
Formatting can also be applied to blocks by putting the opening and closing +tags on separate lines and adding new lines between the tags and the content.
+ +Highlighting text¶
+Lorem ipsum dolor sit amet, (1) consectetur adipiscing elit.
+Phasellus posuere in sem ut cursus (1)
+Info
+Installing Serverless Framework as a standalone binary
+For full documentation visit domain.com.
+Graph
+mermaid
+graph LR
+ A[Start] --> B{Error?};
+ B -->|Yes| C[Hmm...];
+ C --> D[Debug];
+ D --> B;
+ B ---->|No| E[Yay!];
Mysql
+```
+[mysqld]
+port = 3306
+datadir=/var/lib/mysql
+socket=/var/lib/mysql/mysql.sock
+tmpdir = /var/lib/mysql/mysql_temp
+slave-skip-errors=1062,1146,1053,1064,1032,1677
+slow_query_log = ON
+long_query_time = 10
+slow_query_log_file = /var/log/mysqld/slow.log
+read_buffer_size = 8M
+sort_buffer_size = 8M
+```
+
symbolic-links=0
+user=mysql
+Note
+innodb-buffer-pool-size = 24M
+
+innodb-flush-method = O_DIRECT
+
+innodb-flush-log-at-trx-commit = 2
+
+innodb_buffer_pool_instances = 8
+
+innodb_log_file_size = 50M
+
+innodb_log_buffer_size = 32M
+
+innodb_thread_concurrency = 8
+
Note
+ #tmp-table-size = 32M
+
+ max-heap-table-size = 32M
+
+ query_cache_type = 1
+
+ query_cache_size = 800M
+
+ max_connections = 1700
+
+ wait_timeout = 800
+
+ thread_cache_size = 512
+
+ open-files-limit = 65535
+
+ table-definition-cache = 1024
+
+ table-open-cache = 2048
+
Note
+ max-allowed-packet = 1G
+
+ max-connect-errors = 1000
+
Note
+ log-bin = /var/lib/mysql/mysql-bin
+
+ expire-logs-days = 14
+
+ sync-binlog = 1
+
[mysqld_safe]
+log-error=/var/log/mysqld.log
+pid-file=/var/run/mysqld/mysqld.pid +
+PromaxT
+TrueNax
+hi
+ + + + + + + + + + + + + +Notes
+The existing architecture does not have a load balancer; when sudden requests come, the server may be unable to handle them.
+Requests reaching an unavailable server are dropped, and we have no record of the dropped requests.
+
+To solve this we can use a load balancer: it splits requests across whichever instances or services are available. The LB makes sure each service is available via health checks; refer to the AWS load balancer documentation to configure checks for specific requests.
+
Notes
+Is a basic structure of a Python program that can be used as a starting point for writing new code. It provides a basic framework for organizing the code and helps to ensure that the program has the necessary elements, such as import statements, variable and function definitions, and basic control structures.
+Notes
+Import statements for any required libraries or modules.
+Function definitions for any functions that will be used in the program.
+Variable definitions for any variables that will be used in the program.
+Basic control structures, such as loops and conditional statements.
+Placeholder comments or print statements to indicate where new code should be added.
+Here's an example of a Python skeleton for a program that calculates the sum of two numbers:
+
Import statements
+Function definitions
+Variable definitions
+
+Main program
+ if __name__ == '__main__':
+ # Add your code here
+
Note
+The skeleton provides a basic structure for the program, and you can add your own code within the placeholders to create a complete program. The skeleton can be customized to meet the specific needs of your program, such as adding additional import statements, functions, or variables, as needed.
+
Py
+"Wget python 2.7 Script"
+```bash py
"""Download an nginx config via wget and report whether the server replied 200 OK."""
import re
import subprocess

filename = "nginx.conf"  # fixed typo: original had "nginx.cof"
url = "http://rex.damicosoft.com/nginx_v3.txt"

# Pass the command as an argument list with shell=False: no shell-injection
# risk if filename/url ever come from user input, and no quoting pitfalls.
wget_cmd = ["wget", "-O", filename, url]
try:
    # wget prints the server status line (e.g. "HTTP/1.1 200 OK") on stderr,
    # which is folded into the captured output here.
    raw_output = subprocess.check_output(wget_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
    # A non-success download still produces output worth scanning; the
    # original crashed here before it could print "match not found".
    raw_output = err.output

output = raw_output.decode("utf-8", errors="replace")

# Raw string so the \s escape is unambiguous.
if re.search(r"200\sOK", output):
    print("match found")
else:
    print("match not found")
+```
+
Python Reference reference_url
+wget http://www.python.org/ftp/python/3.11.0/Python-3.11.0.tgz
+tar -xvf Python-3.11.0.tgz
+cd Python-3.11.0
+
+./configure --enable-loadable-sqlite-extensions --enable-optimizations --with-openssl=/usr/
+ (or)
+sudo ./configure --with-system-ffi --with-computed-gotos --enable-loadable-sqlite-extensions --with-openssl=/usr/
+
+make
+ (or)
+make -j ${nproc}
+
+make install
+ (or)
+make altinstall
+
+ln -sf /usr/local/bin/python3.11 /usr/bin/python
+ (or)
+ln -sf /usr/local/bin/python3.11 /usr/local/bin/python
+
create ''pip.conf'' file in same location and add the content
+ + +Info
+Installing Serverless Framework as a standalone binary
+For full documentation visit domain.com.
+Warning
+The recommended way to install Serverless Framework is via NPM.
+To install the latest version, run this command in your terminal:
+curl -o- -L https://slss.io/install | bash
+Upgrade
+serverless upgrade
+API key must be stored securely with audited access to the Lambda function only.
+Parameter Store provides secure, hierarchical storage for configuration data management and secrets management. You can store data such as passwords, database strings, Amazon Machine Image (AMI) IDs, and license codes as parameter values. You can store values as plaintext or encrypted data. You can reference Systems Manager parameters in your scripts, commands, Systems Manager documents, and configuration and automation workflows by using the unique name that you specified when you created the parameter.
+https://dev.to/kumo/learn-serverless-on-aws-step-by-step-databases-kkg
+ + + + + + + + + + + + + +Pattern
+SQS [serverlessland](https://serverlessland.com/patterns/apigw-sqs)
+
+Lambda [serverlessland](https://serverlessland.com/patterns/apigw-http-sqs-lambda-sls)
+
+SQS-lambda [github](https://github.com/aws-samples/serverless-patterns/tree/main/apigw-sqs-lambda)
+
+SQS-Lambda-SLS[Serverless](https://serverlessland.com/patterns/apigw-http-sqs-lambda-sls)
+
Best_practices +serverless deploy function -f functionName
+https://www.serverless.com/framework/docs/providers/aws/cli-reference/deploy-function
+Deploy only configuration changes
+serverless deploy function --function helloWorld --update-config
+ + + + + + + + + + + + + +https://blackfire.io/docs/integrations/paas/aws-lambda#:~:text=The%20Blackfire%20Agent%20cannot%20be,it%20as%20Blackfire.io%20Agent.&text=You%20may%20choose%20a%20t2.
+ + + + + + + + + + + + + +https://github.com/serverless/serverless/issues/6015
+ + + + + + + + + + + + + +wget https://sfc-repo.snowflakecomputing.com/snowsql/bootstrap/1.2/linux_x86_64/snowflake-snowsql-1.2.23-1.x86_64.rpm
+
echo "
+[snowflake-odbc]
+name=snowflake-odbc
+baseurl=https://sfc-repo.snowflakecomputing.com/odbc/linux/2.22.1/
+gpgkey=https://sfc-repo.snowflakecomputing.com/odbc/Snowkey-630D9F3CAB551AF3-gpg " >> /etc/yum.repos.d/snowflake-odbc.repo
+
echo "
+[ODBC Drivers]
+SnowflakeDSIIDriver=Installed
+
+[SnowflakeDSIIDriver]
+APILevel=1
+ConnectFunctions=YYY
+Description=Snowflake DSII
+Driver=/<path>/lib/libSnowflake.so
+DriverODBCVer=03.52
+SQLLevel=1" >> /etc/odbcinst.ini
+
+
+echo "
+
+" >> /etc/odbc.ini
+
Test with
+iodbctest "DSN=testodbc2;UID=mary;PWD=password"
+DSN on linux CentOS ODBC
+ + + + + + + + + + + + + +Note
+create or replace database sf_tuts;
+select current_database(), current_schema(), current_warehouse();
+select * from table;
select * from EMP_BASIC ;
+use warehouse
use WAREHOUSE SENT_MTM;
+create or replace table emp_basic (
+ em_id string ,
+ email string ,
+ trackid string ,
+ stats_id string ,
+ offer_id string ,
+ jobisp string
+ );
+
copy into emp_basic
+ from @%emp_basic
+ file_format = (type = csv field_optionally_enclosed_by='"')
+ pattern = '.*csv4.log.gz'
+ on_error = 'skip_file';
+
Sql copy into table.HTML Reference
+ +[ TO Check error before Loading data into the tables; ]
+COPY INTO emp_basic
+ FROM @%emp_basic
+ validation_mode=return_all_errors;
+
copy into EMP_BASIC + from @csv + pattern = 't9.csv.gz' + on_error = 'skip_file';
+desc table emp_basic;
+For loading from s3 AMAZON S3.html
+Option 3: Configuring AWS IAM User Credentials +
#staged from s3
+CREATE OR REPLACE STAGE my_t3_stage
+URL='s3://b1pk2az26c/tsty_snt_dt_120220_fn*'
+CREDENTIALS=(AWS_KEY_ID='**************' AWS_SECRET_KEY='***********************');
+
+
+COPY INTO SENTDATA
+FROM @my_t3_stage
+FILE_FORMAT = (type = csv NULL_IF = ('0000-00-00 00:00:00') SKIP_HEADER = 1 field_optionally_enclosed_by='"')
+PATTERN = '.*tsty_snt_dt_120220_fna[a-z]'
+ON_ERROR = 'skip_file';
+
Installing SnowSQL on Linux Using the RPM Package
+Downloading the SnowSQL RPM Package
+ https://developers.snowflake.com/snowsql/
- Download url.
Open a new terminal window.
+Get the account name from your snowflake login url
`example: https://wca45935.us-east-1.snowflakecomputing.com/`
+
snowsql -a <account_name> -u <login_name>
Enter your password when prompted. Enter !quit to quit the connection.
+Add your connection information to the Config File file-Location
~/.snowsql/config
accountname = <account_name>
+ username = <login_name>
+ password = <password>
Execute the following command to connect to Snowflake:
+snowsql
+Notes
+ +Notes
+Database Scale](https://www.heimdalldata.com/)
+Notes
+ +Former2 allows you to generate Infrastructure-as-Code outputs from your existing resources within your AWS account
+CDK ,python,sdk,terafform,
+Notes
+ +Sorry Dude Thats's Classified
+ + + + + + + + + + + + + +