[AWS] Amazon Web Services #aws
http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html https://www.youtube.com/watch?v=_wiGpBQGCjU
# check which user you are
echo $USER
# allow the current user to sudo without a password
echo "$USER ALL=(ALL) NOPASSWD:ALL" | sudo tee -a /etc/sudoers
# become root
sudo su
# install the build tools needed by the VirtualBox guest additions
apt-get update
apt-get install -y build-essential dkms linux-headers-$(uname -r)
# build and install the guest additions from the mounted ISO, then power off
cd /media/aws-admin/
sh ./VBoxLinuxAdditions.run
shutdown now
# install pip, then the AWS CLI
sudo apt-get install -y python-dev python-pip
sudo pip install awscli
# verify the install, then configure the access key, secret key, region, and output format
aws --version
aws configure
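# sanity check (assumes a CLI version recent enough to ship the sts subcommand):
# print the account, user id, and ARN the CLI is authenticating as
aws sts get-caller-identity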
cat <file> # output a file
tee # write output to a file and to stdout at the same time
cut -f 2 # print the 2nd column, per line
sed -n '5{p;q}' # print the 5th line in a file
sed 1d # print all lines, except the first
tail -n +2 # print all lines, starting on the 2nd
head -n 5 # print the first 5 lines
tail -n 5 # print the last 5 lines
expand -t 4 # convert tabs to 4 spaces
unexpand -a -t 4 # convert runs of 4 spaces to tabs
wc # word count
tr ' ' \\t # translate / convert characters to other characters
sort # sort data
uniq # filter out adjacent duplicate lines (pair with sort)
paste # combine rows of text, by line
join # combine rows of text, by initial column value
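# an illustrative combination of the tools above with AWS CLI output
# (the field number is an assumption; text-output columns vary by CLI version):
# print the unique home regions of all trails
aws cloudtrail describe-trails --output text | cut -f 4 | sort | uniq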
http://docs.aws.amazon.com/cli/latest/reference/cloudtrail/
# Limit: 5 trails total, with support for resource-level permissions
# list all trails
aws cloudtrail describe-trails
# list all S3 buckets
aws s3 ls
# create a new trail
aws cloudtrail create-subscription \
--name awslog \
--s3-new-bucket awslog2016
# list the names of all trails
aws cloudtrail describe-trails --output text | cut -f 8
# get the status of a trail
aws cloudtrail get-trail-status \
--name awslog
# delete a trail
aws cloudtrail delete-trail \
--name awslog
# delete the S3 bucket of a trail
aws s3 rb s3://awslog2016 --force
# add tags to a trail, up to 10 tags
aws cloudtrail add-tags \
--resource-id awslog \
--tags-list "Key=log-type,Value=all"
# list the tags of a trail
aws cloudtrail list-tags \
--resource-id-list <trail_arn>
# remove a tag from a trail
aws cloudtrail remove-tags \
--resource-id awslog \
--tags-list "Key=log-type,Value=all"
https://blogs.aws.amazon.com/security/post/Tx15CIT22V4J8RP/How-to-rotate-access-keys-for-IAM-users
http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html
# Limits: 5000 users, 100 groups, 250 roles, 2 access keys / user
http://docs.aws.amazon.com/cli/latest/reference/iam/index.html
# list all users' info
aws iam list-users
# list all users' usernames
aws iam list-users --output text | cut -f 6
# list current user's info
aws iam get-user
# list current user's access keys
aws iam list-access-keys
# create a new user
aws iam create-user \
--user-name aws-admin2
# create multiple new users, from a file
allUsers=$(cat ./user-names.txt)
for userName in $allUsers; do
aws iam create-user \
--user-name "$userName"
done
# list all users
aws iam list-users --no-paginate
# get a specific user's info
aws iam get-user \
--user-name aws-admin2
# delete one user
aws iam delete-user \
--user-name aws-admin2
# delete all users
# allUsers=$(aws iam list-users --output text | cut -f 6);
allUsers=$(cat ./user-names.txt)
for userName in $allUsers; do
aws iam delete-user \
--user-name "$userName"
done
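# note: delete-user fails while the user still owns access keys or attached
# policies, so a fuller teardown removes those first.
# A minimal sketch for the access-key case (same user list as above):
for userName in $allUsers; do
  for keyId in $(aws iam list-access-keys --user-name "$userName" --query 'AccessKeyMetadata[].AccessKeyId' --output text); do
    aws iam delete-access-key --user-name "$userName" --access-key-id "$keyId"
  done
  aws iam delete-user --user-name "$userName"
done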
http://docs.aws.amazon.com/cli/latest/reference/iam/
# list the account password policy
# http://docs.aws.amazon.com/cli/latest/reference/iam/get-account-password-policy.html
aws iam get-account-password-policy
# set the account password policy
# http://docs.aws.amazon.com/cli/latest/reference/iam/update-account-password-policy.html
aws iam update-account-password-policy \
--minimum-password-length 12 \
--require-symbols \
--require-numbers \
--require-uppercase-characters \
--require-lowercase-characters \
--allow-users-to-change-password
# delete the account password policy
# http://docs.aws.amazon.com/cli/latest/reference/iam/delete-account-password-policy.html
aws iam delete-account-password-policy
http://docs.aws.amazon.com/cli/latest/reference/iam/
# list all access keys
aws iam list-access-keys
# list access keys of a specific user
aws iam list-access-keys \
--user-name aws-admin2
# create a new access key
aws iam create-access-key \
--user-name aws-admin2 \
--output text | tee aws-admin2.txt
# list last access time of an access key
aws iam get-access-key-last-used \
--access-key-id AKIAINA6AJZY4EXAMPLE
# deactivate an access key
aws iam update-access-key \
--access-key-id AKIAI44QH8DHBEXAMPLE \
--status Inactive \
--user-name aws-admin2
# delete an access key
aws iam delete-access-key \
--access-key-id AKIAI44QH8DHBEXAMPLE \
--user-name aws-admin2
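# putting the access-key commands together: the rotation flow from the blog
# post linked above is roughly create a new key, switch the application to it,
# deactivate the old key, verify nothing broke, then delete it.
# A sketch (key id and user name are the example values from above):
aws iam create-access-key --user-name aws-admin2
# ...update ~/.aws/credentials (or the application) with the new key...
aws iam update-access-key --user-name aws-admin2 --access-key-id AKIAI44QH8DHBEXAMPLE --status Inactive
aws iam delete-access-key --user-name aws-admin2 --access-key-id AKIAI44QH8DHBEXAMPLE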
http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles.html http://docs.aws.amazon.com/cli/latest/reference/iam/
# list all groups
aws iam list-groups
# create a group
aws iam create-group --group-name FullAdmins
# delete a group
aws iam delete-group \
--group-name FullAdmins
# list all policies
aws iam list-policies
# get a specific policy
aws iam get-policy \
--policy-arn <value>
# list all users, groups, and roles, for a given policy
aws iam list-entities-for-policy \
--policy-arn <value>
# list policies, for a given group
aws iam list-attached-group-policies \
--group-name FullAdmins
# add a policy to a group
aws iam attach-group-policy \
--group-name FullAdmins \
--policy-arn arn:aws:iam::aws:policy/AdministratorAccess
# add a user to a group
aws iam add-user-to-group \
--group-name FullAdmins \
--user-name aws-admin2
# list users, for a given group
aws iam get-group \
--group-name FullAdmins
# list groups, for a given user
aws iam list-groups-for-user \
--user-name aws-admin2
# remove a user from a group
aws iam remove-user-from-group \
--group-name FullAdmins \
--user-name aws-admin2
# remove a policy from a group
aws iam detach-group-policy \
--group-name FullAdmins \
--policy-arn arn:aws:iam::aws:policy/AdministratorAccess
# delete a group
aws iam delete-group \
--group-name FullAdmins
https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html#cli-aws-s3api
# list existing S3 buckets
aws s3 ls
# create a bucket name, using the current date timestamp
# (bucket names must be DNS-compliant: lowercase, no underscores)
bucket_name=test-$(date "+%Y-%m-%d-%H-%M-%S")
echo $bucket_name
# create a public facing bucket
aws s3api create-bucket --acl "public-read-write" --bucket $bucket_name
# verify bucket was created
aws s3 ls | grep $bucket_name
# check for public facing s3 buckets (should show the bucket name you created)
aws s3api list-buckets --query 'Buckets[*].[Name]' --output text | xargs -I {} bash -c 'if [[ $(aws s3api get-bucket-acl --bucket {} --query '"'"'Grants[?Grantee.URI==`http://acs.amazonaws.com/groups/global/AllUsers` && Permission==`READ`]'"'"' --output text) ]]; then echo {} ; fi'
# check for public facing s3 buckets, update them to be private
aws s3api list-buckets --query 'Buckets[*].[Name]' --output text | xargs -I {} bash -c 'if [[ $(aws s3api get-bucket-acl --bucket {} --query '"'"'Grants[?Grantee.URI==`http://acs.amazonaws.com/groups/global/AllUsers` && Permission==`READ`]'"'"' --output text) ]]; then aws s3api put-bucket-acl --acl "private" --bucket {} ; fi'
# check for public facing s3 buckets (should be empty)
aws s3api list-buckets --query 'Buckets[*].[Name]' --output text | xargs -I {} bash -c 'if [[ $(aws s3api get-bucket-acl --bucket {} --query '"'"'Grants[?Grantee.URI==`http://acs.amazonaws.com/groups/global/AllUsers` && Permission==`READ`]'"'"' --output text) ]]; then echo {} ; fi'
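# the same check as the one-liners above, unrolled into a readable loop
# (a sketch; like the one-liners, it only catches READ grants to the AllUsers group):
for bucket in $(aws s3api list-buckets --query 'Buckets[*].Name' --output text); do
  grants=$(aws s3api get-bucket-acl --bucket "$bucket" --query 'Grants[?Grantee.URI==`http://acs.amazonaws.com/groups/global/AllUsers` && Permission==`READ`]' --output text)
  if [[ -n "$grants" ]]; then echo "$bucket is public"; fi
done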
http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html
# list all keypairs
# http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-key-pairs.html
aws ec2 describe-key-pairs
# create a keypair
# http://docs.aws.amazon.com/cli/latest/reference/ec2/create-key-pair.html
aws ec2 create-key-pair \
--key-name <value> --output text
# create a new local private / public keypair, using RSA 4096-bit
ssh-keygen -t rsa -b 4096
# import an existing keypair
# http://docs.aws.amazon.com/cli/latest/reference/ec2/import-key-pair.html
aws ec2 import-key-pair \
--key-name keyname_test \
--public-key-material file:///home/apollo/id_rsa.pub
# delete a keypair
# http://docs.aws.amazon.com/cli/latest/reference/ec2/delete-key-pair.html
aws ec2 delete-key-pair \
--key-name <value>
http://docs.aws.amazon.com/cli/latest/reference/ec2/index.html
# list all security groups
aws ec2 describe-security-groups
# create a security group
aws ec2 create-security-group \
--vpc-id vpc-1a2b3c4d \
--group-name web-access \
--description "web access"
# list details about a security group
aws ec2 describe-security-groups \
--group-id sg-0000000
# open port 80, for everyone
aws ec2 authorize-security-group-ingress \
--group-id sg-0000000 \
--protocol tcp \
--port 80 \
--cidr 0.0.0.0/0
# get my public ip
my_ip=$(dig +short myip.opendns.com @resolver1.opendns.com);
echo $my_ip
# open port 22, just for my ip
aws ec2 authorize-security-group-ingress \
--group-id sg-0000000 \
--protocol tcp \
--port 22 \
--cidr $my_ip/32
# remove a firewall rule from a group
aws ec2 revoke-security-group-ingress \
--group-id sg-0000000 \
--protocol tcp \
--port 80 \
--cidr 0.0.0.0/0
# delete a security group
aws ec2 delete-security-group \
--group-id sg-00000000
https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-images.html
# list all private AMIs, showing ImageId and Name
aws ec2 describe-images --filters "Name=is-public,Values=false" \
--query 'Images[].[ImageId, Name]' \
--output text | sort -k2
http://docs.aws.amazon.com/cli/latest/reference/ec2/index.html
# list all instances (running, and not running)
# http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html
aws ec2 describe-instances
# list all instances running
aws ec2 describe-instances --filters Name=instance-state-name,Values=running
# create a new instance
# http://docs.aws.amazon.com/cli/latest/reference/ec2/run-instances.html
aws ec2 run-instances \
--image-id ami-f0e7d19a \
--instance-type t2.micro \
--security-group-ids sg-00000000 \
--dry-run
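# a follow-on sketch (drop --dry-run to really launch): capture the new
# instance id, wait until it is running, then fetch its public IP
instance_id=$(aws ec2 run-instances --image-id ami-f0e7d19a --instance-type t2.micro --security-group-ids sg-00000000 --query 'Instances[0].InstanceId' --output text)
aws ec2 wait instance-running --instance-ids $instance_id
aws ec2 describe-instances --instance-ids $instance_id --query 'Reservations[0].Instances[0].PublicIpAddress' --output text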
# terminate an instance
# http://docs.aws.amazon.com/cli/latest/reference/ec2/terminate-instances.html
aws ec2 terminate-instances \
--instance-ids <instance_id>
# list status of all instances
# http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instance-status.html
aws ec2 describe-instance-status
# list status of a specific instance
aws ec2 describe-instance-status \
--instance-ids <instance_id>
# list instance IP addresses
aws ec2 describe-instances \
--query "Reservations[*].Instances[*].PublicIpAddress" \
--output=text
# list tags (for all resources)
# http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-tags.html
aws ec2 describe-tags
# add a tag to a resource (an AMI, in this example)
# http://docs.aws.amazon.com/cli/latest/reference/ec2/create-tags.html
aws ec2 create-tags \
--resources "ami-1a2b3c4d" \
--tags Key=Name,Value=debian
# delete a tag on a resource
# http://docs.aws.amazon.com/cli/latest/reference/ec2/delete-tags.html
aws ec2 delete-tags \
--resources "ami-1a2b3c4d" \
--tags Key=Name,Value=
http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/WhatIsCloudWatchLogs.html http://docs.aws.amazon.com/cli/latest/reference/logs/index.html#cli-aws-logs
# create a log group
# http://docs.aws.amazon.com/cli/latest/reference/logs/create-log-group.html
aws logs create-log-group \
--log-group-name "DefaultGroup"
# list all log groups
# http://docs.aws.amazon.com/cli/latest/reference/logs/describe-log-groups.html
aws logs describe-log-groups
# list log groups matching a name prefix
aws logs describe-log-groups \
--log-group-name-prefix "Default"
# delete a log group
# http://docs.aws.amazon.com/cli/latest/reference/logs/delete-log-group.html
aws logs delete-log-group \
--log-group-name "DefaultGroup"
# Log group names can be between 1 and 512 characters long. Allowed
# characters include a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen),
# '/' (forward slash), and '.' (period).
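# a quick client-side check of that rule (a sketch; the API is the real authority):
# grep -E succeeds only if the name is 1-512 characters from the allowed set
echo "DefaultGroup" | grep -Eq '^[A-Za-z0-9_/.-]{1,512}$' && echo "valid" || echo "invalid"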
# create a log stream
# http://docs.aws.amazon.com/cli/latest/reference/logs/create-log-stream.html
aws logs create-log-stream \
--log-group-name "DefaultGroup" \
--log-stream-name "syslog"
# list details on a log stream
# http://docs.aws.amazon.com/cli/latest/reference/logs/describe-log-streams.html
aws logs describe-log-streams \
--log-group-name "DefaultGroup"
aws logs describe-log-streams \
--log-group-name "DefaultGroup" \
--log-stream-name-prefix "syslog"
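# write an event to the stream created above (an illustrative sketch; the
# first put-log-events call on a stream needs no sequence token, later calls
# must pass the token returned by the previous call)
aws logs put-log-events \
--log-group-name "DefaultGroup" \
--log-stream-name "syslog" \
--log-events "timestamp=$(date +%s%3N),message=hello_from_the_cli"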
# delete a log stream
# http://docs.aws.amazon.com/cli/latest/reference/logs/delete-log-stream.html
aws logs delete-log-stream \
--log-group-name "DefaultGroup" \
--log-stream-name "syslog"
http://docs.aws.amazon.com/cli/latest/reference/cloudwatch/index.html
A curated list of AWS resources to prepare for the AWS Certifications
A curated list of awesome AWS resources you need to prepare for all 5 AWS Certifications. This gist includes: open source repos, blogs & blog posts, ebooks, PDFs, whitepapers, video courses, free lectures, slides, sample tests, and many other resources.
OP: @leonardofed, founder @ plainflow.
Index:
You will find you make fewer errors when you don't feel rushed for time.
The AWS Certified Solutions Architect – Associate exam is intended for individuals with experience designing distributed applications and systems on the AWS platform.
Exam concepts you should understand for this exam include:
Candidate Overview description provided by the AWS documentation
Eligible candidates for this exam have:
- One or more years of hands-on experience designing available, cost efficient, fault tolerant, and scalable distributed systems on AWS
- In-depth knowledge of at least one high-level programming language
- Ability to identify and define requirements for an AWS-based application
- Experience with deploying hybrid systems with on-premises and AWS components
- Capability to provide best practices for building secure and reliable applications on the AWS platform
AWS Knowledge required for the Exam:
Key items you should know before you take the exam:
General IT Knowledge preferred for the Exam:
In this AWS whitepaper you'll find a sample exam. Here's a preview:
Here are some general observations by Miha Kralj in this great post.
First of all, the associate certifications are not hard. If you have a chance to take the AWS training, concentrating on the training slides is more than enough; remember to read the detailed explanations under the slides. A good understanding of VPC and IAM is important for all associate exams.
--
Dan-Claudiu Dragos shared his experience here on how he prepared for the AWS Solutions Architect Certification in 7 days and successfully passed it.
I'd like to share my experience of getting AWS CSA(A) certified with Cloud Academy:
The background:
The process:
On the exam itself:
That's it. I'm number 16,891; not sure if that's small or big, or even if it matters.
A redditor on r/aws gave awesome tips about the exam day
This is a curated list of hands-on material to help you pass this AWS Certification! This advanced selection is especially for students who already have a working knowledge of AWS and who have passed the AWS Certified Solutions Architect - Associate exam (a prerequisite for sitting the Solutions Architect - Professional exam). It should help you build and develop your skills as an AWS professional.
Candidates must show two forms of personal identification (ID). Primary form must be a valid, government-issued ID containing both a photo and signature. The secondary form of ID needs to be valid and contain a signature.
Acceptable Forms of Primary ID (name, photograph, signature, valid/current):
To be eligible for this exam, you must already be certified at the AWS Certified Solutions Architect – Associate Level. You should have multiple years of hands-on experience designing and deploying cloud architecture on AWS, along with the ability to evaluate cloud application requirements and make architectural recommendations for implementation, deployment, and provisioning applications on AWS. Additionally, you should have the experience and the capability to provide best practices guidance on the architectural design across multiple applications, projects, or the enterprise.
Note that in the event that you fail to pass an AWS certification exam, you may retake the exam subject to the following conditions:
a. You must wait 14 days from the day you fail to take the exam again
b. You can take an exam up to three times in one year from the date of your first attempt
This is valid for any AWS Certification Exam.
To pass the AWS Certified Solutions Architect - Professional exam, you have to master advanced technical skills, not to mention gain experience in designing distributed applications and systems using AWS. Check the short list below to understand what you need to master in order to pass the exam.
This exam tests your knowledge of advanced AWS use cases. Eligible candidates for this exam have:
Credits to Chris Beckett @ BlueClouds
In this PDF you can download the sample questions provided by AWS. We reviewed all the questions, and you can find the correct answers marked in bold.
Which AWS based disaster recovery strategy will give you the best RTO?
A) Deploy the Oracle database and the JBoss app server on EC2. Restore the RMAN Oracle backups from Amazon S3. Generate an EBS volume of static content from the Storage Gateway and attach it to the JBoss EC2 server.
B) Deploy the Oracle database on RDS. Deploy the JBoss app server on EC2. Restore the RMAN Oracle backups from Amazon Glacier. Generate an EBS volume of static content from the Storage Gateway and attach it to the JBoss EC2 server.
C) Deploy the Oracle database and the JBoss app server on EC2. Restore the RMAN Oracle backups from Amazon S3. Restore the static content by attaching an AWS Storage Gateway running on Amazon EC2 as an iSCSI volume to the JBoss EC2 server.
D) Deploy the Oracle database and the JBoss app server on EC2. Restore the RMAN Oracle backups from Amazon S3. Restore the static content from an AWS Storage Gateway-VTL running on Amazon EC2
An ERP application is deployed in multiple Availability Zones in a single region. In the event of failure, the RTO must be less than 3 hours, and the RPO is 15 minutes. The customer realizes that data corruption occurred roughly 1.5 hours ago. Which DR strategy can be used to achieve this RTO and RPO in the event of this kind of failure?
A) Take 15-minute DB backups stored in Amazon Glacier, with transaction logs stored in Amazon S3 every 5 minutes.
B) Use synchronous database master-slave replication between two Availability Zones.
C) Take hourly DB backups to Amazon S3, with transaction logs stored in S3 every 5 minutes.
D) Take hourly DB backups to an Amazon EC2 instance store volume, with transaction logs stored in Amazon S3 every 5 minutes.
The Marketing Director in your company asked you to create a mobile app that lets users post sightings of good deeds known as random acts of kindness in 80-character summaries. You decided to write the application in JavaScript so that it would run on the broadest range of phones, browsers, and tablets. Your application should provide access to Amazon DynamoDB to store the good deed summaries. Initial testing of a prototype shows that there aren't large spikes in usage. Which option provides the most cost-effective and scalable architecture for this application?
A) Provide the JavaScript client with temporary credentials from the Security Token Service using a Token Vending Machine (TVM) on an EC2 instance to provide signed credentials mapped to an Amazon Identity and Access Management (IAM) user allowing DynamoDB puts and S3 gets. You serve your mobile application out of an S3 bucket enabled as a web site. Your client updates DynamoDB.
B) Register the application with a Web Identity Provider like Amazon, Google, or Facebook, create an IAM role for that provider, and set up permissions for the IAM role to allow S3 gets and DynamoDB puts. You serve your mobile application out of an S3 bucket enabled as a web site. Your client updates DynamoDB.
C) Provide the JavaScript client with temporary credentials from the Security Token Service using a Token Vending Machine (TVM) to provide signed credentials mapped to an IAM user allowing DynamoDB puts. You serve your mobile application out of Apache EC2 instances that are load-balanced and autoscaled. Your EC2 instances are configured with an IAM role that allows DynamoDB puts. Your server updates DynamoDB.
D) Register the JavaScript application with a Web Identity Provider like Amazon, Google, or Facebook, create an IAM role for that provider, and set up permissions for the IAM role to allow DynamoDB puts. You serve your mobile application out of Apache EC2 instances that are load-balanced and autoscaled. Your EC2 instances are configured with an IAM role that allows DynamoDB puts. Your server updates DynamoDB.
You are building a website that will retrieve and display highly sensitive information to users. The amount of traffic the site will receive is known and not expected to fluctuate. The site will leverage SSL to protect the communication between the clients and the web servers. Due to the nature of the site you are very concerned about the security of your SSL private key and want to ensure that the key cannot be accidentally or intentionally moved outside your environment. Additionally, while the data the site will display is stored on an encrypted EBS volume, you are also concerned that the web servers’ logs might contain some sensitive information; therefore, the logs must be stored so that they can only be decrypted by employees of your company. Which of these architectures meets all of the requirements?
A) Use Elastic Load Balancing to distribute traffic to a set of web servers. To protect the SSL private key, upload the key to the load balancer and configure the load balancer to offload the SSL traffic. Write your web server logs to an ephemeral volume that has been encrypted using a randomly generated AES key.
B) Use Elastic Load Balancing to distribute traffic to a set of web servers. Use TCP load balancing on the load balancer and configure your web servers to retrieve the private key from a private Amazon S3 bucket on boot. Write your web server logs to a private Amazon S3 bucket using Amazon S3 server-side encryption.
C) Use Elastic Load Balancing to distribute traffic to a set of web servers, configure the load balancer to perform TCP load balancing, use an AWS CloudHSM to perform the SSL transactions, and write your web server logs to a private Amazon S3 bucket using Amazon S3 server-side encryption.
D) Use Elastic Load Balancing to distribute traffic to a set of web servers. Configure the load balancer to perform TCP load balancing, use an AWS CloudHSM to perform the SSL transactions, and write your web server logs to an ephemeral volume that has been encrypted using a randomly generated AES key.
You are designing network connectivity for your fat client application. The application is designed for business travelers who must be able to connect to it from their hotel rooms, cafes, public Wi-Fi hotspots, and elsewhere on the Internet. You do not want to publish the application on the Internet. Which network design meets the above requirements while minimizing deployment and operational costs?
A) Implement AWS Direct Connect, and create a private interface to your VPC. Create a public subnet and place your application servers in it.
B) Implement Elastic Load Balancing with an SSL listener that terminates the back-end connection to the application.
C) Configure an IPsec VPN connection, and provide the users with the configuration details. Create a public subnet in your VPC, and place your application servers in it.
D) Configure an SSL VPN solution in a public subnet of your VPC, then install and configure SSL VPN client software on all user computers. Create a private subnet in your VPC and place your application servers in it.
Your company hosts an on-premises legacy engineering application with 900GB of data shared via a central file server. The engineering data consists of thousands of individual files ranging in size from megabytes to multiple gigabytes. Engineers typically modify 5-10 percent of the files a day. Your CTO would like to migrate this application to AWS, but only if the application can be migrated over the weekend to minimize user downtime. You calculate that it will take a minimum of 48 hours to transfer 900GB of data using your company’s existing 45-Mbps Internet connection. After replicating the application’s environment in AWS, which option will allow you to move the application’s data to AWS without losing any data and within the given timeframe?
A) Copy the data to Amazon S3 using multiple threads and multi-part upload for large files over the weekend, and work in parallel with your developers to reconfigure the replicated application environment to leverage Amazon S3 to serve the engineering files.
B) Sync the application data to Amazon S3 starting a week before the migration, on Friday morning perform a final sync, and copy the entire data set to your AWS file server after the sync completes.
C) Copy the application data to a 1-TB USB drive on Friday and immediately send overnight, with Saturday delivery, the USB drive to AWS Import/Export to be imported as an EBS volume, mount the resulting EBS volume to your AWS file server on Sunday.
D) Leverage the AWS Storage Gateway to create a Gateway-Stored volume. On Friday copy the application data to the Storage Gateway volume. After the data has been copied, perform a snapshot of the volume and restore the volume as an EBS volume to be attached to your AWS file server on Sunday.
Just a couple of suggestions to pass the AWS Solutions Architect Professional level certification:
If you've found this gist useful you can follow me @leonardofed for more info about AWS Certifications.