diff --git a/deployment/build_packages.sh b/deployment/build_packages.sh index d0c53c5a..e249d109 100755 --- a/deployment/build_packages.sh +++ b/deployment/build_packages.sh @@ -23,7 +23,7 @@ SCRIPT_PATH="$( cd "$(dirname "$0")" ; pwd -P )" PACKAGES_DIR="${SCRIPT_PATH}/packages/" LIBRARY="${SCRIPT_PATH}/../hammer/library" -LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api ecs-privileged-access-issues-identification ecs-logging-issues-identification ecs-external-image-source-issues-identification redshift-audit-logging-issues-identification redshift-unencrypted-cluster-identification redshift-cluster-public-access-identification elasticsearch-domain-logging-issues-identification elasticsearch-unencrypted-domain-identification elasticsearch-public-access-domain-identification" +LAMBDAS="ami-info logs-forwarder ddb-tables-backup sg-issues-identification s3-acl-issues-identification s3-policy-issues-identification iam-keyrotation-issues-identification iam-user-inactive-keys-identification cloudtrails-issues-identification ebs-unencrypted-volume-identification ebs-public-snapshots-identification rds-public-snapshots-identification sqs-public-policy-identification s3-unencrypted-bucket-issues-identification rds-unencrypted-instance-identification ami-public-access-issues-identification api ecs-privileged-access-issues-identification ecs-logging-issues-identification ecs-external-image-source-issues-identification redshift-audit-logging-issues-identification redshift-unencrypted-cluster-identification 
redshift-cluster-public-access-identification elasticsearch-domain-logging-issues-identification elasticsearch-unencrypted-domain-identification elasticsearch-public-access-domain-identification realtime-hammer-scanner" pushd "${SCRIPT_PATH}" > /dev/null pushd ../hammer/identification/lambdas > /dev/null diff --git a/deployment/cf-templates/identification-crossaccount-realtime-role.json b/deployment/cf-templates/identification-crossaccount-realtime-role.json new file mode 100644 index 00000000..f0d8ea23 --- /dev/null +++ b/deployment/cf-templates/identification-crossaccount-realtime-role.json @@ -0,0 +1,56 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Hammer Realtime Alerting Cloudwatch Cloudformation Stack", + "Parameters": { + "ResourcesPrefix": { + "Type": "String", + "MinLength": "3", + "Default": "hammer-" + }, + "MasterAccountID": { + "Type": "String", + "MinLength": "12" + }, + "IdentificationCrossAccountIAMRole": { + "Type": "String", + "Default": "cloudsec-crossact-id" + } + }, + "Resources": { + "HammerRealtimeCrossAccountEventBusRole": { + "Type": "AWS::IAM::Role", + "Properties": { + "RoleName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, "realtime-eventbus-forwarding-role"] + ]}, + "AssumeRolePolicyDocument": { + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": {"Service": ["events.amazonaws.com"] }, + "Action": "sts:AssumeRole" + }] + }, + "Path": "/", + "Policies": [{ + "PolicyName": "HammerCrossAccountIdentificationPolicy", + "PolicyDocument": { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "EventBusForwarding", + "Effect": "Allow", + "Action": [ + "events:PutEvents" + ], + "Resource": [ + {"Fn::Join": [ "", ["arn:aws:events:*:", {"Ref": "MasterAccountID"}, ":event-bus/default"] ] } + ] + } + ] + } + }] + } + } + + } +} \ No newline at end of file diff --git a/deployment/cf-templates/identification.json b/deployment/cf-templates/identification.json index eaddc546..9f8782f1 100755 
--- a/deployment/cf-templates/identification.json +++ b/deployment/cf-templates/identification.json @@ -18,6 +18,7 @@ "SourceS3Bucket", "SourceLogsForwarder", "SourceBackupDDB", + "SourceRealtimeScanner", "SourceIdentificationSG", "SourceIdentificationCloudTrails", "SourceIdentificationS3ACL", @@ -72,6 +73,9 @@ "SourceBackupDDB": { "default": "Relative path to BackupDDB lambda sources" }, + "SourceRealtimeScanner": { + "default": "Relative path to Realtime Scanning lambda sources" + }, "SourceIdentificationSG": { "default": "Relative path to Insecure services lambda sources" }, @@ -173,6 +177,10 @@ "Type": "String", "Default": "ddb-tables-backup.zip" }, + "SourceRealtimeScanner": { + "Type": "String", + "Default": "realtime-hammer-scanner.zip" + }, "SourceIdentificationSG": { "Type": "String", "Default": "sg-issues-identification.zip" @@ -350,6 +358,9 @@ "SNSDisplayNameCloudTrails": { "value": "describe-cloudtrails-sns" }, + "SNSTopicyNameRealtimeIssueUpdate": { + "value": "issue-realtime-notification-sns" + }, "SNSTopicNameCloudTrails": { "value": "describe-cloudtrails-lambda" }, @@ -479,6 +490,9 @@ "BackupDDBLambdaFunctionName": { "value": "backup-ddb" }, + "RealtimeScannerLambdaFunctionName": { + "value": "realtime-scanner" + }, "SecurityGroupLambdaFunctionName": { "value": "security-groups" }, @@ -584,6 +598,128 @@ "RetentionInDays": "7" } }, + "LambdaRealtimeScanner": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaRealtimeScanner"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": "SourceRealtimeScanner" } + }, + "Description": "Lambda function for realtime scanning", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "RealtimeScannerLambdaFunctionName", "value"] } ] + ]}, + "Handler": "realtime_hammer_scanner.lambda_handler", + "MemorySize": 256, + "Timeout": "900", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": 
"AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, + "LogGroupLambdaRealtimeScanner": { + "Type" : "AWS::Logs::LogGroup", + "Properties" : { + "LogGroupName": {"Fn::Join": ["", [ "/aws/lambda/", + { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", + "RealtimeScannerLambdaFunctionName", + "value"] + } ] ] }, + "RetentionInDays": "7" + } + }, + "LambdaEventSourceMappingRealtimeScanner": { + "Type": "AWS::Lambda::EventSourceMapping", + "DependsOn": ["LambdaRealtimeScanner", "SQSRealtimeScanner"], + "Properties": { + "BatchSize": 10, + "Enabled": true, + "EventSourceArn": { "Fn::GetAtt" : [ "SQSRealtimeScanner", "Arn" ] }, + "FunctionName": { "Fn::GetAtt" : [ "LambdaRealtimeScanner", "Arn" ] } + } + }, + "SQSRealtimeScanner": { + "Type": "AWS::SQS::Queue", + "DependsOn": ["LambdaRealtimeScanner"], + "Properties": { + "VisibilityTimeout": 3600, + "QueueName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "RealtimeScannerLambdaFunctionName", "value"] }, "-sqs" ] + ]} + } + }, + "SQSNotifcationFromCloudTrail": { + "Type": "AWS::SQS::QueuePolicy", + "DependsOn": "SQSRealtimeScanner", + "Properties": { + "PolicyDocument": { + "Version": "2012-10-17", + "Id": "SQSRealtimeScannerPolicy", + "Statement": [ + { + "Effect": "Allow", + "Principal": "*", + "Action": "sqs:SendMessage", + "Resource": { "Fn::GetAtt" : [ "SQSRealtimeScanner", "Arn" ] }, + "Condition": { + "ArnEquals": { + "aws:SourceArn": { + "Fn::Join": [ + "", [ + "arn:aws:sns:*:", + { + "Ref": "AWS::AccountId" + }, + ":", + { + "Ref": "ResourcesPrefix" + }, + "*" + ] + ] + } + } + } + } + ] + }, + "Queues": [ + { + "Ref": "SQSRealtimeScanner" + } + ] + } + }, + "LambdaBackupDDB": { + "Type": "AWS::Lambda::Function", + "DependsOn": ["LogGroupLambdaBackupDDB"], + "Properties": { + "Code": { + "S3Bucket": { "Ref": "SourceS3Bucket" }, + "S3Key": { "Ref": 
"SourceBackupDDB" } + }, + "Description": "Lambda function for backup hammer DDB tables", + "FunctionName": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" }, + { "Fn::FindInMap": ["NamingStandards", "BackupDDBLambdaFunctionName", "value"] } ] + ]}, + "Handler": "ddb_tables_backup.lambda_handler", + "MemorySize": 256, + "Timeout": "900", + "Role": {"Fn::Join" : ["", [ "arn:aws:iam::", + { "Ref": "AWS::AccountId" }, + ":role/", + { "Ref": "ResourcesPrefix" }, + { "Ref": "IdentificationIAMRole" } + ] ]}, + "Runtime": "python3.6" + } + }, "LambdaBackupDDB": { "Type": "AWS::Lambda::Function", "DependsOn": ["LogGroupLambdaBackupDDB"], @@ -1600,6 +1736,8 @@ } }, "Outputs": { - "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }} + "LambdaLogsForwarderArn": {"Value": { "Fn::GetAtt": ["LambdaLogsForwarder", "Arn"] }}, + "SQSRealtimeScannerArn": {"Value": { "Fn::GetAtt": ["SQSRealtimeScanner", "Arn"] }} + } } diff --git a/deployment/cf-templates/realtime-crossacc-hammer-cloudwatch-filter.json b/deployment/cf-templates/realtime-crossacc-hammer-cloudwatch-filter.json new file mode 100644 index 00000000..956e7c6f --- /dev/null +++ b/deployment/cf-templates/realtime-crossacc-hammer-cloudwatch-filter.json @@ -0,0 +1,135 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Hammer Realtime Alerting Cloudwatch Cloudformation Stack", + "Parameters": { + "ResourcesPrefix": { + "Type": "String", + "MinLength": "3", + "Default": "hammer-" + }, + "MasterAccountID": { + "Type": "String", + "MinLength": "12" + }, + "IdentificationCrossAccountIAMRole": { + "Type": "String", + "Default": "cloudsec-crossact-id" + } + }, + "Resources": { + "CloudWatchEventRealtimeScanner": { + "Type": "AWS::Events::Rule", + "Properties": { + "Description": "Cloud Watch Event to filter for Hammer specific Events in all Accounts", + "Name": "Cloudsec-Hammer-Realtime-Monitoring-CloudWatch-Event", + "State": "ENABLED", + "Targets": [ + { + "Arn": {"Fn::Join":["", 
["arn:aws:events:", {"Ref":"AWS::Region"}, ":", {"Ref":"MasterAccountID"}, ":event-bus/default"]]}, + "Id": "default", + "RoleArn": {"Fn::Join":["", ["arn:aws:iam::", {"Ref":"AWS::AccountId"}, ":role/", { "Ref": "ResourcesPrefix" }, "realtime-eventbus-forwarding-role"]]} + } + ], + "EventPattern": { + "detail-type": [ + "AWS API Call via CloudTrail" + ], + "detail": { + "eventName": [ + "CreateSecurityGroup", + "DeleteSecurityGroup", + "UpdateSecurityGroupRuleDescriptionsEgress", + "AuthorizeSecurityGroupEgress", + "AuthorizeSecurityGroupIngress", + "RevokeSecurityGroupEgress", + "RevokeSecurityGroupIngress", + "UpdateSecurityGroupRuleDescriptionsIngress", + "CopySnapshot", + "CreateSnapshot", + "CreateSnapshots", + "DeleteSnapshot", + "ModifySnapshotAttribute", + "ResetSnapshotAttribute", + "DisableEbsEncryptionByDefault", + "EnableEbsEncryptionByDefault", + "ModifyEbsDefaultKmsKeyId", + "ResetEbsDefaultKmsKeyId", + "AttachVolume", + "CreateVolume", + "DeleteVolume", + "DetachVolume", + "ModifyVolume", + "ModifyVolumeAttribute", + "AddRoleToInstanceProfile", + "AddUserToGroup", + "AttachGroupPolicy", + "AttachRolePolicy", + "AttachUserPolicy", + "ChangePassword", + "CreateAccessKey", + "CreateUser", + "CreateVirtualMFADevice", + "CreatePolicy", + "CreateRole", + "DeactivateMFADevice", + "DeleteAccessKey", + "DeleteAccountPasswordPolicy", + "DeleteGroup", + "DeleteGroupPolicy", + "DeleteLoginProfile", + "DeletePolicy", + "DeleteRole", + "DeleteRolePolicy", + "DeleteUser", + "DeleteUserPolicy", + "DeleteVirtualMFADevice", + "DetachGroupPolicy", + "DetachRolePolicy", + "DetachUserPolicy", + "user_withpassword", + "PutGroupPolicy", + "PutRolePolicy", + "PutUserPolicy", + "ResyncMFADevice", + "UpdateAccessKey", + "UpdateAccountPasswordPolicy", + "UpdateAssumeRolePolicy", + "UpdateGroup", + "UpdateLoginProfile", + "UpdateUser", + "CreateBucket", + "DeleteBucket", + "DeleteBucketPolicy", + "DeleteBucketTagging", + "PutBucketAcl", + "PutBucketPolicy", + 
"PutBucketReplication", + "PutBucketTagging", + "DeleteBucketEncryption", + "DeletePublicAccessBlock", + "PutBucketEncryption", + "PutPublicAccessBlock", + "AddPermission", + "CreateQueue", + "SetQueueAttributes", + "RemovePermission", + "DeleteQueue", + "CopyDBSnapshot", + "CreateDBInstance", + "CreateDBSnapshot", + "DeleteDBInstance", + "DeleteDBSnapshot", + "ModifyDBInstance", + "ModifyDBSnapshot", + "RestoreDBInstanceFromDBSnapshot", + "RestoreDBInstanceFromS3", + "RestoreDBInstanceToPointInTime", + "StartDBInstance", + "StopDBInstance" + ] + } + } + } + } + } +} \ No newline at end of file diff --git a/deployment/cf-templates/realtime-hammer-cloudwatch-filter.json b/deployment/cf-templates/realtime-hammer-cloudwatch-filter.json new file mode 100644 index 00000000..ca401485 --- /dev/null +++ b/deployment/cf-templates/realtime-hammer-cloudwatch-filter.json @@ -0,0 +1,208 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "Hammer Realtime Cloudwatch Cloudformation Stack", + + "Parameters": { + "ResourcesPrefix": { + "Type": "String", + "MinLength": "3", + "Default": "prod-hammer-" + }, + "SQSRealtimeScannerArn": { + "Type": "String" + } + }, + "Resources": { + "SNSRealtimeScanner": { + "Type": "AWS::SNS::Topic", + "Properties": { + "DisplayName": { + "Fn::Join": [ + "", + [ + { + "Ref": "ResourcesPrefix" + }, + "realtime-scanner-", + { + "Ref": "AWS::Region" + } + ] + ] + }, + "TopicName": { + "Fn::Join": [ + "", + [ + { + "Ref": "ResourcesPrefix" + }, + "realtime-scanner-", + { + "Ref": "AWS::Region" + } + ] + ] + } + } + }, + "S3NotificationTopicSubscription": { + "Type": "AWS::SNS::Subscription", + "Properties": { + "Endpoint": { + "Ref": "SQSRealtimeScannerArn" + }, + "Protocol": "sqs", + "RawMessageDelivery": true, + "TopicArn": { + "Ref": "SNSRealtimeScanner" + } + } + }, + "PermissionToInvokeSNS": { + "DependsOn": [ + "SNSRealtimeScanner", + "CloudWatchEventRealtimeScanner" + ], + "Type": "AWS::SNS::TopicPolicy", + "Properties": { + 
"PolicyDocument": { + "Id": "PermissionToInvokeSNSPolicy", + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "PermissionToInvokeSNSPolicyId", + "Effect": "Allow", + "Principal": { + "Service": "events.amazonaws.com" + }, + "Action": [ + "sns:Publish" + ], + "Resource": "*" + } + ] + }, + "Topics": [ + { + "Ref": "SNSRealtimeScanner" + } + ] + } + }, + "CloudWatchEventRealtimeScanner": { + "Type": "AWS::Events::Rule", + "Properties": { + "Description": "Cloud Watch Event to filter for Hammer specific Events", + "Name": {"Fn::Join" : ["", [ { "Ref": "ResourcesPrefix" },"Realtime-Monitoring-CloudWatch-Event-Trigger"] + ]}, + "State": "ENABLED", + "Targets": [ + { + "Arn": {"Ref" : "SNSRealtimeScanner"}, + "Id": "SNSRealtimeScanner" + } + ], + "EventPattern": { + "detail-type": [ + "AWS API Call via CloudTrail" + ], + "detail": { + "eventName": [ + "CreateSecurityGroup", + "DeleteSecurityGroup", + "UpdateSecurityGroupRuleDescriptionsEgress", + "AuthorizeSecurityGroupEgress", + "AuthorizeSecurityGroupIngress", + "RevokeSecurityGroupEgress", + "RevokeSecurityGroupIngress", + "UpdateSecurityGroupRuleDescriptionsIngress", + "CopySnapshot", + "CreateSnapshot", + "CreateSnapshots", + "DeleteSnapshot", + "ModifySnapshotAttribute", + "ResetSnapshotAttribute", + "DisableEbsEncryptionByDefault", + "EnableEbsEncryptionByDefault", + "ModifyEbsDefaultKmsKeyId", + "ResetEbsDefaultKmsKeyId", + "AttachVolume", + "CreateVolume", + "DeleteVolume", + "DetachVolume", + "ModifyVolume", + "ModifyVolumeAttribute", + "AddRoleToInstanceProfile", + "AddUserToGroup", + "AttachGroupPolicy", + "AttachRolePolicy", + "AttachUserPolicy", + "ChangePassword", + "CreateAccessKey", + "CreateUser", + "CreateVirtualMFADevice", + "CreatePolicy", + "CreateRole", + "DeactivateMFADevice", + "DeleteAccessKey", + "DeleteAccountPasswordPolicy", + "DeleteGroup", + "DeleteGroupPolicy", + "DeleteLoginProfile", + "DeletePolicy", + "DeleteRole", + "DeleteRolePolicy", + "DeleteUser", + "DeleteUserPolicy", + 
"DeleteVirtualMFADevice", + "DetachGroupPolicy", + "DetachRolePolicy", + "DetachUserPolicy", + "user_withpassword", + "PutGroupPolicy", + "PutRolePolicy", + "PutUserPolicy", + "ResyncMFADevice", + "UpdateAccessKey", + "UpdateAccountPasswordPolicy", + "UpdateAssumeRolePolicy", + "UpdateGroup", + "UpdateLoginProfile", + "UpdateUser", + "CreateBucket", + "DeleteBucket", + "DeleteBucketPolicy", + "DeleteBucketTagging", + "PutBucketAcl", + "PutBucketPolicy", + "PutBucketReplication", + "PutBucketTagging", + "DeleteBucketEncryption", + "DeletePublicAccessBlock", + "PutBucketEncryption", + "PutPublicAccessBlock", + "AddPermission", + "CreateQueue", + "SetQueueAttributes", + "RemovePermission", + "DeleteQueue", + "CopyDBSnapshot", + "CreateDBInstance", + "CreateDBSnapshot", + "DeleteDBInstance", + "DeleteDBSnapshot", + "ModifyDBInstance", + "ModifyDBSnapshot", + "RestoreDBInstanceFromDBSnapshot", + "RestoreDBInstanceFromS3", + "RestoreDBInstanceToPointInTime", + "StartDBInstance", + "StopDBInstance" + ] + } + } + } + } + } +} \ No newline at end of file diff --git a/deployment/configs/config.json b/deployment/configs/config.json index 2fcb37ae..3467d4da 100755 --- a/deployment/configs/config.json +++ b/deployment/configs/config.json @@ -41,7 +41,12 @@ } }, "api": { - "ddb.table_name": "hammer-api-requests" + "ddb.table_name": "hammer-api-requests", + "accounts": { + "123456789012": "master", + "210987654321": "slave1", + "654321210987": "slave2" + } }, "whitelisting_procedure_url": "", "credentials": { diff --git a/deployment/configs/realtime_hammer_mapping.json b/deployment/configs/realtime_hammer_mapping.json new file mode 100644 index 00000000..4424be4f --- /dev/null +++ b/deployment/configs/realtime_hammer_mapping.json @@ -0,0 +1,93 @@ +{ + "CreateSecurityGroup": ["secgrp_unrestricted_access"], + "DeleteSecurityGroup": ["secgrp_unrestricted_access"], + "UpdateSecurityGroupRuleDescriptionsEgress": ["secgrp_unrestricted_access"], + "AuthorizeSecurityGroupEgress": 
["secgrp_unrestricted_access"], + "AuthorizeSecurityGroupIngress": ["secgrp_unrestricted_access"], + "RevokeSecurityGroupEgress": ["secgrp_unrestricted_access"], + "RevokeSecurityGroupIngress":["secgrp_unrestricted_access"], + "UpdateSecurityGroupRuleDescriptionsIngress": ["secgrp_unrestricted_access"], + "CopySnapshot": ["ebs_public_snapshot"], + "CreateSnapshot": ["ebs_public_snapshot"], + "CreateSnapshots": ["ebs_public_snapshot"], + "DeleteSnapshot": ["ebs_public_snapshot"], + "ModifySnapshotAttribute": ["ebs_public_snapshot"], + "ResetSnapshotAttribute": ["ebs_public_snapshot"], + "DisableEbsEncryptionByDefault": ["ebs_unencrypted_volume"], + "EnableEbsEncryptionByDefault": ["ebs_unencrypted_volume"], + "ModifyEbsDefaultKmsKeyId": ["ebs_unencrypted_volume"], + "ResetEbsDefaultKmsKeyId": ["ebs_unencrypted_volume"], + "AttachVolume": ["ebs_unencrypted_volume"], + "CreateVolume": ["ebs_unencrypted_volume"], + "DeleteVolume": ["ebs_unencrypted_volume"], + "DetachVolume": ["ebs_unencrypted_volume"], + "ModifyVolume": ["ebs_unencrypted_volume"], + "ModifyVolumeAttribute": ["ebs_unencrypted_volume"], + "AddRoleToInstanceProfile": ["user_iprestriction"], + "AddUserToGroup": ["user_iprestriction"], + "AttachGroupPolicy": ["user_iprestriction"], + "AttachRolePolicy": ["user_iprestriction"], + "AttachUserPolicy": ["user_iprestriction"], + "ChangePassword": ["user_withpassword"], + "CreateAccessKey": ["user_inactivekeys", "user_keysrotation"], + "CreateUser": ["user_iprestriction", "user_withpassword"], + "CreateVirtualMFADevice": ["user_withpassword"], + "CreatePolicy": ["user_iprestriction"], + "CreateRole": ["user_iprestriction"], + "DeactivateMFADevice": ["user_withpassword"], + "DeleteAccessKey": ["user_inactivekeys", "user_keysrotation"], + "DeleteAccountPasswordPolicy": ["user_withpassword"], + "DeleteGroup": ["user_iprestriction"], + "DeleteGroupPolicy": ["user_iprestriction"], + "DeleteLoginProfile": ["user_iprestriction"], + "DeletePolicy": 
["user_iprestriction"], + "DeleteRole": ["user_iprestriction"], + "DeleteRolePolicy": ["user_iprestriction"], + "DeleteUser": ["user_iprestriction", "user_withpassword"], + "DeleteUserPolicy": ["user_iprestriction", "user_withpassword"], + "DeleteVirtualMFADevice": ["user_withpassword"], + "DetachGroupPolicy": [" user_iprestriction"], + "DetachRolePolicy": ["user_iprestriction"], + "DetachUserPolicy": ["user_iprestriction", "user_withpassword"], + "user_withpassword": ["user_withpassword"], + "PutGroupPolicy": ["user_iprestriction"], + "PutRolePolicy": ["user_iprestriction"], + "PutUserPolicy": ["user_iprestriction", "user_withpassword"], + "ResyncMFADevice": ["user_withpassword"], + "UpdateAccessKey": ["user_inactivekeys", "user_keysrotation"], + "UpdateAccountPasswordPolicy": ["user_withpassword"], + "UpdateAssumeRolePolicy": ["user_iprestriction"], + "UpdateGroup": ["user_iprestriction"], + "UpdateLoginProfile": ["user_iprestriction", "user_withpassword"], + "UpdateUser": ["user_iprestriction", "user_withpassword"], + "CreateBucket": ["s3_bucket_acl", "s3_encryption", "s3_bucket_policy", "s3_tagging"], + "DeleteBucket": ["s3_bucket_acl", "s3_encryption", "s3_bucket_policy", "s3_tagging"], + "DeleteBucketPolicy": ["s3_bucket_policy"], + "DeleteBucketTagging": ["s3_tagging"], + "PutBucketAcl": ["s3_bucket_acl"], + "PutBucketPolicy": ["s3_bucket_policy"], + "PutBucketReplication": ["s3_bucket_acl", "s3_bucket_policy"], + "PutBucketTagging": ["s3_tagging"], + "DeleteBucketEncryption": ["s3_encryption"], + "DeletePublicAccessBlock": ["s3_bucket_policy", "s3_bucket_policy"], + "PutBucketEncryption": ["s3_encryption"], + "PutPublicAccessBlock": ["s3_bucket_acl", "s3_bucket_policy"], + "AddPermission": ["sqs_public_access"], + "CreateQueue": ["sqs_public_access"], + "SetQueueAttributes": ["sqs_public_access"], + "RemovePermission": ["sqs_public_access"], + "DeleteQueue": ["sqs_public_access"], + "CopyDBSnapshot": ["rds_public_snapshot"], + "CreateDBInstance": 
["rds_encryption"], + "CreateDBSnapshot": ["rds_public_snapshot"], + "DeleteDBInstance": ["rds_encryption"], + "DeleteDBSnapshot": ["rds_public_snapshot"], + "ModifyDBInstance": ["rds_encryption", "rds_public_snapshot"], + "ModifyDBSnapshot": ["rds_public_snapshot"], + "RestoreDBInstanceFromDBSnapshot": ["rds_encryption"], + "RestoreDBInstanceFromS3": ["rds_encryption"], + "RestoreDBInstanceToPointInTime": ["rds_encryption"], + "StartDBInstance": ["rds_encryption"], + "StopDBInstance": ["rds_encryption"], + "PutObject": ["secret_scanner"] + } \ No newline at end of file diff --git a/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py b/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py index fef2f057..b88ac084 100644 --- a/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py +++ b/hammer/identification/lambdas/ami-public-access-issues-identification/describe_public_ami_issues.py @@ -23,6 +23,8 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -58,7 +60,7 @@ def lambda_handler(event, context): issue.issue_details.tags = ami.tags issue.issue_details.name = ami.name issue.issue_details.region = region - + issue.issue_details.cloudtrail = cloudtrail if config.publicAMIs.in_temp_whitelist(account_id, ami.id): issue.status = IssueStatus.Tempwhitelist elif config.publicAMIs.in_whitelist(account_id, ami.id): diff --git a/hammer/identification/lambdas/api/entrypoint.py b/hammer/identification/lambdas/api/entrypoint.py index a4c0e163..af6996da 100644 --- a/hammer/identification/lambdas/api/entrypoint.py +++ b/hammer/identification/lambdas/api/entrypoint.py @@ -3,6 
+3,7 @@ import logging import uuid import time +import hashlib import boto3 @@ -32,8 +33,20 @@ return f"arn:aws:sns:{region}:{account_id}:{topic_name}" -GLOBAL_SECURITY_FEATURES = ['s3_bucket_acl', 'user_inactivekeys', 'user_keysrotation', 's3_bucket_policy', - 's3_encryption'] +GLOBAL_SECURITY_FEATURES = [ + 'ec2_public_ami', + 'ebs_unencrypted_volume', + 'ebs_public_snapshot', + 'user_inactivekeys', + 'user_keysrotation', + 'rds_encryption', + 'rds_public_snapshot', + 's3_bucket_acl', + 's3_bucket_policy', + 's3_encryption', + 'secgrp_unrestricted_access', + 'sqs_public_access' + ] def start_scan(account_id, regions, security_features, tags, ids): @@ -87,7 +100,19 @@ "security_features": to_scan, "tags": tags } - request_id = uuid.uuid4().hex + + ### Make UUID a hash of the account_id, regions, security_feature, invocation_reason, request_parameters to not have duplicate requests per realtime scan + request_id = hashlib.sha256(json.dumps(request_params).encode("utf-8")).hexdigest() + + ### Then Read DDB to see if we already have that request + request_info = DDB.get_request_data(api_table, request_id) + if request_info is not None and 'progress' in request_info and 'total' in request_info: + if request_info['progress'] != request_info['total']: + logging.info("A current scan on this resource is already running... Not going to perform duplicate scan...") + return { + "statusCode": 200, + "body": "A current scan on this resource is already running... Not going to perform duplicate scan..." 
+ } DDB.add_request(api_table, request_id, request_params, total) @@ -99,7 +124,8 @@ def start_scan(account_id, regions, security_features, tags, ids): "account_name": account_name, "regions": regions, "sns_arn": topic_arn, - "request_id": request_id + "request_id": request_id, + "tags": tags } Sns.publish(topic_arn, payload) diff --git a/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py b/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py index 81f86e3b..df89dbbb 100755 --- a/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py +++ b/hammer/identification/lambdas/cloudtrails-issues-identification/describe_cloudtrails.py @@ -23,6 +23,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -53,6 +54,7 @@ def lambda_handler(event, context): if checker.check(): if checker.disabled or checker.delivery_errors: issue = CloudTrailIssue(account_id, region) + issue.issue_details.cloudtrail = cloudtrail issue.issue_details.disabled = checker.disabled issue.issue_details.delivery_errors = checker.delivery_errors issue.add_trails(checker.trails) diff --git a/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py b/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py index 5a901899..2fbfb839 100755 --- a/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py +++ b/hammer/identification/lambdas/ebs-public-snapshots-identification/describe_ebs_public_snapshots.py @@ -23,6 +23,18 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in 
payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + + # if request_id is present in payload, it means this lambda was called from the API + request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + issue_id = None + if cloudtrail: + try: + issue_id = cloudtrail['resource'] + except Exception: + logging.debug(f"No resource id found in tags for message {cloudtrail}") + except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -43,20 +55,22 @@ logging.debug(f"Checking for public EBS snapshots in {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, EBSPublicSnapshotIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, EBSPublicSnapshotIssue, issue_id) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} logging.debug(f"Public EBS snapshots in DDB:\n{open_issues.keys()}") + issue_ids_to_check = None if issue_id is None else [issue_id] checker = EBSPublicSnapshotsChecker(account=account) - if checker.check(): + if checker.check(ids=issue_ids_to_check): for snapshot in checker.snapshots: if snapshot.public: issue = EBSPublicSnapshotIssue(account_id, snapshot.id) issue.issue_details.region = snapshot.account.region issue.issue_details.volume_id = snapshot.volume_id issue.issue_details.tags = snapshot.tags + issue.issue_details.cloudtrail = cloudtrail if config.ebsSnapshot.in_temp_whitelist(account_id, snapshot.id): issue.status = IssueStatus.Tempwhitelist diff --git a/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py 
b/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py index b5f3764e..a21dbb63 100755 --- a/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py +++ b/hammer/identification/lambdas/ebs-unencrypted-volume-identification/describe_ebs_unencrypted_volumes.py @@ -23,6 +23,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -59,7 +60,7 @@ def lambda_handler(event, context): issue.issue_details.state = volume.state issue.issue_details.attachments = volume.attachments issue.issue_details.tags = volume.tags - + issue.issue_details.cloudtrail = cloudtrail if config.ebsVolume.in_temp_whitelist(account_id, volume.id): issue.status = IssueStatus.Tempwhitelist elif config.ebsVolume.in_whitelist(account_id, volume.id): diff --git a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py index 8694c1e5..72e6447d 100644 --- a/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py +++ b/hammer/identification/lambdas/ecs-external-image-source-issues-identification/describe_ecs_external_image_source_issues.py @@ -22,6 +22,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -58,7 +59,7 @@ def 
lambda_handler(event, context): issue.issue_details.tags = task_definition.tags issue.issue_details.container_image_details = task_definition.container_image_details issue.issue_details.region = task_definition.account.region - + issue.issue_details.cloudtrail = cloudtrail if config.ecs_external_image_source.in_temp_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Tempwhitelist elif config.ecs_external_image_source.in_whitelist(account_id, task_definition.name): diff --git a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py index 0c595cf4..179e4f0f 100644 --- a/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py +++ b/hammer/identification/lambdas/ecs-logging-issues-identification/describe_ecs_logging_issues.py @@ -22,6 +22,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -58,7 +59,7 @@ def lambda_handler(event, context): issue.issue_details.task_definition_arn = task_definition.arn issue.issue_details.disabled_logging_container_names = task_definition.disabled_logging_container_names issue.issue_details.tags = task_definition.tags - + issue.issue_details.cloudtrail = cloudtrail if config.ecs_logging.in_temp_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Tempwhitelist elif config.ecs_logging.in_whitelist(account_id, task_definition.name): diff --git a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py 
b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py index f23edf50..84e09382 100644 --- a/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py +++ b/hammer/identification/lambdas/ecs-privileged-access-issues-identification/describe_ecs_privileged_access_issues.py @@ -22,6 +22,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -58,6 +59,7 @@ def lambda_handler(event, context): issue.issue_details.tags = task_definition.tags issue.issue_details.privileged_container_names = task_definition.privileged_container_names issue.issue_details.region = task_definition.account.region + issue.issue_details.cloudtrail = cloudtrail if config.ecs_privileged_access.in_temp_whitelist(account_id, task_definition.name): issue.status = IssueStatus.Tempwhitelist elif config.ecs_privileged_access.in_whitelist(account_id, task_definition.name): diff --git a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py index 6eb906ba..587f9d77 100644 --- a/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py +++ b/hammer/identification/lambdas/elasticsearch-domain-logging-issues-identification/describe_elasticsearch_domains_logging_issues.py @@ -23,6 +23,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = 
payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -58,7 +59,7 @@ def lambda_handler(event, context): issue.issue_details.id = domain.id issue.issue_details.arn = domain.arn issue.issue_details.tags = domain.tags - + issue.issue_details.cloudtrail = cloudtrail if config.esLogging.in_temp_whitelist(account_id, domain.name): issue.status = IssueStatus.Tempwhitelist elif config.esLogging.in_whitelist(account_id, domain.name): diff --git a/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py index bc20bea8..0c1afa91 100644 --- a/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py +++ b/hammer/identification/lambdas/elasticsearch-public-access-domain-identification/describe_elasticsearch_public_access_domains.py @@ -23,6 +23,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -59,7 +60,7 @@ def lambda_handler(event, context): issue.issue_details.arn = domain.arn issue.issue_details.tags = domain.tags issue.issue_details.policy = domain.policy - + issue.issue_details.cloudtrail = cloudtrail if config.esPublicAccess.in_temp_whitelist(account_id, domain.name): issue.status = IssueStatus.Tempwhitelist elif config.esPublicAccess.in_whitelist(account_id, domain.name): diff --git a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py 
b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py index b039d851..0821221d 100644 --- a/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py +++ b/hammer/identification/lambdas/elasticsearch-unencrypted-domain-identification/describe_elasticsearch_unencrypted_domains.py @@ -23,6 +23,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -60,7 +61,7 @@ def lambda_handler(event, context): issue.issue_details.tags = domain.tags issue.issue_details.encrypted_at_rest = domain.encrypted_at_rest issue.issue_details.encrypted_at_transit = domain.encrypted_at_transit - + issue.issue_details.cloudtrail = cloudtrail if config.esEncrypt.in_temp_whitelist(account_id, domain.name): issue.status = IssueStatus.Tempwhitelist elif config.esEncrypt.in_whitelist(account_id, domain.name): diff --git a/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py b/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py index 2727f22d..7487bbcf 100755 --- a/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py +++ b/hammer/identification/lambdas/iam-keyrotation-issues-identification/describe_iam_key_rotation.py @@ -18,8 +18,15 @@ def lambda_handler(event, context): payload = json.loads(event["Records"][0]["Sns"]["Message"]) account_id = payload['account_id'] account_name = payload['account_name'] - # if request_id is present in payload then this lambda was called from the API + # if request_id is present in payload, it means this lambda was called from the API 
request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + issue_id = None + if cloudtrail: + try: + issue_id = cloudtrail['resource'] + except: + logging.debug(f"No userName found in tags for message {cloudtrail}") except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -39,7 +46,7 @@ def lambda_handler(event, context): logging.debug(f"Checking for IAM user keys rotation for {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, IAMKeyRotationIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, IAMKeyRotationIssue, issue_id) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: issue for issue in open_issues} @@ -48,7 +55,8 @@ def lambda_handler(event, context): checker = IAMKeyChecker(account=account, now=config.now, rotation_criteria_days=config.iamUserKeysRotation.rotation_criteria_days) - if not checker.check(last_used_check_enabled=False): + issue_ids_to_check = None if issue_id is None else [issue_id] + if not checker.check(users_to_check=issue_ids_to_check, last_used_check_enabled=False): return for user in checker.users: @@ -56,7 +64,7 @@ def lambda_handler(event, context): issue = IAMKeyRotationIssue(account_id, key.id) issue.issue_details.username = user.id issue.issue_details.create_date = key.create_date.isoformat() - + issue.issue_details.cloudtrail = cloudtrail if config.iamUserKeysRotation.in_temp_whitelist(account_id, key.id) \ or config.iamUserKeysRotation.in_temp_whitelist(account_id, user.id): issue.status = IssueStatus.Tempwhitelist diff --git a/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py b/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py index 9d5f6e9f..5b6babf0 100755 --- 
a/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py +++ b/hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py @@ -18,8 +18,16 @@ def lambda_handler(event, context): payload = json.loads(event["Records"][0]["Sns"]["Message"]) account_id = payload['account_id'] account_name = payload['account_name'] - # if request_id is present in payload then this lambda was called from the API + # if request_id is present in payload, it means this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + issue_id = None + if cloudtrail: + try: + issue_id = cloudtrail['resource'] + except: + logging.debug(f"No userName found in tags for message {cloudtrail}") + except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -39,7 +47,7 @@ def lambda_handler(event, context): logging.debug(f"Checking for IAM user inactive keys in {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, IAMKeyInactiveIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, IAMKeyInactiveIssue, issue_id) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: issue for issue in open_issues} @@ -48,29 +56,28 @@ def lambda_handler(event, context): checker = IAMKeyChecker(account=account, now=config.now, inactive_criteria_days=config.iamUserInactiveKeys.inactive_criteria_days) - if not checker.check(last_used_check_enabled=True): - return - - for user in checker.users: - for key in user.inactive_keys: - issue = IAMKeyInactiveIssue(account_id, key.id) - issue.issue_details.username = user.id - issue.issue_details.last_used = key.last_used.isoformat() - issue.issue_details.create_date = key.create_date.isoformat() - - if 
config.iamUserInactiveKeys.in_temp_whitelist(account_id, key.id) \ - or config.iamUserInactiveKeys.in_temp_whitelist(account_id, user.id): - issue.status = IssueStatus.Tempwhitelist - elif config.iamUserInactiveKeys.in_whitelist(account_id, key.id) \ - or config.iamUserInactiveKeys.in_whitelist(account_id, user.id): - issue.status = IssueStatus.Whitelisted - else: - issue.status = IssueStatus.Open - logging.debug(f"Setting {key.id}/{user.id} status {issue.status}") - IssueOperations.update(ddb_table, issue) - # remove issue id from open_issues (if exists) - # as we already checked it - open_issues.pop(key.id, None) + issue_ids_to_check = None if issue_id is None else [issue_id] + if checker.check(users_to_check=issue_ids_to_check, last_used_check_enabled=True): + for user in checker.users: + for key in user.inactive_keys: + issue = IAMKeyInactiveIssue(account_id, key.id) + issue.issue_details.username = user.id + issue.issue_details.last_used = key.last_used.isoformat() + issue.issue_details.create_date = key.create_date.isoformat() + issue.issue_details.cloudtrail = cloudtrail + if config.iamUserInactiveKeys.in_temp_whitelist(account_id, key.id) \ + or config.iamUserInactiveKeys.in_temp_whitelist(account_id, user.id): + issue.status = IssueStatus.Tempwhitelist + elif config.iamUserInactiveKeys.in_whitelist(account_id, key.id) \ + or config.iamUserInactiveKeys.in_whitelist(account_id, user.id): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {key.id}/{user.id} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from open_issues (if exists) + # as we already checked it + open_issues.pop(key.id, None) logging.debug(f"Inactive keys in DDB:\n{open_issues.keys()}") # all other unresolved issues in DDB are for removed/remediated keys diff --git a/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py 
b/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py index c0f0bd07..b8c0f643 100755 --- a/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py +++ b/hammer/identification/lambdas/rds-public-snapshots-identification/describe_rds_public_snapshots.py @@ -23,6 +23,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -59,7 +60,7 @@ def lambda_handler(event, context): issue.issue_details.region = snapshot.account.region issue.issue_details.engine = snapshot.engine issue.issue_details.tags = snapshot.tags - + issue.issue_details.cloudtrail = cloudtrail if config.rdsSnapshot.in_temp_whitelist(account_id, snapshot.id): issue.status = IssueStatus.Tempwhitelist elif config.rdsSnapshot.in_whitelist(account_id, snapshot.id): diff --git a/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py b/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py index 34fa4d32..9bb0dfcc 100644 --- a/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py +++ b/hammer/identification/lambdas/rds-unencrypted-instance-identification/describe_rds_instance_encryption.py @@ -21,8 +21,15 @@ def lambda_handler(event, context): account_name = payload['account_name'] # get the last region from the list to process region = payload['regions'].pop() - # if request_id is present in payload then this lambda was called from the API + # if request_id is present in payload, it means this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + 
issue_id = None + if cloudtrail: + try: + issue_id = [cloudtrail['resource']] + except: + logging.debug(f"No dBInstanceIdentifier found in tags for message {cloudtrail}") except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -43,7 +50,7 @@ def lambda_handler(event, context): logging.debug(f"Checking for RDS encryption in {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, RdsEncryptionIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, RdsEncryptionIssue, issue_id) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} @@ -59,7 +66,7 @@ def lambda_handler(event, context): issue.issue_details.region = instance.account.region issue.issue_details.engine = instance.engine issue.issue_details.tags = instance.tags - + issue.issue_details.cloudtrail = cloudtrail if config.rdsEncrypt.in_temp_whitelist(account_id, instance.id): issue.status = IssueStatus.Tempwhitelist elif config.rdsEncrypt.in_whitelist(account_id, instance.id): diff --git a/hammer/identification/lambdas/realtime-hammer-scanner/RealtimeMonitoringREADME.md b/hammer/identification/lambdas/realtime-hammer-scanner/RealtimeMonitoringREADME.md new file mode 100644 index 00000000..1436a8f1 --- /dev/null +++ b/hammer/identification/lambdas/realtime-hammer-scanner/RealtimeMonitoringREADME.md @@ -0,0 +1,55 @@ +### Enable realtime monitoring +Get the value of SQSRealtimeScannerArn from Identification stack output +Run this on CloudSecTest Account (replace stack name with your prefix): +``` +for region in `aws ec2 describe-regions --output text | cut -f4` +do + echo "Creating realtime monitoring stack in $region" + aws cloudformation deploy --template-file realtime-hammer-cloudwatch-filter.json --stack-name
CloudSecurity-Hammer-Realtime-Scanner-CloudWatch-Filter-Rebase2-Dev-Stack --parameter-overrides SQSRealtimeScannerArn="" ResourcesPrefix="rebase2-dev-" --region $region +done +``` + +### Enable RealTime Scanning Cross Account in Prod +Note: CrossAccount Scanning is only Supported in Prod as of now. + +Run this on CloudSecProd Account: +Unfortunately EventBusPolicy seems to be bugged for Cloud Formation so we must do this step manually. +Run this on all regions: + +AP: "ap-south-1" "ap-northeast-2" "ap-northeast-1" "ap-southeast-1" "ap-southeast-2" +EU: "eu-west-3" "eu-west-2" "eu-west-1" "eu-central-1" +SA: "sa-east-1" +CA: "ca-central-1" +US: "us-east-1" "us-east-2" "us-west-1" "us-west-2" + +1) Because of a bug in CloudFormation, we cannot create an EventBus to collect CloudTrail events from all member accounts using CloudFormation. For now, we would need to do this manually. You can use the following script (replace '' with your AWS organization id): +``` +for region in `aws ec2 describe-regions --output text | cut -f4` +do + echo "Setting up EventBus for aggregating CloudTrail events in $region" + aws events put-permission --action "events:PutEvents" --principal "*" --condition Type="StringEquals",Key="aws:PrincipalOrgID",Value="" --statement-id "realtime-hammer-monitoring" --profile cloudsectest --region $region +done +``` + +Get the value of SQSRealtimeScannerArn from Identification stack output +Run this on CloudSecProduction Account: +``` +for region in `aws ec2 describe-regions --output text | cut -f4` +do + echo "Creating realtime monitoring stack in $region" + aws cloudformation deploy --template-file realtime-hammer-cloudwatch-filter.json --stack-name CloudSecurity-Hammer-Realtime-Scanner-CloudWatch-Filter-Prod-Stack --parameter-overrides SQSRealtimeScannerArn="" ResourcesPrefix="prod-hammer-" --region $region +done +``` + +Run this on CentralizedDeployment Account: +Deploy the Role to enable Cross Account Scanning +``` +aws cloudformation
create-stack-instances --stack-set-name CloudSecurity-Hammer-Crossaccount-CloudTrail-Eventbus-Role --regions "us-east-1" --operation-preferences FailureToleranceCount=100,MaxConcurrentCount=20 --region us-east-1 --accounts +``` + +Deploy the CloudWatch Alarm Stack Set + +Example: +make sure to specify all the regions you wish to select and accounts you wish to enable realtime monitoring +```aws cloudformation update-stack-instances --stack-set-name CloudSecurity-Hammer-Realtime-CloudTrail-Eventbus-US --regions "us-east-1" "us-east-2" "us-west-1" "us-west-2" --operation-preferences FailureToleranceCount=100,MaxConcurrentCount=20 --region us-east-1 --accounts ``` + diff --git a/hammer/identification/lambdas/realtime-hammer-scanner/realtime_hammer_scanner.py b/hammer/identification/lambdas/realtime-hammer-scanner/realtime_hammer_scanner.py new file mode 100644 index 00000000..f237d933 --- /dev/null +++ b/hammer/identification/lambdas/realtime-hammer-scanner/realtime_hammer_scanner.py @@ -0,0 +1,65 @@ +import json +import logging +import io + +from library.logger import set_logging +from botocore.vendored import requests +from library.config import Config +from gzip import GzipFile +from library.utility import CloudTrailParser + + +def lambda_handler(event, context): + + set_logging(level=logging.DEBUG) + ### Read the SNS notification and retrieve the s3 bucket and the object where the cloudtrail info is located + try: + message = json.loads(event['Records'][0]['body']) + config = Config() + api_endpoint = config.api.url + api_token = config.api.token + + ### Load the cloudwatch mappings + with open('realtime_hammer_mapping.json', 'r') as json_file: + cloudwatch_mapping = json.load(json_file) + + except Exception: + logging.exception(f"Failed to parse cloudwatch mapping\n") + return + + ### Parse through the message and find the eventName to kick off specific hammer rules + try: + cloud_trail = CloudTrailParser(message) + scan = cloudwatch_mapping[cloud_trail.event]
+ user_id = cloud_trail.userArn + account_id = cloud_trail.accountID + logging.debug(cloud_trail) + payload = { + 'account_id': cloud_trail.accountID, + 'security_features': scan, + 'tags': { + 'user_id': cloud_trail.userArn, + 'invocation_reason': cloud_trail.event, + 'resource': cloud_trail.resource, + 'object_key': cloud_trail.objectKey, + 'region': cloud_trail.region, + 'event_time': cloud_trail.eventTime, + 'raw': cloud_trail.message + } + } + ### If there is a region specified, scan only that specific region + if cloud_trail.region: + payload['regions'] = [cloud_trail.region] + + + url = api_endpoint + 'identify' + logging.debug(url) + headers = { + 'Auth':api_token, + 'content-type': 'application/json' + } + logging.debug(f"Initiating Realtime Scan for ' {payload}'") + r = requests.post(url, data=json.dumps(payload), headers=headers) + logging.debug(r) + except Exception: + logging.exception(f"The following CloudTrail eventName is not currently supported {message}") diff --git a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py index 06e35ef7..071940dc 100644 --- a/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py +++ b/hammer/identification/lambdas/redshift-audit-logging-issues-identification/describe_redshift_logging_issues.py @@ -22,6 +22,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -56,7 +57,7 @@ def lambda_handler(event, context): issue = RedshiftLoggingIssue(account_id, cluster.name) issue.issue_details.tags = cluster.tags issue.issue_details.region = 
cluster.account.region - + issue.issue_details.cloudtrail = cloudtrail if config.redshift_logging.in_temp_whitelist(account_id, cluster.name): issue.status = IssueStatus.Tempwhitelist elif config.redshift_logging.in_whitelist(account_id, cluster.name): diff --git a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py index 7db33692..9ec4af0a 100644 --- a/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py +++ b/hammer/identification/lambdas/redshift-cluster-public-access-identification/describe_redshift_cluster_public_access.py @@ -22,6 +22,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -56,7 +57,7 @@ def lambda_handler(event, context): issue = RedshiftPublicAccessIssue(account_id, cluster.name) issue.issue_details.tags = cluster.tags issue.issue_details.region = cluster.account.region - + issue.issue_details.cloudtrail = cloudtrail if config.redshift_public_access.in_temp_whitelist(account_id, cluster.name): issue.status = IssueStatus.Tempwhitelist elif config.redshift_public_access.in_whitelist(account_id, cluster.name): diff --git a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py index 4e1c5de3..10a7e8c3 100644 --- a/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py +++ 
b/hammer/identification/lambdas/redshift-unencrypted-cluster-identification/describe_redshift_encryption.py @@ -22,6 +22,7 @@ def lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -56,7 +57,7 @@ def lambda_handler(event, context): issue = RedshiftEncryptionIssue(account_id, cluster.name) issue.issue_details.tags = cluster.tags issue.issue_details.region = cluster.account.region - + issue.issue_details.cloudtrail = cloudtrail if config.redshiftEncrypt.in_temp_whitelist(account_id, cluster.name): issue.status = IssueStatus.Tempwhitelist elif config.redshiftEncrypt.in_whitelist(account_id, cluster.name): diff --git a/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py b/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py index 30b69fe3..91e8b2a2 100755 --- a/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py +++ b/hammer/identification/lambdas/s3-acl-issues-identification/describe_s3_bucket_acl.py @@ -17,8 +17,17 @@ def lambda_handler(event, context): payload = json.loads(event["Records"][0]["Sns"]["Message"]) account_id = payload['account_id'] account_name = payload['account_name'] - # if request_id is present in payload then this lambda was called from the API + + # if request_id is present in payload, it means this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + issue_id = None + if cloudtrail: + try: + issue_id = cloudtrail['resource'] + except: + logging.debug(f"No bucketName found in tags for message {cloudtrail}") + except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -38,35 +47,35 @@ def 
lambda_handler(event, context): logging.debug(f"Checking for public S3 ACLs in {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, S3AclIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, S3AclIssue, issue_id) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: issue for issue in open_issues} logging.debug(f"S3 in DDB:\n{open_issues.keys()}") checker = S3BucketsAclChecker(account=account) - if not checker.check(): - return - - for bucket in checker.buckets: - logging.debug(f"Checking {bucket.name}") - if bucket.public: - issue = S3AclIssue(account_id, bucket.name) - issue.issue_details.owner = bucket.owner - issue.issue_details.public_acls = bucket.get_public_acls() - issue.issue_details.tags = bucket.tags + issue_ids_to_check = None if issue_id is None else [issue_id] - if config.s3acl.in_temp_whitelist(account_id, bucket.name): - issue.status = IssueStatus.Tempwhitelist - elif config.s3acl.in_whitelist(account_id, bucket.name): - issue.status = IssueStatus.Whitelisted - else: - issue.status = IssueStatus.Open - logging.debug(f"Setting {bucket.name} status {issue.status}") - IssueOperations.update(ddb_table, issue) - # remove issue id from issues_list_from_db (if exists) - # as we already checked it - open_issues.pop(bucket.name, None) + if checker.check(bucket=issue_ids_to_check): + for bucket in checker.buckets: + logging.debug(f"Checking {bucket.name}") + if bucket.public: + issue = S3AclIssue(account_id, bucket.name) + issue.issue_details.owner = bucket.owner + issue.issue_details.public_acls = bucket.get_public_acls() + issue.issue_details.tags = bucket.tags + issue.issue_details.cloudtrail = cloudtrail + if config.s3acl.in_temp_whitelist(account_id, bucket.name): + issue.status = IssueStatus.Tempwhitelist + elif config.s3acl.in_whitelist(account_id, bucket.name): + issue.status = 
IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {bucket.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(bucket.name, None) logging.debug(f"S3 in DDB:\n{open_issues.keys()}") # all other unresolved issues in DDB are for removed/remediated buckets diff --git a/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py b/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py index e852cf4b..3c308725 100755 --- a/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py +++ b/hammer/identification/lambdas/s3-policy-issues-identification/describe_s3_bucket_policy.py @@ -17,8 +17,15 @@ def lambda_handler(event, context): payload = json.loads(event["Records"][0]["Sns"]["Message"]) account_id = payload['account_id'] account_name = payload['account_name'] - # if request_id is present in payload then this lambda was called from the API + # if request_id is present in payload, it means this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + issue_id = None + if cloudtrail: + try: + issue_id = cloudtrail['resource'] + except: + logging.debug(f"No bucketName found in tags for message {cloudtrail}") except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -38,35 +45,35 @@ def lambda_handler(event, context): logging.debug(f"Checking for public S3 policies in {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, S3PolicyIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, S3PolicyIssue, issue_id) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: 
issue for issue in open_issues} logging.debug(f"S3 in DDB:\n{open_issues.keys()}") checker = S3BucketsPolicyChecker(account=account) - if not checker.check(): - return - - for bucket in checker.buckets: - logging.debug(f"Checking {bucket.name}") - if bucket.public: - issue = S3PolicyIssue(account_id, bucket.name) - issue.issue_details.owner = bucket.owner - issue.issue_details.tags = bucket.tags - issue.issue_details.policy = bucket.policy + issue_ids_to_check = None if issue_id is None else [issue_id] - if config.s3policy.in_temp_whitelist(account_id, bucket.name): - issue.status = IssueStatus.Tempwhitelist - elif config.s3policy.in_whitelist(account_id, bucket.name): - issue.status = IssueStatus.Whitelisted - else: - issue.status = IssueStatus.Open - logging.debug(f"Setting {bucket.name} status {issue.status}") - IssueOperations.update(ddb_table, issue) - # remove issue id from issues_list_from_db (if exists) - # as we already checked it - open_issues.pop(bucket.name, None) + if checker.check(buckets=issue_ids_to_check): + for bucket in checker.buckets: + logging.debug(f"Checking {bucket.name}") + if bucket.public: + issue = S3PolicyIssue(account_id, bucket.name) + issue.issue_details.owner = bucket.owner + issue.issue_details.tags = bucket.tags + issue.issue_details.policy = bucket.policy + issue.issue_details.cloudtrail = cloudtrail + if config.s3policy.in_temp_whitelist(account_id, bucket.name): + issue.status = IssueStatus.Tempwhitelist + elif config.s3policy.in_whitelist(account_id, bucket.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {bucket.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(bucket.name, None) logging.debug(f"S3 in DDB:\n{open_issues.keys()}") # all other unresolved issues in DDB are for removed/remediated buckets diff --git 
a/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py b/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py index 4e14c5fb..c231067f 100644 --- a/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py +++ b/hammer/identification/lambdas/s3-unencrypted-bucket-issues-identification/describe_s3_encryption.py @@ -17,8 +17,16 @@ def lambda_handler(event, context): payload = json.loads(event["Records"][0]["Sns"]["Message"]) account_id = payload['account_id'] account_name = payload['account_name'] - # if request_id is present in payload then this lambda was called from the API + # if request_id is present in payload, it means this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + issue_id = None + if cloudtrail: + try: + issue_id = cloudtrail['resource'] + except: + logging.debug(f"No bucketName found in tags for message {cloudtrail}") + except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -38,34 +46,33 @@ def lambda_handler(event, context): logging.debug(f"Checking for S3 encryption in {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, S3EncryptionIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, S3EncryptionIssue,issue_id) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: issue for issue in open_issues} logging.debug(f"S3 in DDB:\n{open_issues.keys()}") checker = S3EncryptionChecker(account=account) - if not checker.check(): - return - - for bucket in checker.buckets: - logging.debug(f"Checking {bucket.name}") - if not bucket.encrypted: - issue = S3EncryptionIssue(account_id, bucket.name) - issue.issue_details.owner = bucket.owner - 
issue.issue_details.tags = bucket.tags - - if config.s3Encrypt.in_temp_whitelist(account_id, bucket.name): - issue.status = IssueStatus.Tempwhitelist - elif config.s3Encrypt.in_whitelist(account_id, bucket.name): - issue.status = IssueStatus.Whitelisted - else: - issue.status = IssueStatus.Open - logging.debug(f"Setting {bucket.name} status {issue.status}") - IssueOperations.update(ddb_table, issue) - # remove issue id from issues_list_from_db (if exists) - # as we already checked it - open_issues.pop(bucket.name, None) + issue_ids_to_check = None if issue_id is None else [issue_id] + if checker.check(buckets=issue_ids_to_check): + for bucket in checker.buckets: + logging.debug(f"Checking {bucket.name}") + if not bucket.encrypted: + issue = S3EncryptionIssue(account_id, bucket.name) + issue.issue_details.owner = bucket.owner + issue.issue_details.tags = bucket.tags + issue.issue_details.cloudtrail = cloudtrail + if config.s3Encrypt.in_temp_whitelist(account_id, bucket.name): + issue.status = IssueStatus.Tempwhitelist + elif config.s3Encrypt.in_whitelist(account_id, bucket.name): + issue.status = IssueStatus.Whitelisted + else: + issue.status = IssueStatus.Open + logging.debug(f"Setting {bucket.name} status {issue.status}") + IssueOperations.update(ddb_table, issue) + # remove issue id from issues_list_from_db (if exists) + # as we already checked it + open_issues.pop(bucket.name, None) logging.debug(f"S3 in DDB:\n{open_issues.keys()}") # all other unresolved issues in DDB are for removed/remediated buckets diff --git a/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py b/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py index 74a2a47f..b74806b2 100755 --- a/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py +++ b/hammer/identification/lambdas/sg-issues-identification/describe_sec_grps_unrestricted_access.py @@ -23,6 +23,13 @@ def 
lambda_handler(event, context): region = payload['regions'].pop() # if request_id is present in payload then this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + issue_id = None + if cloudtrail: + try: + issue_id = cloudtrail['resource'] + except: + logging.debug(f"No groupId found in tags for message {cloudtrail}") except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -43,7 +50,7 @@ def lambda_handler(event, context): logging.debug(f"Checking for insecure services in {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SecurityGroupIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SecurityGroupIssue,issue_id) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} @@ -51,7 +58,9 @@ def lambda_handler(event, context): checker = SecurityGroupsChecker(account=account, restricted_ports=config.sg.restricted_ports) - if checker.check(): + issue_ids_to_check = None if issue_id is None else [issue_id] + + if checker.check(ids=issue_ids_to_check): for sg in checker.groups: logging.debug(f"Checking {sg.name} ({sg.id})") if not sg.restricted: @@ -64,6 +73,7 @@ def lambda_handler(event, context): issue.issue_details.region = sg.account.region issue.issue_details.tags = sg.tags issue.issue_details.status = sg.status.value + issue.issue_details.cloudtrail = cloudtrail for perm in sg.permissions: for ip_range in perm.ip_ranges: if not ip_range.restricted: diff --git a/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py b/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py index 0750b84c..bf9eca33 100644 --- 
a/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py +++ b/hammer/identification/lambdas/sqs-public-policy-identification/describe_sqs_public_policy.py @@ -23,6 +23,13 @@ def lambda_handler(event, context): # region = payload['region'] # if request_id is present in payload, it means this lambda was called from the API request_id = payload.get('request_id', None) + cloudtrail = payload.get('tags', None) + issue_id = None + if cloudtrail: + try: + issue_id = cloudtrail['resource'] + except: + logging.debug(f"No queueUrl found in tags for message {cloudtrail}") except Exception: logging.exception(f"Failed to parse event\n{event}") return @@ -43,14 +50,16 @@ def lambda_handler(event, context): logging.debug(f"Checking for public SQS policies in {account}") # existing open issues for account to check if resolved - open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue) + open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue, issue_id) # make dictionary for fast search by id # and filter by current region open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region} logging.debug(f"SQS in DDB:\n{open_issues.keys()}") checker = SQSPolicyChecker(account=account) - if checker.check(): + issue_ids_to_check = None if issue_id is None else [issue_id] + + if checker.check(queues=issue_ids_to_check): for queue in checker.queues: logging.debug(f"Checking {queue.name}") if queue.public: @@ -59,7 +68,7 @@ def lambda_handler(event, context): issue.issue_details.name = queue.name issue.issue_details.region = queue.account.region issue.issue_details.policy = queue.policy - + issue.issue_details.cloudtrail = cloudtrail if config.sqspolicy.in_temp_whitelist(account_id, queue.url): issue.status = IssueStatus.Tempwhitelist elif config.sqspolicy.in_whitelist(account_id, queue.url): diff --git a/hammer/library/config.py 
b/hammer/library/config.py index ec89c837..1d6cfde7 100755 --- a/hammer/library/config.py +++ b/hammer/library/config.py @@ -115,7 +115,8 @@ def __init__(self, 'credentials': self.json_load_from_ddb(self._config["credentials"]["ddb.table_name"], self.aws.region, "api"), - 'table': self._config["api"]["ddb.table_name"] + 'table': self._config["api"]["ddb.table_name"], + 'accounts': self._config["api"]["accounts"] }) def get_bu_by_name(self, name): @@ -323,6 +324,10 @@ def url(self): def ddb_table_name(self): return self._config['table'] + @property + def accounts(self): + return self._config["accounts"] + class SlackConfig(object): """ Base class for Slack logging """ diff --git a/hammer/library/ddb_issues.py b/hammer/library/ddb_issues.py index a1cbd6ea..e14c1fb3 100755 --- a/hammer/library/ddb_issues.py +++ b/hammer/library/ddb_issues.py @@ -360,13 +360,14 @@ def update(cls, ddb_table, issue): cls.put(ddb_table, issue) @staticmethod - def get_account_open_issues(ddb_table, account_id, issue_class=None): + def get_account_open_issues(ddb_table, account_id, issue_class=None, issue_id=None): """ Search for account open issues. Search uses filter expressions - may be not efficient enough. 
:param ddb_table: boto3 DDB table resource to search in :param account_id: AWS account id to search issues for :param issue_class: one of Issue class children (issue type to construct) + :param issue_id: filter for one issue to return, used for realtime scanning (issue type to construct) :return: all account open issue """ @@ -374,7 +375,11 @@ def get_account_open_issues(ddb_table, account_id, issue_class=None): response = ddb_table.query(KeyConditionExpression=Key('account_id').eq(account_id), FilterExpression=Attr('status').eq(IssueStatus.Open.value)) for item in response['Items']: - issues.append(Issue.from_dict(item, issue_class)) + if issue_id: + if item['issue_id'] == issue_id: + issues.append(Issue.from_dict(item, issue_class)) + else: + issues.append(Issue.from_dict(item, issue_class)) return issues @staticmethod diff --git a/hammer/library/utility.py b/hammer/library/utility.py index d136d4e3..2dee5f16 100755 --- a/hammer/library/utility.py +++ b/hammer/library/utility.py @@ -14,6 +14,99 @@ from functools import lru_cache from ipwhois import IPWhois +class CloudTrailParser(object): + """ + Creates a CloudTrail parser + """ + + def __init__(self, message): + self.resourceTypes = [ + 'queueUrl', + 'queueName', + 'groupId', + 'bucketName', + 'dBInstanceIdentifier', + 'userName', + 'snapshotId' + ] + + self.objectKey = None + self.message = json.dumps(message) + self.accountID = self.getAccountID(message) + self.region = self.getRegion(message) + self.userArn = self.getUserARN(message) + self.event = self.getCloudWatchEvent(message) + self.eventTime = self.getEventTime(message) + self.resource = self.getResource(message) + + def getAccountID(self, message): + try: + if 'accountId' in message['detail']['userIdentity']: + account_id = message['detail']['userIdentity']['accountId'] + else: + account_id = message['account'] + return account_id + except: + logging.exception(f'No account ID found... 
Cannot perform scan {message}') + + def getRegion(self, message): + regions = None + try: + if 'awsRegion' in message['detail']: + regions = message['detail']['awsRegion'] + else: + regions = message['region'] + except: + logging.debug("No region... Will scan all regions") + return regions + + def getUserARN(self, message): + user_id = None + try: + user_id = message['detail']['userIdentity']['arn'] + except: + logging.debug("No principalId found in message.") + return user_id + + def getCloudWatchEvent(self, message): + try: + cloudwatch_event = message['detail']['eventName'] + return cloudwatch_event + except: + logging.exception(f'Missing Cloudwatch event... {message}') + + def getResource(self, message): + resource = None + try: + request_parameters = message['detail']['requestParameters'] + for key in request_parameters: + if key in self.resourceTypes: + + # Object level cloud trails have an objectkey + if key == 'bucketName': + try: + self.objectKey = request_parameters['key'] + except: + logging.debug("Not an object level CT...") + + # The CloudTrail for CreateQueue is different than all the other Cloudtrail formatting + if key == 'queueName': + resource = message['detail']['responseElements']['queueUrl'] + else: + resource = request_parameters[key] + break + + except: + logging.debug(f'Missing resource field in cloudtrail message: {message}') + return resource + + def getEventTime(self, message): + timestamp = None + try: + timestamp = message['detail']['eventTime'] + except: + logging.debug("No principalId found in message.") + return timestamp def jsonEncoder(obj): if isinstance(obj, datetime):