diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
new file mode 100644
index 0000000..fd318af
--- /dev/null
+++ b/.github/copilot-instructions.md
@@ -0,0 +1,11 @@
+## Coding Standards
+
+- Avoid generating code verbatim from public code examples. Always modify public code so that it is different enough from the original that it will not be mistaken for a copy. When you do so, provide a footnote to the user informing them.
+- Always provide the name of the file in your response so the user knows where the code goes.
+- Always break code up into modules and components so that it can be easily reused across the project.
+- All code you write MUST use safe and secure coding practices. ‘Safe and secure’ includes avoiding cleartext passwords, avoiding hard-coded passwords, and other common security gaps. If the code is not deemed safe and secure, you will be put in the corner until you learn your lesson.
+- All code you write MUST be fully optimized. ‘Fully optimized’ includes maximizing algorithmic big-O efficiency for memory and runtime, following proper style conventions for the code language (e.g. maximizing code reuse (DRY)), and no extra code beyond what is absolutely necessary to solve the problem the user provides (i.e. no technical debt). If the code is not fully optimized, you will be fined $100.
+- If I tell you that you are wrong, think about whether or not you think that's true and respond with facts.
+- Avoid apologizing or making conciliatory statements.
+- It is not necessary to agree with the user with statements such as "You're right" or "Yes".
+- Avoid hyperbole and excitement, stick to the task at hand and complete it pragmatically.
\ No newline at end of file
diff --git a/.github/workflows/CIS-Anchore-Grype.yml b/.github/workflows/CIS-Anchore-Grype.yml
index 1cd623d..a041261 100644
--- a/.github/workflows/CIS-Anchore-Grype.yml
+++ b/.github/workflows/CIS-Anchore-Grype.yml
@@ -33,7 +33,7 @@ jobs:
         uses: actions/checkout@v4
 
       - name: Build an image from Dockerfile
-        uses: docker/build-push-action@v4
+        uses: docker/build-push-action@v6
        with:
          context: ./src/webapp01
          file: ./src/webapp01/Dockerfile
diff --git a/.github/workflows/CIS-Trivy-AquaSecurity.yml b/.github/workflows/CIS-Trivy-AquaSecurity.yml
index 476d5d5..a257ace 100644
--- a/.github/workflows/CIS-Trivy-AquaSecurity.yml
+++ b/.github/workflows/CIS-Trivy-AquaSecurity.yml
@@ -37,7 +37,7 @@ jobs:
           docker build ./src/webapp01 --file ./src/webapp01/Dockerfile --tag ${{ env.imageName }}:${{ env.tag }}
 
       - name: Run Trivy vulnerability scanner
-        uses: aquasecurity/trivy-action@0.29.0
+        uses: aquasecurity/trivy-action@0.30.0
        with:
          image-ref: "${{ env.imageName }}:${{ env.tag }}"
          format: "sarif"
diff --git a/.github/workflows/DAST-ZAP-Zed-Attach-Proxy-Checkmarx.yml b/.github/workflows/DAST-ZAP-Zed-Attach-Proxy-Checkmarx.yml
index 76ee348..63b50e5 100644
--- a/.github/workflows/DAST-ZAP-Zed-Attach-Proxy-Checkmarx.yml
+++ b/.github/workflows/DAST-ZAP-Zed-Attach-Proxy-Checkmarx.yml
@@ -22,6 +22,7 @@ env:
 
 permissions:
   contents: read
+  security-events: write # for github/codeql-action/upload-sarif to upload SARIF results
   #issues: write # to create issues for alerts
 
 jobs:
@@ -51,11 +52,17 @@
        with:
          allow_issue_writing: false
          target: "${{ env.ZAP_TARGET }}"
-      # - name: ZAP Scan
-      #   uses: zaproxy/action-baseline@v0.4.0
-      #   with:
-      #     target: "${{ env.ZAP_TARGET }}"
-      - uses: SvanBoxel/zaproxy-to-ghas@main
+          artifact_name: "zap-full-scan"
+      - name: ZAP Scan
+        uses: zaproxy/action-baseline@v0.14.0
+        with:
+          allow_issue_writing: false
+          target: "${{ env.ZAP_TARGET }}"
+      - name: Show results
+        run: |
+          ls
+      - uses: githubabcs-devops/zap-to-ghas@main
+
       - name: Upload SARIF file
         uses: github/codeql-action/upload-sarif@v3
         with:
diff --git a/.github/workflows/IACS-AquaSecurity-tfsec.yml b/.github/workflows/IACS-AquaSecurity-tfsec.yml
index 7407311..12d81d5 100644
--- a/.github/workflows/IACS-AquaSecurity-tfsec.yml
+++ b/.github/workflows/IACS-AquaSecurity-tfsec.yml
@@ -26,7 +26,7 @@ jobs:
         uses: actions/checkout@v4
 
       - name: Run tfsec
-        uses: aquasecurity/tfsec-sarif-action@v0.1.0
+        uses: aquasecurity/tfsec-sarif-action@v0.1.4
        with:
          sarif_file: tfsec.sarif
diff --git a/.github/workflows/IACS-Checkmarx-kics.yml b/.github/workflows/IACS-Checkmarx-kics.yml
index 037bb46..dab9703 100644
--- a/.github/workflows/IACS-Checkmarx-kics.yml
+++ b/.github/workflows/IACS-Checkmarx-kics.yml
@@ -31,7 +31,7 @@ jobs:
         run: mkdir -p results-dir
 
       - name: Run kics Scan
-        uses: checkmarx/kics-github-action@v2.1.7
+        uses: checkmarx/kics-github-action@v2.1.9
        with:
          path: 'terraform'
          # when provided with a directory on output_path
diff --git a/.github/workflows/MSDO-Microsoft-Security-DevOps.yml b/.github/workflows/MSDO-Microsoft-Security-DevOps.yml
index b8bedf4..facdd02 100644
--- a/.github/workflows/MSDO-Microsoft-Security-DevOps.yml
+++ b/.github/workflows/MSDO-Microsoft-Security-DevOps.yml
@@ -33,12 +33,12 @@ jobs:
       - name: Run Microsoft Security DevOps
         uses: microsoft/security-devops-action@v1.12.0
         id: msdo
-      # with:
+        with:
         # config: string. Optional. A file path to an MSDO configuration file ('*.gdnconfig').
         # policy: 'GitHub' | 'microsoft' | 'none'. Optional. The name of a well-known Microsoft policy. If no configuration file or list of tools is provided, the policy may instruct MSDO which tools to run. Default: GitHub.
         # categories: string. Optional. A comma-separated list of analyzer categories to run. Values: 'code', 'artifacts', 'IaC', 'containers'. Example: 'IaC, containers'. Defaults to all.
         # languages: string. Optional. A comma-separated list of languages to analyze. Example: 'javascript,typescript'. Defaults to all.
-        # tools: string. Optional. A comma-separated list of analyzer tools to run. Values: 'bandit', 'binskim', 'checkov', 'eslint', 'templateanalyzer', 'terrascan', 'trivy'.
+          tools: 'bandit, checkov, templateanalyzer, terrascan, trivy'
 
       # Upload alerts to the Security tab - required for MSDO results to appear in the codeQL security alerts tab on GitHub (Requires GHAS)
       - name: Upload results to Security tab
diff --git a/.github/workflows/SAST-GitHubAdvancedSecurity-CodeQL.yml b/.github/workflows/SAST-GitHubAdvancedSecurity-CodeQL.yml
index 1d49fa8..de81b1e 100644
--- a/.github/workflows/SAST-GitHubAdvancedSecurity-CodeQL.yml
+++ b/.github/workflows/SAST-GitHubAdvancedSecurity-CodeQL.yml
@@ -34,6 +34,8 @@ jobs:
           build-mode: none
         - language: csharp
           build-mode: none
+        - language: python
+          build-mode: none
         - language: javascript-typescript
           build-mode: none
       # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
diff --git a/.github/workflows/SCA-Anchore-Syft-SBOM.yml b/.github/workflows/SCA-Anchore-Syft-SBOM.yml
index 26a945c..7f0c46d 100644
--- a/.github/workflows/SCA-Anchore-Syft-SBOM.yml
+++ b/.github/workflows/SCA-Anchore-Syft-SBOM.yml
@@ -32,7 +32,7 @@ jobs:
         run: docker build ./src/webapp01 --file ./src/webapp01/Dockerfile --tag ${{ env.imageName }}:${{ env.tag }}
 
       - name: Scan the image and upload dependency results
-        uses: anchore/sbom-action@bb716408e75840bbb01e839347cd213767269d4a
+        uses: anchore/sbom-action@e11c554f704a0b820cbf8c51673f6945e0731532
         continue-on-error: true
        with:
          image: "${{ env.imageName }}:${{ env.tag }}"
diff --git a/.github/workflows/SCA-OpenSSF-Scorecard.yml b/.github/workflows/SCA-OpenSSF-Scorecard.yml
new file mode 100644
index 0000000..79b0d20
--- /dev/null
+++ b/.github/workflows/SCA-OpenSSF-Scorecard.yml
@@ -0,0 +1,78 @@
+# OpenSSF Scorecard
+# https://github.com/ossf/scorecard
+# https://scorecard.dev/
+
+name: SCA - OpenSSF Scorecard supply-chain security
+on:
+  # For Branch-Protection check. Only the default branch is supported. See
+  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
+  branch_protection_rule:
+  # To guarantee Maintained check is occasionally updated. See
+  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
+  schedule:
+    - cron: '25 23 * * 3'
+  push:
+    branches: [ "main" ]
+
+# Declare default permissions as read only.
+permissions: read-all
+
+jobs:
+  analysis:
+    name: Scorecard analysis
+    runs-on: ubuntu-latest
+    # `publish_results: true` only works when run from the default branch. conditional can be removed if disabled.
+    if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request'
+    permissions:
+      # Needed to upload the results to code-scanning dashboard.
+      security-events: write
+      # Needed to publish results and get a badge (see publish_results below).
+      id-token: write
+      # Uncomment the permissions below if installing in a private repository.
+      # contents: read
+      # actions: read
+
+    steps:
+      - name: "Checkout code"
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          persist-credentials: false
+
+      - name: "Run analysis"
+        uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
+        with:
+          results_file: results.sarif
+          results_format: sarif
+          # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
+          # - you want to enable the Branch-Protection check on a *public* repository, or
+          # - you are installing Scorecard on a *private* repository
+          # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
+          # repo_token: ${{ secrets.SCORECARD_TOKEN }}
+
+          # Public repositories:
+          #   - Publish results to OpenSSF REST API for easy access by consumers
+          #   - Allows the repository to include the Scorecard badge.
+          #   - See https://github.com/ossf/scorecard-action#publishing-results.
+          # For private repositories:
+          #   - `publish_results` will always be set to `false`, regardless
+          #     of the value entered here.
+          publish_results: true
+
+          # (Optional) Uncomment file_mode if you have a .gitattributes with files marked export-ignore
+          # file_mode: git
+
+      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
+      # format to the repository Actions tab.
+      - name: "Upload artifact"
+        uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
+        with:
+          name: SARIF file
+          path: results.sarif
+          retention-days: 30
+
+      # Upload the results to GitHub's code scanning dashboard (optional).
+      # Commenting out will disable upload of results to your repo's Code Scanning dashboard
+      - name: "Upload to code-scanning"
+        uses: github/codeql-action/upload-sarif@v3
+        with:
+          sarif_file: results.sarif
diff --git a/.github/workflows/cicd.yml b/.github/workflows/cicd.yml
index 0a7bf83..3a464a4 100644
--- a/.github/workflows/cicd.yml
+++ b/.github/workflows/cicd.yml
@@ -54,7 +54,7 @@ jobs:
           app-name: ${{ env.AZURE_WEBAPP_NAME }} # Replace with your app name
           package: '${{ env.AZURE_WEBAPP_PACKAGE_PATH }}/webapp01/bin/publish'
 
-      - uses: azure/docker-login@v1
+      - uses: azure/docker-login@v2
        with:
          login-server: crdevsecopscldev.azurecr.io
          username: ${{ secrets.REGISTRY_USERNAME }}
@@ -63,8 +63,8 @@
           docker build ./src/webapp01 --file ./src/webapp01/Dockerfile -t crdevsecopscldev.azurecr.io/webapp01:${{ github.sha }}
           docker push crdevsecopscldev.azurecr.io/webapp01:${{ github.sha }}
 
-      - name: Disabled Azure Web Apps Deploy
-        uses: azure/webapps-deploy@v2
+      - name: Azure Web Apps Deploy
+        uses: azure/webapps-deploy@v3
        with:
          app-name: ${{ env.AZURE_WEBAPP_NAME }}
          images: 'crdevsecopscldev.azurecr.io/webapp01:${{ github.sha }}'
@@ -81,4 +81,6 @@
        with:
          # This is used for tagging the container image
          version: v1.0.0
-          container-file: ./src/webapp01/Dockerfile
\ No newline at end of file
+          container-file: ./src/webapp01/Dockerfile
+          container-context: ./src/webapp01
+          container-name: "${{ github.repository }}/webapp01"
\ No newline at end of file
diff --git a/.release.yml b/.release.yml
new file mode 100644
index 0000000..ec6045a
--- /dev/null
+++ b/.release.yml
@@ -0,0 +1,2 @@
+name: "gh-aspnet-webapp"
+version: "0.0.1"
diff --git a/README.md b/README.md
index 38a2df0..c5d9911 100644
--- a/README.md
+++ b/README.md
@@ -1,2 +1,5 @@
-# gh-aspnet-webapp-01
-ASP.NET web app Demo e2e
+# DevSecOps Guidelines
+> DevSecOps end-2-end Demo
+
+![image](https://github.com/user-attachments/assets/945085e8-c403-4c20-a1ab-3bf3acf9de2e)
+
diff --git a/samples/azuredeploy.json b/samples/azuredeploy.json
new file mode 100644
index 0000000..0c70ee9
--- /dev/null
+++ b/samples/azuredeploy.json
@@ -0,0 +1,109 @@
+{
+  "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
+  "contentVersion": "1.0.0.0",
+  "metadata": {
+    "_generator": {
+      "name": "bicep",
+      "version": "0.15.31.15270",
+      "templateHash": "11556880252039009077"
+    }
+  },
+  "parameters": {
+    "hostingPlanName": {
+      "type": "string",
+      "minLength": 1,
+      "metadata": {
+        "description": "Name of the hosting plan to use in Azure."
+      }
+    },
+    "webSiteName": {
+      "type": "string",
+      "minLength": 1,
+      "metadata": {
+        "description": "Name of the Azure Web app to create."
+      }
+    },
+    "skuName": {
+      "type": "string",
+      "defaultValue": "F1",
+      "allowedValues": [
+        "F1",
+        "D1",
+        "B1",
+        "B2",
+        "B3",
+        "S1",
+        "S2",
+        "S3",
+        "P1",
+        "P2",
+        "P3",
+        "P4"
+      ],
+      "metadata": {
+        "description": "Describes plan's pricing tier and instance size. Check details at https://azure.microsoft.com/en-us/pricing/details/app-service/"
+      }
+    },
+    "skuCapacity": {
+      "type": "int",
+      "defaultValue": 1,
+      "maxValue": 3,
+      "minValue": 1,
+      "metadata": {
+        "description": "Describes plan's instance count"
+      }
+    },
+    "location": {
+      "type": "string",
+      "defaultValue": "[resourceGroup().location]",
+      "metadata": {
+        "description": "Location for all resources."
+      }
+    }
+  },
+  "resources": [
+    {
+      "type": "Microsoft.Web/serverfarms",
+      "apiVersion": "2022-03-01",
+      "name": "[parameters('hostingPlanName')]",
+      "location": "[parameters('location')]",
+      "tags": {
+        "displayName": "HostingPlan"
+      },
+      "sku": {
+        "name": "[parameters('skuName')]",
+        "capacity": "[parameters('skuCapacity')]"
+      },
+      "properties": {}
+    },
+    {
+      "type": "Microsoft.Web/sites",
+      "apiVersion": "2022-03-01",
+      "name": "[parameters('webSiteName')]",
+      "location": "[parameters('location')]",
+      "tags": {
+        "[format('hidden-related:{0}/providers/Microsoft.Web/serverfarms/{1}', resourceGroup().id, parameters('hostingPlanName'))]": "Resource",
+        "displayName": "Website"
+      },
+      "properties": {
+        "serverFarmId": "[resourceId('Microsoft.Web/serverfarms', parameters('hostingPlanName'))]"
+      },
+      "dependsOn": [
+        "[resourceId('Microsoft.Web/serverfarms', parameters('hostingPlanName'))]"
+      ]
+    },
+    {
+      "type": "Microsoft.Web/sites/config",
+      "apiVersion": "2022-03-01",
+      "name": "[format('{0}/{1}', parameters('webSiteName'), 'web')]",
+      "properties": {
+        "javaVersion": "1.8",
+        "javaContainer": "TOMCAT",
+        "javaContainerVersion": "9.0"
+      },
+      "dependsOn": [
+        "[resourceId('Microsoft.Web/sites', parameters('webSiteName'))]"
+      ]
+    }
+  ]
+}
\ No newline at end of file
diff --git a/samples/eks.tf b/samples/eks.tf
new file mode 100644
index 0000000..99659f3
--- /dev/null
+++ b/samples/eks.tf
@@ -0,0 +1,85 @@
+locals {
+  eks_name = {
+    value = "${local.resource_prefix.value}-eks"
+  }
+}
+
+data aws_iam_policy_document "iam_policy_eks" {
+  statement {
+    effect  = "Allow"
+    actions = ["sts:AssumeRole"]
+    principals {
+      type        = "Service"
+      identifiers = ["eks.amazonaws.com"]
+    }
+  }
+}
+
+resource aws_iam_role "iam_for_eks" {
+  name               = "${local.resource_prefix.value}-iam-for-eks"
+  assume_role_policy = data.aws_iam_policy_document.iam_policy_eks.json
+}
+
+resource aws_iam_role_policy_attachment "policy_attachment-AmazonEKSClusterPolicy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
+  role       = aws_iam_role.iam_for_eks.name
+}
+
+resource aws_iam_role_policy_attachment "policy_attachment-AmazonEKSServicePolicy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
+  role       = aws_iam_role.iam_for_eks.name
+}
+
+resource aws_vpc "eks_vpc" {
+  cidr_block           = "10.10.0.0/16"
+  enable_dns_hostnames = true
+  enable_dns_support   = true
+  tags = {
+    Name = "${local.resource_prefix.value}-eks-vpc"
+  }
+}
+
+resource aws_subnet "eks_subnet1" {
+  vpc_id                  = aws_vpc.eks_vpc.id
+  cidr_block              = "10.10.10.0/24"
+  availability_zone       = var.availability_zone
+  map_public_ip_on_launch = true
+  tags = {
+    Name                                            = "${local.resource_prefix.value}-eks-subnet"
+    "kubernetes.io/cluster/${local.eks_name.value}" = "shared"
+  }
+}
+
+resource aws_subnet "eks_subnet2" {
+  vpc_id                  = aws_vpc.eks_vpc.id
+  cidr_block              = "10.10.11.0/24"
+  availability_zone       = var.availability_zone2
+  map_public_ip_on_launch = true
+  tags = {
+    Name                                            = "${local.resource_prefix.value}-eks-subnet2"
+    "kubernetes.io/cluster/${local.eks_name.value}" = "shared"
+  }
+}
+
+resource aws_eks_cluster "eks_cluster" {
+  name     = local.eks_name.value
+  role_arn = "${aws_iam_role.iam_for_eks.arn}"
+
+  vpc_config {
+    endpoint_private_access = true
+    subnet_ids              = ["${aws_subnet.eks_subnet1.id}", "${aws_subnet.eks_subnet2.id}"]
+  }
+
+  depends_on = [
+    "aws_iam_role_policy_attachment.policy_attachment-AmazonEKSClusterPolicy",
+    "aws_iam_role_policy_attachment.policy_attachment-AmazonEKSServicePolicy",
+  ]
+}
+
+output "endpoint" {
+  value = "${aws_eks_cluster.eks_cluster.endpoint}"
+}
+
+output "kubeconfig-certificate-authority-data" {
+  value = "${aws_eks_cluster.eks_cluster.certificate_authority.0.data}"
+}
diff --git a/samples/example.tf b/samples/example.tf
new file mode 100644
index 0000000..e83411f
--- /dev/null
+++ b/samples/example.tf
@@ -0,0 +1,189 @@
+resource "azurerm_resource_group" "myresourcegroup" {
+  name     = "${var.prefix}-workshop"
+  location = var.location
+
+  tags = {
+    environment = "Production"
+  }
+}
+
+resource "azurerm_virtual_network" "vnet" {
+  name                = "${var.prefix}-vnet"
+  location            = azurerm_resource_group.myresourcegroup.location
+  address_space       = [var.address_space]
+  resource_group_name = azurerm_resource_group.myresourcegroup.name
+}
+
+resource "azurerm_subnet" "subnet" {
+  name                 = "${var.prefix}-subnet"
+  virtual_network_name = azurerm_virtual_network.vnet.name
+  resource_group_name  = azurerm_resource_group.myresourcegroup.name
+  address_prefixes     = [var.subnet_prefix]
+}
+
+resource "azurerm_network_security_group" "catapp-sg" {
+  name                = "${var.prefix}-sg"
+  location            = var.location
+  resource_group_name = azurerm_resource_group.myresourcegroup.name
+
+  security_rule {
+    name                       = "HTTP"
+    priority                   = 100
+    direction                  = "Inbound"
+    access                     = "Allow"
+    protocol                   = "Tcp"
+    source_port_range          = "*"
+    destination_port_range     = "80"
+    source_address_prefix      = "*"
+    destination_address_prefix = "*"
+  }
+
+  security_rule {
+    name                       = "HTTPS"
+    priority                   = 102
+    direction                  = "Inbound"
+    access                     = "Allow"
+    protocol                   = "Tcp"
+    source_port_range          = "*"
+    destination_port_range     = "443"
+    source_address_prefix      = "*"
+    destination_address_prefix = "*"
+  }
+
+  security_rule {
+    name                       = "SSH"
+    priority                   = 101
+    direction                  = "Inbound"
+    access                     = "Allow"
+    protocol                   = "Tcp"
+    source_port_range          = "*"
+    destination_port_range     = "22"
+    source_address_prefix      = "*"
+    destination_address_prefix = "*"
+  }
+}
+
+resource "azurerm_network_interface" "catapp-nic" {
+  name                = "${var.prefix}-catapp-nic"
+  location            = var.location
+  resource_group_name = azurerm_resource_group.myresourcegroup.name
+
+  ip_configuration {
+    name                          = "${var.prefix}ipconfig"
+    subnet_id                     = azurerm_subnet.subnet.id
+    private_ip_address_allocation = "Dynamic"
+    public_ip_address_id          = azurerm_public_ip.catapp-pip.id
+  }
+}
+
+resource "azurerm_network_interface_security_group_association" "catapp-nic-sg-ass" {
+  network_interface_id      = azurerm_network_interface.catapp-nic.id
+  network_security_group_id = azurerm_network_security_group.catapp-sg.id
+}
+
+resource "azurerm_public_ip" "catapp-pip" {
+  name                = "${var.prefix}-ip"
+  location            = var.location
+  resource_group_name = azurerm_resource_group.myresourcegroup.name
+  allocation_method   = "Dynamic"
+  domain_name_label   = "${var.prefix}-meow"
+}
+
+resource "azurerm_virtual_machine" "catapp" {
+  name                = "${var.prefix}-meow"
+  location            = var.location
+  resource_group_name = azurerm_resource_group.myresourcegroup.name
+  vm_size             = var.vm_size
+
+  network_interface_ids         = [azurerm_network_interface.catapp-nic.id]
+  delete_os_disk_on_termination = "true"
+
+  storage_image_reference {
+    publisher = var.image_publisher
+    offer     = var.image_offer
+    sku       = var.image_sku
+    version   = var.image_version
+  }
+
+  storage_os_disk {
+    name              = "${var.prefix}-osdisk"
+    managed_disk_type = "Standard_LRS"
+    caching           = "ReadWrite"
+    create_option     = "FromImage"
+  }
+
+  os_profile {
+    computer_name  = var.prefix
+    admin_username = var.admin_username
+    admin_password = var.admin_password
+  }
+
+  os_profile_linux_config {
+    disable_password_authentication = false
+  }
+
+  tags = {}
+
+  # Added to allow destroy to work correctly.
+  depends_on = [azurerm_network_interface_security_group_association.catapp-nic-sg-ass]
+}
+
+# We're using a little trick here so we can run the provisioner without
+# destroying the VM. Do not do this in production.
+
+# If you need ongoing management (Day N) of your virtual machines a tool such
+# as Chef or Puppet is a better choice. These tools track the state of
+# individual files and can keep them in the correct configuration.
+
+# Here we do the following steps:
+# Sync everything in files/ to the remote VM.
+# Set up some environment variables for our script.
+# Add execute permissions to our scripts.
+# Run the deploy_app.sh script.
+resource "null_resource" "configure-cat-app" {
+  depends_on = [
+    azurerm_virtual_machine.catapp,
+  ]
+
+  # Terraform 0.11
+  # triggers {
+  #   build_number = "${timestamp()}"
+  # }
+
+  # Terraform 0.12
+  triggers = {
+    build_number = timestamp()
+  }
+
+  provisioner "file" {
+    source      = "files/"
+    destination = "/home/${var.admin_username}/"
+
+    connection {
+      type     = "ssh"
+      user     = var.admin_username
+      password = var.admin_password
+      host     = azurerm_public_ip.catapp-pip.fqdn
+    }
+  }
+
+  provisioner "remote-exec" {
+    inline = [
+      "sudo apt -y update",
+      "sleep 15",
+      "sudo apt -y update",
+      "sudo apt -y install apache2",
+      "sudo systemctl start apache2",
+      "sudo chown -R ${var.admin_username}:${var.admin_username} /var/www/html",
+      "chmod +x *.sh",
+      "PLACEHOLDER=${var.placeholder} WIDTH=${var.width} HEIGHT=${var.height} PREFIX=${var.prefix} ./deploy_app.sh",
+    ]
+
+    connection {
+      type     = "ssh"
+      user     = var.admin_username
+      password = var.admin_password
+      host     = azurerm_public_ip.catapp-pip.fqdn
+    }
+  }
+}
diff --git a/samples/gke.tf b/samples/gke.tf
new file mode 100644
index 0000000..63edc2f
--- /dev/null
+++ b/samples/gke.tf
@@ -0,0 +1,31 @@
+data "google_compute_zones" "available_zones" {
+  project = var.project
+  region  = var.region
+}
+
+resource "google_container_cluster" "workload_cluster" {
+  name               = "terragoat-${var.environment}-cluster"
+  logging_service    = "none"
+  location           = var.region
+  initial_node_count = 1
+
+  enable_legacy_abac       = true
+  monitoring_service       = "none"
+  remove_default_node_pool = true
+  network                  = google_compute_network.vpc.name
+  subnetwork               = google_compute_subnetwork.public-subnetwork.name
+  master_authorized_networks_config {
+    cidr_blocks {
+      cidr_block = "0.0.0.0/0"
+    }
+  }
+}
+
+resource google_container_node_pool "custom_node_pool" {
+  cluster  = google_container_cluster.workload_cluster.name
+  location = var.region
+
+  node_config {
+    image_type = "Ubuntu"
+  }
+}
diff --git a/samples/index.js b/samples/index.js
new file mode 100644
index 0000000..2e1ae6d
--- /dev/null
+++ b/samples/index.js
@@ -0,0 +1,345 @@
+var chownr = require('chownr')
+var tar = require('tar-stream')
+var pump = require('pump')
+var mkdirp = require('mkdirp')
+var fs = require('fs')
+var path = require('path')
+var os = require('os')
+
+var win32 = os.platform() === 'win32'
+
+var noop = function () {}
+
+var echo = function (name) {
+  return name
+}
+
+var normalize = !win32 ? echo : function (name) {
+  return name.replace(/\\/g, '/').replace(/[:?<>|]/g, '_')
+}
+
+var statAll = function (fs, stat, cwd, ignore, entries, sort) {
+  var queue = entries || ['.']
+
+  return function loop (callback) {
+    if (!queue.length) return callback()
+    var next = queue.shift()
+    var nextAbs = path.join(cwd, next)
+
+    stat(nextAbs, function (err, stat) {
+      if (err) return callback(err)
+
+      if (!stat.isDirectory()) return callback(null, next, stat)
+
+      fs.readdir(nextAbs, function (err, files) {
+        if (err) return callback(err)
+
+        if (sort) files.sort()
+        for (var i = 0; i < files.length; i++) {
+          if (!ignore(path.join(cwd, next, files[i]))) queue.push(path.join(next, files[i]))
+        }
+
+        callback(null, next, stat)
+      })
+    })
+  }
+}
+
+var strip = function (map, level) {
+  return function (header) {
+    header.name = header.name.split('/').slice(level).join('/')
+
+    var linkname = header.linkname
+    if (linkname && (header.type === 'link' || path.isAbsolute(linkname))) {
+      header.linkname = linkname.split('/').slice(level).join('/')
+    }
+
+    return map(header)
+  }
+}
+
+exports.pack = function (cwd, opts) {
+  if (!cwd) cwd = '.'
+  if (!opts) opts = {}
+
+  var xfs = opts.fs || fs
+  var ignore = opts.ignore || opts.filter || noop
+  var map = opts.map || noop
+  var mapStream = opts.mapStream || echo
+  var statNext = statAll(xfs, opts.dereference ? xfs.stat : xfs.lstat, cwd, ignore, opts.entries, opts.sort)
+  var strict = opts.strict !== false
+  var umask = typeof opts.umask === 'number' ? ~opts.umask : ~processUmask()
+  var dmode = typeof opts.dmode === 'number' ? opts.dmode : 0
+  var fmode = typeof opts.fmode === 'number' ? opts.fmode : 0
+  var pack = opts.pack || tar.pack()
+  var finish = opts.finish || noop
+
+  if (opts.strip) map = strip(map, opts.strip)
+
+  if (opts.readable) {
+    dmode |= parseInt(555, 8)
+    fmode |= parseInt(444, 8)
+  }
+  if (opts.writable) {
+    dmode |= parseInt(333, 8)
+    fmode |= parseInt(222, 8)
+  }
+
+  var onsymlink = function (filename, header) {
+    xfs.readlink(path.join(cwd, filename), function (err, linkname) {
+      if (err) return pack.destroy(err)
+      header.linkname = normalize(linkname)
+      pack.entry(header, onnextentry)
+    })
+  }
+
+  var onstat = function (err, filename, stat) {
+    if (err) return pack.destroy(err)
+    if (!filename) {
+      if (opts.finalize !== false) pack.finalize()
+      return finish(pack)
+    }
+
+    if (stat.isSocket()) return onnextentry() // tar does not support sockets...
+
+    var header = {
+      name: normalize(filename),
+      mode: (stat.mode | (stat.isDirectory() ? dmode : fmode)) & umask,
+      mtime: stat.mtime,
+      size: stat.size,
+      type: 'file',
+      uid: stat.uid,
+      gid: stat.gid
+    }
+
+    if (stat.isDirectory()) {
+      header.size = 0
+      header.type = 'directory'
+      header = map(header) || header
+      return pack.entry(header, onnextentry)
+    }
+
+    if (stat.isSymbolicLink()) {
+      header.size = 0
+      header.type = 'symlink'
+      header = map(header) || header
+      return onsymlink(filename, header)
+    }
+
+    // TODO: add fifo etc...
+
+    header = map(header) || header
+
+    if (!stat.isFile()) {
+      if (strict) return pack.destroy(new Error('unsupported type for ' + filename))
+      return onnextentry()
+    }
+
+    var entry = pack.entry(header, onnextentry)
+    if (!entry) return
+
+    var rs = mapStream(xfs.createReadStream(path.join(cwd, filename)), header)
+
+    rs.on('error', function (err) { // always forward errors on destroy
+      entry.destroy(err)
+    })
+
+    pump(rs, entry)
+  }
+
+  var onnextentry = function (err) {
+    if (err) return pack.destroy(err)
+    statNext(onstat)
+  }
+
+  onnextentry()
+
+  return pack
+}
+
+var head = function (list) {
+  return list.length ? list[list.length - 1] : null
+}
+
+var processGetuid = function () {
+  return process.getuid ? process.getuid() : -1
+}
+
+var processUmask = function () {
+  return process.umask ? process.umask() : 0
+}
+
+exports.extract = function (cwd, opts) {
+  if (!cwd) cwd = '.'
+  if (!opts) opts = {}
+
+  var xfs = opts.fs || fs
+  var ignore = opts.ignore || opts.filter || noop
+  var map = opts.map || noop
+  var mapStream = opts.mapStream || echo
+  var own = opts.chown !== false && !win32 && processGetuid() === 0
+  var extract = opts.extract || tar.extract()
+  var stack = []
+  var now = new Date()
+  var umask = typeof opts.umask === 'number' ? ~opts.umask : ~processUmask()
+  var dmode = typeof opts.dmode === 'number' ? opts.dmode : 0
+  var fmode = typeof opts.fmode === 'number' ? opts.fmode : 0
+  var strict = opts.strict !== false
+
+  if (opts.strip) map = strip(map, opts.strip)
+
+  if (opts.readable) {
+    dmode |= parseInt(555, 8)
+    fmode |= parseInt(444, 8)
+  }
+  if (opts.writable) {
+    dmode |= parseInt(333, 8)
+    fmode |= parseInt(222, 8)
+  }
+
+  var utimesParent = function (name, cb) { // we just set the mtime on the parent dir again everytime we write an entry
+    var top
+    while ((top = head(stack)) && name.slice(0, top[0].length) !== top[0]) stack.pop()
+    if (!top) return cb()
+    xfs.utimes(top[0], now, top[1], cb)
+  }
+
+  var utimes = function (name, header, cb) {
+    if (opts.utimes === false) return cb()
+
+    if (header.type === 'directory') return xfs.utimes(name, now, header.mtime, cb)
+    if (header.type === 'symlink') return utimesParent(name, cb) // TODO: how to set mtime on link?
+
+    xfs.utimes(name, now, header.mtime, function (err) {
+      if (err) return cb(err)
+      utimesParent(name, cb)
+    })
+  }
+
+  var chperm = function (name, header, cb) {
+    var link = header.type === 'symlink'
+    var chmod = link ? xfs.lchmod : xfs.chmod
+    var chown = link ? xfs.lchown : xfs.chown
+
+    if (!chmod) return cb()
+
+    var mode = (header.mode | (header.type === 'directory' ? dmode : fmode)) & umask
+    chmod(name, mode, function (err) {
+      if (err) return cb(err)
+      if (!own) return cb()
+      if (!chown) return cb()
+      chown(name, header.uid, header.gid, cb)
+    })
+  }
+
+  extract.on('entry', function (header, stream, next) {
+    header = map(header) || header
+    header.name = normalize(header.name)
+    var name = path.join(cwd, path.join('/', header.name))
+
+    if (ignore(name, header)) {
+      stream.resume()
+      return next()
+    }
+
+    var stat = function (err) {
+      if (err) return next(err)
+      utimes(name, header, function (err) {
+        if (err) return next(err)
+        if (win32) return next()
+        chperm(name, header, next)
+      })
+    }
+
+    var onsymlink = function () {
+      if (win32) return next() // skip symlinks on win for now before it can be tested
+      xfs.unlink(name, function () {
+        xfs.symlink(header.linkname, name, stat)
+      })
+    }
+
+    var onlink = function () {
+      if (win32) return next() // skip links on win for now before it can be tested
+      xfs.unlink(name, function () {
+        var srcpath = path.resolve(cwd, header.linkname)
+
+        xfs.link(srcpath, name, function (err) {
+          if (err && err.code === 'EPERM' && opts.hardlinkAsFilesFallback) {
+            stream = xfs.createReadStream(srcpath)
+            return onfile()
+          }
+
+          stat(err)
+        })
+      })
+    }
+
+    var onfile = function () {
+      var ws = xfs.createWriteStream(name)
+      var rs = mapStream(stream, header)
+
+      ws.on('error', function (err) { // always forward errors on destroy
+        rs.destroy(err)
+      })
+
+      pump(rs, ws, function (err) {
+        if (err) return next(err)
+        ws.on('close', stat)
+      })
+    }
+
+    if (header.type === 'directory') {
+      stack.push([name, header.mtime])
+      return mkdirfix(name, {
+        fs: xfs, own: own, uid: header.uid, gid: header.gid
+      }, stat)
+    }
+
+    var dir = path.dirname(name)
+
+    validate(xfs, dir, path.join(cwd, '.'), function (err, valid) {
+      if (err) return next(err)
+      if (!valid) return next(new Error(dir + ' is not a valid path'))
+
+      mkdirfix(dir, {
+        fs: xfs, own: own, uid: header.uid, gid: header.gid
+      }, function (err) {
+        if (err) return next(err)
+
+        switch (header.type) {
+          case 'file': return onfile()
+          case 'link': return onlink()
+          case 'symlink': return onsymlink()
+        }
+
+        if (strict) return next(new Error('unsupported type for ' + name + ' (' + header.type + ')'))
+
+        stream.resume()
+        next()
+      })
+    })
+  })
+
+  if (opts.finish) extract.on('finish', opts.finish)
+
+  return extract
+}
+
+function validate (fs, name, root, cb) {
+  if (name === root) return cb(null, true)
+  fs.lstat(name, function (err, st) {
+    if (err && err.code !== 'ENOENT') return cb(err)
+    if (err || st.isDirectory()) return validate(fs, path.join(name, '..'), root, cb)
+    cb(null, false)
+  })
+}
+
+function mkdirfix (name, opts, cb) {
+  // Callers pass the filesystem as `opts.fs`, so forward that to mkdirp.
+  mkdirp(name, {fs: opts.fs}, function (err, made) {
+    if (!err && made && opts.own) {
+      chownr(made, opts.uid, opts.gid, cb)
+    } else {
+      cb(err)
+    }
+  })
+}
diff --git a/samples/insecure.py b/samples/insecure.py
index aa9007d..da0b2b9 100644
--- a/samples/insecure.py
+++ b/samples/insecure.py
@@ -1,26 +1,26 @@
-# Commented out sample to pass scanning
-#
-#import hashlib
-# print("I am very insecure. Bandit thinks so too.")
-# #B110
-# xs=[1,2,3,4,5,6,7,8]
-# try:
-#     print(xs[7])
-#     print(xs[8])
-# except: pass
+#Commented out sample to pass scanning
 
-# ys=[1, 2, None, None]
-# for y in ys:
-#     try:
-#         print(str(y+3)) #TypeErrors ahead
-#     except: continue #not how to handle them
+import hashlib
+print("I am very insecure. Bandit thinks so too.")
+#B110
+xs=[1,2,3,4,5,6,7,8]
+try:
+    print(xs[7])
+    print(xs[8])
+except: pass
 
-# #some imports
-# import telnetlib
-# import ftplib
+ys=[1, 2, None, None]
+for y in ys:
+    try:
+        print(str(y+3)) #TypeErrors ahead
+    except: continue #not how to handle them
 
-# #B303 and B324
-# s = b"I am a string"
-# print("MD5: " +hashlib.md5(s).hexdigest())
-# print("SHA1: " +hashlib.sha1(s).hexdigest())
-# print("SHA256: " +hashlib.sha256(s).hexdigest())
+#some imports
+import telnetlib
+import ftplib
+
+#B303 and B324
+s = b"I am a string"
+print("MD5: " +hashlib.md5(s).hexdigest())
+print("SHA1: " +hashlib.sha1(s).hexdigest())
+print("SHA256: " +hashlib.sha256(s).hexdigest())
diff --git a/samples/main-sample1.tf b/samples/main-sample1.tf
new file mode 100644
index 0000000..bd9b786
--- /dev/null
+++ b/samples/main-sample1.tf
@@ -0,0 +1,9 @@
+resource "aws_cloudfront_distribution" "bad_example" {
+  default_cache_behavior {
+    viewer_protocol_policy = "allow-all"
+  }
+  viewer_certificate {
+    cloudfront_default_certificate = true
+    minimum_protocol_version       = "TLSv1.0"
+  }
+}
diff --git a/samples/mongodb.go b/samples/mongodb.go
new file mode 100644
index 0000000..656c78b
--- /dev/null
+++ b/samples/mongodb.go
@@ -0,0 +1,75 @@
+package main
+
+import (
+	"context"
+	"crypto/tls"
+	"log"
+	"net/http"
+	"os"
+
+	"go.mongodb.org/mongo-driver/mongo"
+	"go.mongodb.org/mongo-driver/mongo/options"
+)
+
+func connectToMongoDB() (*mongo.Collection, error) {
+	// Get database uri from environment variable
+	mongoUri := os.Getenv("ORDER_DB_URI")
+	if mongoUri == "" {
+		log.Printf("ORDER_DB_URI is not set")
+		return nil, http.ErrAbortHandler
+	}
+
+	// get database name from environment variable
+	mongoDb := os.Getenv("ORDER_DB_NAME")
+	if mongoDb == "" {
+		log.Printf("ORDER_DB_NAME is not set")
+		return nil, http.ErrAbortHandler
+	}
+
+	// get database collection name from environment variable
+	mongoCollection := os.Getenv("ORDER_DB_COLLECTION_NAME")
+	if mongoCollection == "" {
+		log.Printf("ORDER_DB_COLLECTION_NAME is not set")
+		return nil, http.ErrAbortHandler
+	}
+
+	// get database username from environment variable
+	mongoUser := os.Getenv("ORDER_DB_USERNAME")
+
+	// get database password from environment variable
+	mongoPassword := os.Getenv("ORDER_DB_PASSWORD")
+
+	// create a context
+	ctx := context.Background()
+
+	// create a mongo client
+	var clientOptions *options.ClientOptions
+	if mongoUser == "" && mongoPassword == "" {
+		clientOptions = options.Client().ApplyURI(mongoUri)
+	} else {
+		clientOptions = options.Client().ApplyURI(mongoUri).
+			SetAuth(options.Credential{
+				Username: mongoUser,
+				Password: mongoPassword,
+			}).
+			SetTLSConfig(&tls.Config{InsecureSkipVerify: true})
+	}
+
+	mongoClient, err := mongo.Connect(ctx, clientOptions)
+	if err != nil {
+		log.Printf("failed to connect to mongodb: %s", err)
+		return nil, err
+	}
+
+	err = mongoClient.Ping(ctx, nil)
+	if err != nil {
+		log.Printf("failed to ping database: %s", err)
+	} else {
+		log.Printf("pong from database")
+	}
+
+	// get a handle for the collection
+	collection := mongoClient.Database(mongoDb).Collection(mongoCollection)
+
+	return collection, nil
+}
diff --git a/samples/network.tf b/samples/network.tf
new file mode 100644
index 0000000..d707b2a
--- /dev/null
+++ b/samples/network.tf
@@ -0,0 +1,183 @@
+#
+# Creates a network, subnets and network security rules before building an Application Gateway
+# that we can use to front the services in the Kubernetes Cluster.
+#
+
+resource "azurerm_network_security_group" "app_gateway" {
+  name                = "${var.cluster_name}-app-gateway"
+  location            = var.region
+  resource_group_name = azurerm_resource_group.aks_cluster_resource_group.name
+}
+
+
+resource "azurerm_network_security_rule" "gateway_manager" {
+  name      = "${var.cluster_name}-app-gateway-gateway-manager"
+  priority  = 200
+  direction = "Inbound"
+  access    = "Allow"
+  protocol  = "Tcp"
+
+  source_address_prefix = "GatewayManager"
+  source_port_range     = "*"
+
+  destination_address_prefix = "*"
+  destination_port_range     = "65200-65535"
+
+  resource_group_name         = azurerm_resource_group.aks_cluster_resource_group.name
+  network_security_group_name = azurerm_network_security_group.app_gateway.name
+}
+
+
+resource "azurerm_network_security_rule" "gateway_cidr" {
+  name      = "${var.cluster_name}-app-gateway-gateway-cidr"
+  priority  = 201
+  direction = "Inbound"
+  access    = "Allow"
+  protocol  = "Tcp"
+
+  source_address_prefix = var.app_gateway_gateway_subnet_address_prefix
+  source_port_range     = "*"
+
+  destination_address_prefix = "*"
+  destination_port_range     = "65200-65535"
+
+  resource_group_name         = azurerm_resource_group.aks_cluster_resource_group.name
+  network_security_group_name = azurerm_network_security_group.app_gateway.name
+}
+
+resource "azurerm_network_security_rule" "azure_loadbalancer" {
+  name      = "${var.cluster_name}-app-gateway-loadbalancer"
+  priority  = 210
+  direction = "Inbound"
+  access    = "Allow"
+  protocol  = "*"
+
+  source_address_prefix = "AzureLoadBalancer"
+  source_port_range     = "*"
+
+  destination_address_prefix = "*"
+  destination_port_range     = "*"
+
+  resource_group_name         = azurerm_resource_group.aks_cluster_resource_group.name
+  network_security_group_name = azurerm_network_security_group.app_gateway.name
+}
+
+
+resource "azurerm_public_ip" "gateway" {
+  name                = "gateway-pip"
+  location            = azurerm_resource_group.aks_cluster_resource_group.location
+  resource_group_name = azurerm_resource_group.aks_cluster_resource_group.name
+  allocation_method   = "Static"
+
+  sku = var.app_gateway_static_ip_sku
+
+  tags = merge(
+    local.default_tags,
+    var.resource_tags,
+  )
+}
+
+
+resource "azurerm_virtual_network" "application_gateway" {
+  name                = "k8s-app-gateway-network"
+  location            = azurerm_resource_group.aks_cluster_resource_group.location
+  resource_group_name = azurerm_resource_group.aks_cluster_resource_group.name
+  address_space       = [var.app_gateway_vnet_address_prefix]
+
+  tags = merge(
+    local.default_tags,
+    var.resource_tags,
+  )
+}
+
+
+resource "azurerm_subnet" "aks_cluster" {
+  name                 = "akscluster"
+  resource_group_name  = azurerm_resource_group.aks_cluster_resource_group.name
+  virtual_network_name = azurerm_virtual_network.application_gateway.name
+  address_prefixes     = [var.app_gateway_aks_subnet_address_prefix]
+}
+
+
+resource "azurerm_subnet" "app_gateway" {
+  name                 = "appgw"
+  resource_group_name  = azurerm_resource_group.aks_cluster_resource_group.name
+  virtual_network_name = azurerm_virtual_network.application_gateway.name
+  address_prefixes     = [var.app_gateway_gateway_subnet_address_prefix]
+}
+
+
+resource "azurerm_subnet_network_security_group_association" "app_gateway" {
+  subnet_id                 = azurerm_subnet.app_gateway.id
+  network_security_group_id = azurerm_network_security_group.app_gateway.id
+}
+
+
+resource "azurerm_application_gateway" "network" {
+  name                = "k8s-app-gateway"
+  location            = azurerm_resource_group.aks_cluster_resource_group.location
+  resource_group_name = azurerm_resource_group.aks_cluster_resource_group.name
+
+  sku {
+    name     = var.app_gateway_sku
+    tier     = var.app_gateway_sku_tier
+    capacity = 2
+  }
+
+  gateway_ip_configuration {
+    name      = "appGatewayIpConfiguration"
+    subnet_id = azurerm_subnet.app_gateway.id
+  }
+
+  frontend_port {
+    name = local.frontend_port_name
+    port = 80
+  }
+
+  frontend_port {
+    name = "httpsPort"
+    port = 443
+  }
+
+  frontend_ip_configuration {
+    name                 = local.frontend_ip_configuration_name
+    public_ip_address_id = azurerm_public_ip.gateway.id
+  }
+
+  backend_address_pool {
+    name = local.backend_address_pool_name
+  }
+
+  backend_http_settings {
+    name                  = local.http_setting_name
+    cookie_based_affinity = "Disabled"
+    port                  = 80
+    protocol              = "Http"
+    request_timeout       = 1
+  }
+
+  http_listener {
+    name                           = local.listener_name
+    frontend_ip_configuration_name = local.frontend_ip_configuration_name
+    frontend_port_name             = local.frontend_port_name
+    protocol                       = "Http"
+  }
+
+  request_routing_rule {
+    name                       = local.request_routing_rule_name
+    rule_type                  = "Basic"
+    http_listener_name         = local.listener_name
+    backend_address_pool_name  = local.backend_address_pool_name
+    backend_http_settings_name = local.http_setting_name
+  }
+
+  tags = merge(
+    local.default_tags,
+    var.resource_tags,
+  )
+
+  depends_on = [
+    azurerm_virtual_network.application_gateway,
+    azurerm_public_ip.gateway
+  ]
+}
diff --git a/samples/python-routes.py b/samples/python-routes.py
new file mode 100644
index 0000000..bab7594
--- /dev/null
+++ b/samples/python-routes.py
@@ -0,0 +1,30 @@
+
+from flask import request, render_template, make_response
+
+from server.webapp import flaskapp, cursor
+from server.models import Book
+
+
+@flaskapp.route('/')
+def index():
+    name = request.args.get('name')
+    author = request.args.get('author')
+    read = bool(request.args.get('read'))
+
+    if name:
+        cursor.execute(
+            "SELECT * FROM books WHERE name LIKE '%" + name + "%'"
+        )
+        books = [Book(*row) for row in cursor]
+
+    elif author:
+        cursor.execute(
+            "SELECT * FROM books WHERE author LIKE '%" + author + "%'"
+        )
+        books = [Book(*row) for row in cursor]
+
+    else:
+        cursor.execute("SELECT name, author, read FROM books")
+        books = [Book(*row) for row in cursor]
+
+    return render_template('books.html', books=books)
diff --git a/samples/server.Dockerfile b/samples/server.Dockerfile
new file mode 100644
index 0000000..dc1411a
--- /dev/null
+++ b/samples/server.Dockerfile
@@ -0,0 +1,20 @@
+FROM python:3.7-alpine
+
+ARG user=python
+ARG home=/home/$user
+
+RUN adduser \
+    --disabled-password \
+    --home $home \
+    $user
+
+WORKDIR /myapp
+ENV PYTHONPATH "${PYTHONPATH}:/myapp"
+
+COPY server .
+
+RUN python3 -m pip install pipenv && \
+    python3 -m pipenv install --system
+
+CMD ["python", "-m", "myapp"]
+
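Note on samples/python-routes.py above: it splices request parameters directly into SQL, which is exactly the injection pattern the scanners wired up in this PR are meant to flag. For contrast, here is a minimal sketch of the parameterized alternative in the web app's own stack (C# with Microsoft.Data.SqlClient, which this PR already references); the connection string, table, and column names are illustrative, not part of the repository:

    using Microsoft.Data.SqlClient;

    class BookQueries
    {
        // Illustrative only: real code should read the connection string from
        // configuration or a secret store, never a hard-coded literal.
        private const string ConnStr =
            "Server=localhost;Database=TestDB;Integrated Security=true;";

        public static void PrintBooksByName(string name)
        {
            using var conn = new SqlConnection(ConnStr);
            conn.Open();

            // The user-supplied value travels as a typed parameter, so it cannot
            // close the string literal and rewrite the query the way the
            // concatenated LIKE clause in python-routes.py can.
            using var cmd = new SqlCommand(
                "SELECT name, author FROM books WHERE name LIKE @name", conn);
            cmd.Parameters.AddWithValue("@name", "%" + name + "%");

            using var reader = cmd.ExecuteReader();
            while (reader.Read())
            {
                System.Console.WriteLine($"{reader.GetString(0)} by {reader.GetString(1)}");
            }
        }
    }

The same placeholder discipline applies in the Python sample itself (e.g. cursor.execute("... LIKE ?", (pattern,)) for drivers that use ? placeholders).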
diff --git a/src/webapp01/Pages/About.cshtml b/src/webapp01/Pages/About.cshtml
new file mode 100644
index 0000000..a5cb288
--- /dev/null
+++ b/src/webapp01/Pages/About.cshtml
@@ -0,0 +1,71 @@
+@page
+@model AboutModel
+@{
+    ViewData["Title"] = "About GitHub Advanced Security";
+}
+
+<div class="container">
+    <h1>@ViewData["Title"]</h1>
+
+    <h2>What is GitHub Advanced Security (GHAS)?</h2>
+    <p>
+        GitHub Advanced Security (GHAS) is a suite of security features that help you identify and fix vulnerabilities in your code.
+        It provides tools to improve your security posture and protect your software development lifecycle.
+    </p>
+
+    <h2>Key Features of GHAS:</h2>
+    <ul>
+        <li>
+            <strong>Code Scanning:</strong> Automatically identify vulnerabilities in your code using CodeQL, the semantic code analysis engine.
+        </li>
+        <li>
+            <strong>Secret Scanning:</strong> Detect secrets and credentials committed to your repositories.
+        </li>
+        <li>
+            <strong>Dependency Review:</strong> Get insights about dependency changes and their security impact when reviewing pull requests.
+        </li>
+        <li>
+            <strong>Dependabot:</strong> Receive automatic alerts and pull requests to update vulnerable dependencies.
+        </li>
+        <li>
+            <strong>Security Overview:</strong> Get a comprehensive view of your security alerts across repositories and organizations.
+        </li>
+    </ul>
+
+    <h2>Benefits</h2>
+    <p>
+        With GitHub Advanced Security, teams can:
+    </p>
+    <ul>
+        <li>Find security vulnerabilities before they reach production</li>
+        <li>Shift security left in the development lifecycle</li>
+        <li>Meet compliance requirements with built-in reports</li>
+        <li>Automate security checks in CI/CD pipelines</li>
+        <li>Reduce the risk of data breaches and security incidents</li>
+    </ul>
+
+    <p>
+        Learn more about GHAS at <a href="https://github.com/features/security">GitHub's security features page</a>.
+    </p>
+</div>
\ No newline at end of file
diff --git a/src/webapp01/Pages/About.cshtml.cs b/src/webapp01/Pages/About.cshtml.cs
new file mode 100644
index 0000000..6e03480
--- /dev/null
+++ b/src/webapp01/Pages/About.cshtml.cs
@@ -0,0 +1,12 @@
+using Microsoft.AspNetCore.Mvc;
+using Microsoft.AspNetCore.Mvc.RazorPages;
+
+namespace webapp01.Pages
+{
+    public class AboutModel : PageModel
+    {
+        public void OnGet()
+        {
+        }
+    }
+}
\ No newline at end of file
diff --git a/src/webapp01/Pages/DevSecOps.cshtml b/src/webapp01/Pages/DevSecOps.cshtml
new file mode 100644
index 0000000..19f5d71
--- /dev/null
+++ b/src/webapp01/Pages/DevSecOps.cshtml
@@ -0,0 +1,181 @@
+@page
+@model DevSecOpsModel
+@{
+    ViewData["Title"] = "DevSecOps with GitHub Advanced Security";
+}
+
+<div class="container">
+    <h1>@ViewData["Title"]</h1>
+    <p class="lead">Discover the latest features and capabilities of GitHub Advanced Security (GHAS)</p>
+
+    @if (TempData["RegexResult"] != null)
+    {
+        <div class="alert alert-info">@TempData["RegexResult"]</div>
+    }
+
+    @if (TempData["RegexError"] != null)
+    {
+        <div class="alert alert-danger">@TempData["RegexError"]</div>
+    }
+
+    <h2>Latest GitHub Advanced Security News</h2>
+    @if (Model.LatestNews.Any())
+    {
+        <ul>
+            @foreach (var newsItem in Model.LatestNews)
+            {
+                <li>
+                    <span class="badge">NEW</span>
+                    @newsItem
+                    <small>Updated: @DateTime.Now.ToString("MMM dd, yyyy")</small>
+                </li>
+            }
+        </ul>
+    }
+    else
+    {
+        <p>No news available at this time.</p>
+    }
+
+    <h2>Core GHAS Features</h2>
+    <dl>
+        <dt>Code Scanning</dt>
+        <dd>Automated vulnerability detection using CodeQL semantic analysis engine.</dd>
+        <dt>Secret Scanning</dt>
+        <dd>Detect and prevent secrets from being committed to repositories.</dd>
+        <dt>Dependency Review</dt>
+        <dd>Understand security impact of dependency changes in pull requests.</dd>
+        <dt>Security Overview</dt>
+        <dd>Organization-wide security posture visibility and compliance tracking.</dd>
+    </dl>
+
+    <h2>Security Demo</h2>
+    <p>
+        This page contains intentionally vulnerable code for demonstration purposes.
+        These vulnerabilities should be detected by GHAS code scanning.
+    </p>
+    <form method="post" asp-page-handler="TestRegex">
+        <input type="text" name="pattern" placeholder="Enter a test input" />
+        <button type="submit">Test</button>
+    </form>
+    <p>⚠️ This uses a vulnerable regex pattern susceptible to ReDoS attacks.</p>
+</div>
+
+@section Scripts {
+}
diff --git a/src/webapp01/Pages/DevSecOps.cshtml.cs b/src/webapp01/Pages/DevSecOps.cshtml.cs
new file mode 100644
index 0000000..acff4fc
--- /dev/null
+++ b/src/webapp01/Pages/DevSecOps.cshtml.cs
@@ -0,0 +1,105 @@
+using Microsoft.AspNetCore.Mvc;
+using Microsoft.AspNetCore.Mvc.RazorPages;
+using System.Text.RegularExpressions;
+using Microsoft.Data.SqlClient;
+using Newtonsoft.Json;
+using System.Text.Json;
+
+namespace webapp01.Pages
+{
+    public class DevSecOpsModel : PageModel
+    {
+        private readonly ILogger<DevSecOpsModel> _logger;
+
+        // Hardcoded credentials for demo purposes - INSECURE
+        private const string CONNECTION_STRING = "Server=localhost;Database=TestDB;User Id=admin;Password=SecretPassword123!;";
+
+        // Weak regex pattern - vulnerable to ReDoS
+        private static readonly Regex VulnerableRegex = new Regex(@"^(a+)+$", RegexOptions.Compiled);
+
+        public DevSecOpsModel(ILogger<DevSecOpsModel> logger)
+        {
+            _logger = logger;
+        }
+
+        public List<string> LatestNews { get; set; } = new();
+
+        public void OnGet()
+        {
+            // Log forging vulnerability - user input directly in logs
+            string userInput = Request.Query.ContainsKey("user") ? Request.Query["user"].ToString() ?? "anonymous" : "anonymous";
+            _logger.LogInformation($"User accessed DevSecOps page: {userInput}");
+
+            // Simulate getting latest news about GitHub Advanced Security
+            LoadLatestGHASNews();
+
+            // Demonstrate potential ReDoS vulnerability
+            string testPattern = Request.Query.ContainsKey("pattern") ? Request.Query["pattern"].ToString() ?? "aaa" : "aaa";
+            try
+            {
+                bool isMatch = VulnerableRegex.IsMatch(testPattern);
+                _logger.LogInformation($"Regex pattern match result: {isMatch} for input: {testPattern}");
+            }
+            catch (Exception ex)
+            {
+                // Log forging in exception handling
+                _logger.LogError($"Regex evaluation failed for pattern: {testPattern}. Error: {ex.Message}");
+            }
+
+            // Simulate database connection with hardcoded credentials
+            try
+            {
+                using var connection = new SqlConnection(CONNECTION_STRING);
+                _logger.LogInformation("Attempting database connection...");
+                // Don't actually open connection for demo purposes
+            }
+            catch (Exception ex)
+            {
+                _logger.LogError($"Database connection failed: {ex.Message}");
+            }
+        }
+
+        private void LoadLatestGHASNews()
+        {
+            LatestNews = new List<string>
+            {
+                "GitHub Advanced Security now supports enhanced code scanning with CodeQL 2.20",
+                "New secret scanning patterns added for over 200 service providers",
+                "Dependency review alerts now include detailed remediation guidance",
+                "Security advisories integration improved for better vulnerability management",
+                "Custom CodeQL queries can now be shared across organizations",
+                "AI-powered security suggestions available in GitHub Copilot for Security",
+                "New compliance frameworks supported in security overview dashboard",
+                "Enhanced SARIF support for third-party security tools integration"
+            };
+
+            // Potential JSON deserialization vulnerability
+            string jsonData = JsonConvert.SerializeObject(LatestNews);
+            var deserializedData = JsonConvert.DeserializeObject<List<string>>(jsonData);
+
+            _logger.LogInformation($"Loaded {LatestNews.Count} news items about GitHub Advanced Security");
+        }
+
+        public IActionResult OnPostTestRegex(string pattern)
+        {
+            if (string.IsNullOrEmpty(pattern))
+                return BadRequest("Pattern cannot be empty");
+
+            // Log forging vulnerability in POST handler
+            _logger.LogInformation($"Testing regex pattern submitted by user: {pattern}");
+
+            try
+            {
+                // Vulnerable regex that could cause ReDoS
+                bool result = VulnerableRegex.IsMatch(pattern);
+                TempData["RegexResult"] = $"Pattern '{pattern}' match result: {result}";
+            }
+            catch (Exception ex)
+            {
+                // Logging sensitive information
+                _logger.LogError($"Regex test failed for pattern: {pattern}. Exception: {ex}");
+                TempData["RegexError"] = "Pattern evaluation failed";
+            }
+
+            return RedirectToPage();
+        }
+    }
+}
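Note on the VulnerableRegex above: ^(a+)+$ is the textbook ReDoS shape. On a non-matching input such as a long run of 'a' followed by one other character, the backtracking engine tries exponentially many ways to split the run between the inner and outer quantifiers. A self-contained C# sketch of the failure mode and the usual .NET mitigation, a match timeout (the input length and timeout value are arbitrary demo choices, not part of this PR):

    using System;
    using System.Diagnostics;
    using System.Text.RegularExpressions;

    class RedosDemo
    {
        static void Main()
        {
            // 30 'a's followed by '!' can never match ^(a+)+$, so the engine
            // backtracks through roughly 2^29 partitions of the 'a' run.
            string hostile = new string('a', 30) + "!";

            // Supplying a matchTimeout bounds the damage: the engine throws
            // RegexMatchTimeoutException instead of spinning for seconds or minutes.
            var guarded = new Regex(@"^(a+)+$", RegexOptions.None,
                                    TimeSpan.FromMilliseconds(250));

            var sw = Stopwatch.StartNew();
            try
            {
                guarded.IsMatch(hostile);
                Console.WriteLine("Matched (unexpected for this input).");
            }
            catch (RegexMatchTimeoutException)
            {
                Console.WriteLine($"Aborted after {sw.ElapsedMilliseconds} ms.");
            }
        }
    }

An equivalent pattern without the nested quantifier, ^a+$, accepts the same language and matches in linear time, which is the actual fix once the demo has served its purpose.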
diff --git a/src/webapp01/Pages/Index.cshtml b/src/webapp01/Pages/Index.cshtml
index b47ad33..e0db7f6 100644
--- a/src/webapp01/Pages/Index.cshtml
+++ b/src/webapp01/Pages/Index.cshtml
@@ -6,7 +6,12 @@
 
 <div class="text-center">
-    <h1 class="display-4">.NET 💜 Azure v4</h1>
+    <h1 class="display-4">.NET 💜 Azure v5</h1>
     <p>Learn about <a href="https://learn.microsoft.com/aspnet/core">building Web apps with ASP.NET Core</a>.</p>
+    <p>Visit our <a asp-page="/About">About GHAS</a> page to learn about GitHub Advanced Security features.</p>
+    <p>
+        <strong>New!</strong> Check out our <a asp-page="/DevSecOps">DevSecOps Demo</a>
+        page to see the latest GHAS features and security demonstrations.
+    </p>
 </div>
diff --git a/src/webapp01/Pages/Shared/_Layout.cshtml b/src/webapp01/Pages/Shared/_Layout.cshtml
index b897a49..bcaf503 100644
--- a/src/webapp01/Pages/Shared/_Layout.cshtml
+++ b/src/webapp01/Pages/Shared/_Layout.cshtml
@@ -18,11 +18,16 @@
                 aria-expanded="false" aria-label="Toggle navigation">
-