From 5be6b0d8b8dc6bca5d1eeb54130d0373f449c40a Mon Sep 17 00:00:00 2001 From: JoftheV <162925657+JoftheV@users.noreply.github.com> Date: Wed, 11 Sep 2024 16:10:10 -0500 Subject: [PATCH 1/2] Create msbuild.yml Customizing when workflow runs are triggered Set your workflow to run on push events to the main and release/* branches on: push: branches: - main - release/* Set your workflow to run on pull_request events that target the main branch on: pull_request: branches: - main Set your workflow to run every day of the week from Monday to Friday at 2:00 UTC on: schedule: - cron: "0 2 * * 1-5" For more information, see "Events that trigger workflows." Manually running a workflow To manually run a workflow, you can configure your workflow to use the workflow_dispatch event. This enables a "Run workflow" button on the Actions tab. on: workflow_dispatch: For more information, see "Manually running a workflow." Running your jobs on different operating systems GitHub Actions provides hosted runners for Linux, Windows, and macOS. To set the operating system for your job, specify the operating system using runs-on: jobs: my_job: name: deploy to staging runs-on: ubuntu-22.04 The available virtual machine types are: ubuntu-latest, ubuntu-22.04, or ubuntu-20.04 windows-latest, windows-2022, or windows-2019 macos-latest, macos-13, or macos-12 For more information, see "Virtual environments for GitHub Actions." Using an action Actions are reusable units of code that can be built and distributed by anyone on GitHub. You can find a variety of actions in GitHub Marketplace, and also in the official Actions repository. To use an action, you must specify the repository that contains the action. We also recommend that you specify a Git tag to ensure you are using a released version of the action. - name: Setup Node uses: actions/setup-node@v4 with: node-version: '20.x' For more information, see "Workflow syntax for GitHub Actions." 
Running a command You can run commands on the job's virtual machine. - name: Install Dependencies run: npm install For more information, see "Workflow syntax for GitHub Actions." Running a job across a matrix of operating systems and runtime versions You can automatically run a job across a set of different values, such as different versions of code libraries or operating systems. For example, this job uses a matrix strategy to run across 3 versions of Node and 3 operating systems: jobs: test: name: Test on node ${{ matrix.node_version }} and ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: node_version: ['18.x', '20.x'] os: [ubuntu-latest, windows-latest, macOS-latest] steps: - uses: actions/checkout@v4 - name: Use Node.js ${{ matrix.node_version }} uses: actions/setup-node@v4 with: node-version: ${{ matrix.node_version }} - name: npm install, build and test run: | npm install npm run build --if-present npm test For more information, see "Workflow syntax for GitHub Actions." Running steps or jobs conditionally GitHub Actions supports conditions on steps and jobs using data present in your workflow context. For example, to run a step only as part of a push and not in a pull_request, you can specify a condition in the if: property based on the event name: steps: - run: npm publish if: github.event_name == 'push' For more information, see "Contexts and expression syntax for GitHub Actions." --- .github/workflows/msbuild.yml | 416 ++++++++++++++++++++++++++++++++++ 1 file changed, 416 insertions(+) create mode 100644 .github/workflows/msbuild.yml diff --git a/.github/workflows/msbuild.yml b/.github/workflows/msbuild.yml new file mode 100644 index 000000000..29382128f --- /dev/null +++ b/.github/workflows/msbuild.yml @@ -0,0 +1,416 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. 
+ +name: MSBuild + +on: + push: + branches: [ "master" ] + pull_request: + branches: [ "master" ] + +env: + # Path to the solution file relative to the root of the project. + SOLUTION_FILE_PATH: . + + # Configuration type to build. + # You can convert this to a build matrix if you need coverage of multiple configuration types. + # https://docs.github.com/actions/learn-github-actions/managing-complex-workflows#using-a-build-matrix + BUILD_CONFIGURATION: Release + +permissions: + contents: read + +jobs: + build: + runs-on: windows-latest + + steps: + - uses: actions/checkout@v4 + + - name: Add MSBuild to PATH + uses: microsoft/setup-msbuild@v1.0.2 + + - name: Restore NuGet packages + working-directory: ${{env.GITHUB_WORKSPACE}} + run: nuget restore ${{env.SOLUTION_FILE_PATH}} + + - name: Build + working-directory: ${{env.GITHUB_WORKSPACE}} + - name: Upload a Build Artifact + uses: actions/upload-artifact@v3.2.1-node20 + with: + # Artifact name + name: # optional, default is artifact + # A file, directory or wildcard pattern that describes what to upload + path: + # The desired behavior if no files are found using the provided path. +Available Options: + warn: Output a warning but do not fail the action + error: Fail the action with an error message + ignore: Do not output any warnings or errors, the action does not fail + + if-no-files-found: # optional, default is warn + # Duration after which artifact will expire in days. 0 means using default retention. +Minimum 1 day. Maximum 90 days unless changed from the repository settings page. + + retention-days: # optional + # If true, hidden files will be included in the uploaded artifact. If false, hidden files will be excluded from the uploaded artifact. + + include-hidden-files: # optional, default is false + - name: Setup .NET Core SDK + uses: actions/setup-dotnet@v4.0.1 + with: + # Optional SDK version(s) to use. If not provided, will install global.json version when available. 
Examples: 2.2.104, 3.1, 3.1.x, 3.x, 6.0.2xx + dotnet-version: # optional + # Optional quality of the build. The possible values are: daily, signed, validated, preview, ga. + dotnet-quality: # optional + # Optional global.json location, if your global.json isn't located in the root of the repo. + global-json-file: # optional + # Optional package source for which to set up authentication. Will consult any existing NuGet.config in the root of the repo and provide a temporary NuGet.config using the NUGET_AUTH_TOKEN environment variable as a ClearTextPassword + source-url: # optional + # Optional OWNER for using packages from GitHub Package Registry organizations/users other than the current repository's owner. Only used if a GPR URL is also provided in source-url + owner: # optional + # Optional NuGet.config location, if your NuGet.config isn't located in the root of the repo. + config-file: # optional + # Optional input to enable caching of the NuGet global-packages folder + cache: # optional + # Used to specify the path to a dependency file: packages.lock.json. Supports wildcards or a list of file names for caching multiple dependencies. + cache-dependency-path: # optional + - name: Setup Java JDK + uses: actions/setup-java@v4.3.0 + with: + # The Java version to set up. Takes a whole or semver Java version. See examples of supported syntax in README file + java-version: # optional + # The path to the `.java-version` file. See examples of supported syntax in README file + java-version-file: # optional + # Java distribution. 
See the list of supported distributions in README file + distribution: + # The package type (jdk, jre, jdk+fx, jre+fx) + java-package: # optional, default is jdk + # The architecture of the package (defaults to the action runner's architecture) + architecture: # optional + # Path to where the compressed JDK is located + jdkFile: # optional + # Set this option if you want the action to check for the latest available version that satisfies the version spec + check-latest: # optional + # ID of the distributionManagement repository in the pom.xml file. Default is `github` + server-id: # optional, default is github + # Environment variable name for the username for authentication to the Apache Maven repository. Default is $GITHUB_ACTOR + server-username: # optional, default is GITHUB_ACTOR + # Environment variable name for password or token for authentication to the Apache Maven repository. Default is $GITHUB_TOKEN + server-password: # optional, default is GITHUB_TOKEN + # Path to where the settings.xml file will be written. Default is ~/.m2. + settings-path: # optional + # Overwrite the settings.xml file if it exists. Default is "true". + overwrite-settings: # optional, default is true + # GPG private key to import. Default is empty string. + gpg-private-key: # optional + # Environment variable name for the GPG private key passphrase. Default is $GPG_PASSPHRASE. + gpg-passphrase: # optional + # Name of the build platform to cache dependencies. It can be "maven", "gradle" or "sbt". + cache: # optional + # The path to a dependency file: pom.xml, build.gradle, build.sbt, etc. This option can be used with the `cache` option. If this option is omitted, the action searches for the dependency file in the entire repository. This option supports wildcards and a list of file names for caching multiple dependencies. + cache-dependency-path: # optional + # Workaround to pass job status to post job step. 
This variable is not intended for manual setting + job-status: # optional, default is ${{ job.status }} + # The token used to authenticate when fetching version manifests hosted on github.com, such as for the Microsoft Build of OpenJDK. When running this action on github.com, the default value is sufficient. When running on GHES, you can pass a personal access token for github.com if you are experiencing rate limiting. + token: # optional, default is ${{ github.server_url == 'https://github.com' && github.token || '' }} + # Name of Maven Toolchain ID if the default name of "${distribution}_${java-version}" is not wanted. See examples of supported syntax in Advanced Usage file + mvn-toolchain-id: # optional + # Name of Maven Toolchain Vendor if the default name of "${distribution}" is not wanted. See examples of supported syntax in Advanced Usage file + mvn-toolchain-vendor: # optional + - name: Rainforest QA GitHub Action + # You may pin to the exact commit or the version. + # uses: rainforestapp/github-action@5855f2c2427c564554663926b143b9ce3bce17c8 + uses: rainforestapp/github-action@v3.2.5 + with: + # An arbitrary string to associate with the run + description: # optional, default is + # Only run tests tied to this Run Group + run_group_id: + # Use a specific environment for this run + environment_id: # optional, default is + # Use a specific URL for this run + custom_url: # optional, default is + # How other currently in-progress runs should be handled. 
Values are `cancel` to cancel runs in the same environment as your new run and `cancel-all` to cancel all runs + conflict: # optional, default is + # The execution method to use for this run + execution_method: # optional, default is + # DEPRECATED: Use `execution_method` instead + crowd: # optional, default is + # Manually entered release information about the release the run is associated with + release: # optional, default is + # If set to a value > 0 and a test fails, it will be retried within the same run, up to that number of times + automation_max_retries: # optional, default is + # Use a specific Rainforest branch for this run + branch: # optional, default is + # Your Rainforest QA API token + token: + # Do not wait for a run to complete before exiting + background: # optional, default is + # Set to true to run parameter validations without starting a new Rainforest run + dry_run: # optional, default is + # The cache key to use for saving/restoring a Rainforest run ID (used to rerun failed tests) + cache_key: # optional, default is ${{ github.job }}-${{ github.action }} + - name: Setup Autify Command Line Interface (CLI) + # You may pin to the exact commit or the version. + # uses: autifyhq/actions-setup-cli@62decfdbb7902d221d4865ae6170d5dbcd3c90db + uses: autifyhq/actions-setup-cli@v2.1.2 + with: + # Shell installer URL + shell-installer-url: # optional, default is https://autify-cli-assets.s3.amazonaws.com/autify-cli/channels/stable/install-cicd.bash + - name: Deploy Environment + # You may pin to the exact commit or the version. 
+ # uses: parasoft/deploy-environment-action@486e9382c6c2958fcbaad60b895da799d16730c2 + uses: parasoft/deploy-environment-action@1.0.2 + with: + # CTP URL + ctpUrl: + # CTP Username + ctpUsername: + # CTP Password + ctpPassword: + # Name of the system + system: + # Name of the environment + environment: + # Name of the environment instance to provision + instance: + # Fail action and abort on provisioning failure + abortOnFailure: # optional + # Virtual assets in the environment will be replicated on another server + copyToVirtualize: # optional + # The environment assets will be copied to a Virtualize server matching this name + virtServerName: # optional + # The name for the replicated environment can be used to later destroy the environment + newEnvironmentName: # optional + # Duplicate associated data repositories before provisioning + duplicateDataRepo: # optional + # Where to duplicate data repository + duplicateType: # optional + # The host of the data repository server + repoHost: # optional + # The port of the data repository server + repoPort: # optional + # The username of the data repository server + repoUsername: # optional + # The password of the data repository server + repoPassword: # optional + - name: Autify for Web Run Test Plan + # You may pin to the exact commit or the version. + # uses: autifyhq/web-run-test-plan-action@d7c3dc4ecacc9e71f48914d12c2b1ab2effbd1c0 + uses: autifyhq/web-run-test-plan-action@v1.0.0 + with: + # Personal Access Token + autify_for_web_api_token: + # Test Plan ID that you want to run + test_plan_id: + # Test Plan API base URL + test_plan_api_base_url: # optional, default is https://app.autify.com/api/v1/schedules/ + - name: Autify for Mobile Run Test Plan + # You may pin to the exact commit or the version. 
+ # uses: autifyhq/mobile-run-test-plan-action@40bebde5f60117ec81312051028d22238649d280 + uses: autifyhq/mobile-run-test-plan-action@v1.0.0 + with: + # Personal Access Token + autify_for_mobile_api_token: + # Test Plan ID that you want to run + test_plan_id: + # Build ID that you want to use + build_id: + # Test Plan API base URL + test_plan_api_base_url: # optional, default is https://mobile-app.autify.com/api/v1/test_plans/ + - name: Rational Integration Tester GitHub Action + # You may pin to the exact commit or the version. + # uses: IBM/devopsauto-integrtest-actions@5096cad49a7c728d0de5931481f5567254c70a70 + uses: IBM/devopsauto-integrtest-actions@v1 + with: + # The fully qualified path to Rational Integration Tester project directory. This value will be ignored if parameterFile field is used. + projectDir: + # The name of the API test project. This value will be ignored if parameterFile field is used. + projectName: + # The API Test environment to use for this execution. This value will be ignored if parameterFile field is used. + environment: + # Semicolon separated list of tests/suites to run. This value will be ignored if parameterFile field is used. + tests: + # The fully qualified path to a parameter file that contains project, environment, and run arguments for one or more tests. + parameterFile: # optional + # Specify the folder to export the JUnit reports to. + junitDir: # optional + - name: Rational Test Automation Server GitHub Action + # You may pin to the exact commit or the version. + # uses: IBM/devopsauto-testserver-actions@c704855bdbbae4f0df152919d00fb0bd40f67ee0 + uses: IBM/devopsauto-testserver-actions@v1 + with: + # Rational Test Automation Server URL + serverUrl: + # Rational Test Automation Server Offline Token + offlineToken: + # Team Space name + teamspace: + # Project name + project: + # Branch name in which the test exists + branch: + # AssetId of the test in Rational Test Automation Server. + assetId: + # Optional. 
Test environment corresponding to the test. Mandatory to input the value if you want to run API test. + environment: # optional + # Optional. Comma (,) delimited values of datasets for the job to run, For example: source:replacement;source:replacement + datasets: # optional + # Optional. Labels corresponding to the test. For example: label1, label2 + labels: # optional + # Optional. Secrets collection name for the job to run. + secretsCollection: # optional + # Optional. Variables corresponding to the test. You must enter the variables in the format: name_of_the_variable=value_of_the_variable + variables: # optional + - name: Rational Performance Tester GitHub Action + # You may pin to the exact commit or the version. + # uses: IBM/devopsauto-perftest-actions@01df3e50f975ce359966c15da94a9279a2d50504 + uses: IBM/devopsauto-perftest-actions@v1 + with: + # The complete path to the Eclipse workspace, required if Config File is not specified + workspace: + # The name of a project within the workspace to use, required if Config File is not specified + project: + # Specify the relative path from the project to the test including the file name of the test. A test can be a Performance test, Schedule, or Compound test and required if configFile is not specified. + suite: + # The path to the IMShared folder in your local installation, if it is not at default location. This is where plugins and product dependencies will be located by the execution agent, required if Config File is not specified + imShared: # optional + # Use this option to specify the complete path to a file that contains the parameters for a test run. If Config file is specified then no other fields will be required. + configFile: # optional + # You can use this option to specify the duration of the stages in the Rate Schedule + duration: # optional + # You can use this option to specify the file directory path to store the exported HTTP test log. 
You can provide multiple parameter entries when running multiple tests. You must use a colon to separate the parameter entries. For example: c:/logexport.txt:c:/secondlogexport.txt + exportLog: # optional + # You can use this option to specify a comma-separated list of report IDs along with exportstats or exportstatshtml to list the reports that you want to export in place of the default reports, or the reports selected under Preferences. To retrieve the report IDs, navigate to Window > Preferences > Test > Performance Test Reports > Export Reports from Rational Performance Tester and under Select reports to export, select the required reports, and click Copy ID to clipboard. + exportStatReportList: + # Use this option to provide the complete path to a directory that you can use to store the exported report in a comma-separated values (CSV) format. + exportStats: # optional + # Use this option to enter one or more formats for the reports that you want to export by using a comma as a separator. The options are simple.csv, full.csv, simple.json, full.json, csv, and json. When you want to export both simple and full reports in json or csv format, you can specify json or csv as the options. The reports are saved to the location specified in the exportStats field. This field must be used in conjunction with exportStats field. + exportStatsFormat: + # Use this option to provide the complete path to a directory that you can use to export web analytic results. You can analyze the results on a web browser without using Rational Performance Tester. + exportStatsHtml: + # Use this option to add labels to test results. To add multiple labels to a test result, you must separate each label by using a comma. 
+ labels: # optional + # Use this option to enable the Resource Monitoring from Service option for a performance schedule if the Resource Monitoring from Service option is not enabled from the schedule editor in Rational Performance Tester, ignore Resource Monitoring sources that were set in the performance schedule and to change for a label matching mode, replace an existing set of Resource Monitoring labels that were set in the performance schedule and run the schedule with a new set of Resource Monitoring labels. + overrideRmLabels: # optional + # Determines whether a result file with the same name is overwritten. The default value, false, indicates that the new result file is created. If the value is true, the file is overwritten and retains the same file name. + overwrite: # optional + # You can use this option to publish test results to the Server. The format is: serverURL#project.name=projectName&teamspace.name=teamspaceName. If the name of the project or team space contains a special character, then you must replace it with %. + publish: # optional + # Use this option to publish the test results based on the completion status of the tests. The supported values are FAIL,PASS,INCONCLUSIVE,ERROR,ALL. + publishFor: # optional + # Use this option to publish specific test results to Rational Test Automation Server. The supported values are STATS, TESTLOG. + publishReports: # optional + # Use this option to specify a rate that you want to achieve for a workload in the Rate Runner group. For example, "Rate Runner Group 1=1/s, 3/m", where, Rate Runner Group1 is the name of the rate runner group that has two stages. The desired rate for the first stage is one iteration per second and the rate for the second stage is three iterations per minute. + rate: # optional + # Use this option when you want to view a record of all events that occurred during a test or schedule run. 
Supported values are jaeger, testlog, null + reportHistory: # optional + # Use this option to specify the name of the results file. The default name of the result file is the test or schedule name with a timestamp appended. You must specify a folder name that is relative to the project to store the test results. + results: # optional + # Use this option to replace dataset values during a test or schedule run. You must ensure that both original and new datasets are in the same workspace and have the same column names. You must also include the path to the dataset. For example, /project_name/ds_path/ds_filename.csv:/project_name/ds_path/new_ds_filename.csv' + swapDatasets: # optional + # Use this option to add text that you want to display in the user comments row of the report. + userComments: # optional + # Overrides the default number of virtual users in the run. For a schedule, the default is the number of users specified in the schedule editor. For a test, the default is one user. + users: # optional + # Use this option to specify the complete path to the XML file that contains the variable initialization. + varFile: # optional + # Use this option to specify the Java maximum heap size for the Java process that controls the playback. For example, when you input the value as -Xmx4096m, it specifies the maximum heap size as 4GB. + vmArgs: # optional + - name: Rational Performance Tester GitHub Action + # You may pin to the exact commit or the version. + # uses: IBM/devopsauto-perftest-actions@01df3e50f975ce359966c15da94a9279a2d50504 + uses: IBM/devopsauto-perftest-actions@v1 + with: + # The complete path to the Eclipse workspace, required if Config File is not specified + workspace: + # The name of a project within the workspace to use, required if Config File is not specified + project: + # Specify the relative path from the project to the test including the file name of the test. 
A test can be a Performance test, Schedule, or Compound test and required if configFile is not specified. + suite: + # The path to the IMShared folder in your local installation, if it is not at default location. This is where plugins and product dependencies will be located by the execution agent, required if Config File is not specified + imShared: # optional + # Use this option to specify the complete path to a file that contains the parameters for a test run. If Config file is specified then no other fields will be required. + configFile: # optional + # You can use this option to specify the duration of the stages in the Rate Schedule + duration: # optional + # You can use this option to specify the file directory path to store the exported HTTP test log. You can provide multiple parameter entries when running multiple tests. You must use a colon to separate the parameter entries. For example: c:/logexport.txt:c:/secondlogexport.txt + exportLog: # optional + # You can use this option to specify a comma-separated list of report IDs along with exportstats or exportstatshtml to list the reports that you want to export in place of the default reports, or the reports selected under Preferences. To retrieve the report IDs, navigate to Window > Preferences > Test > Performance Test Reports > Export Reports from Rational Performance Tester and under Select reports to export, select the required reports, and click Copy ID to clipboard. + exportStatReportList: + # Use this option to provide the complete path to a directory that you can use to store the exported report in a comma-separated values (CSV) format. + exportStats: # optional + # Use this option to enter one or more formats for the reports that you want to export by using a comma as a separator. The options are simple.csv, full.csv, simple.json, full.json, csv, and json. When you want to export both simple and full reports in json or csv format, you can specify json or csv as the options. 
The reports are saved to the location specified in the exportStats field. This field must be used in conjunction with exportStats field. + exportStatsFormat: + # Use this option to provide the complete path to a directory that you can use to export web analytic results. You can analyze the results on a web browser without using Rational Performance Tester. + exportStatsHtml: + # Use this option to add labels to test results. To add multiple labels to a test result, you must separate each label by using a comma. + labels: # optional + # Use this option to enable the Resource Monitoring from Service option for a performance schedule if the Resource Monitoring from Service option is not enabled from the schedule editor in Rational Performance Tester, ignore Resource Monitoring sources that were set in the performance schedule and to change for a label matching mode, replace an existing set of Resource Monitoring labels that were set in the performance schedule and run the schedule with a new set of Resource Monitoring labels. + overrideRmLabels: # optional + # Determines whether a result file with the same name is overwritten. The default value, false, indicates that the new result file is created. If the value is true, the file is overwritten and retains the same file name. + overwrite: # optional + # You can use this option to publish test results to the Server. The format is: serverURL#project.name=projectName&teamspace.name=teamspaceName. If the name of the project or team space contains a special character, then you must replace it with %. + publish: # optional + # Use this option to publish the test results based on the completion status of the tests. The supported values are FAIL,PASS,INCONCLUSIVE,ERROR,ALL. + publishFor: # optional + # Use this option to publish specific test results to Rational Test Automation Server. The supported values are STATS, TESTLOG. 
+ publishReports: # optional + # Use this option to specify a rate that you want to achieve for a workload in the Rate Runner group. For example, "Rate Runner Group 1=1/s, 3/m", where, Rate Runner Group1 is the name of the rate runner group that has two stages. The desired rate for the first stage is one iteration per second and the rate for the second stage is three iterations per minute. + rate: # optional + # Use this option when you want to view a record of all events that occurred during a test or schedule run. Supported values are jaeger, testlog, null + reportHistory: # optional + # Use this option to specify the name of the results file. The default name of the result file is the test or schedule name with a timestamp appended. You must specify a folder name that is relative to the project to store the test results. + results: # optional + # Use this option to replace dataset values during a test or schedule run. You must ensure that both original and new datasets are in the same workspace and have the same column names. You must also include the path to the dataset. For example, /project_name/ds_path/ds_filename.csv:/project_name/ds_path/new_ds_filename.csv' + swapDatasets: # optional + # Use this option to add text that you want to display in the user comments row of the report. + userComments: # optional + # Overrides the default number of virtual users in the run. For a schedule, the default is the number of users specified in the schedule editor. For a test, the default is one user. + users: # optional + # Use this option to specify the complete path to the XML file that contains the variable initialization. + varFile: # optional + # Use this option to specify the Java maximum heap size for the Java process that controls the playback. For example, when you input the value as -Xmx4096m, it specifies the maximum heap size as 4GB. + vmArgs: # optional + - name: 42Crunch API Conformance Scan + # You may pin to the exact commit or the version. 
+ # uses: 42Crunch/cicd-github-actions@e2c9a02bb391932aee6ef994de06ff2c7aae9ff6 + uses: 42Crunch/cicd-github-actions@v1 + with: + # API token to access 42Crunch Platform. Check https://docs.42crunch.com/latest/content/tasks/integrate_github_actions.htm for details + api-token: + # 42Crunch platform URL + platform-url: # optional, default is https://platform.42crunch.com + # Path to JSON report from audit step + audit-report-path: + # List of API UUIDs to use + api-ids: # optional + # Path to save sarif report, add 'Convert to sarif' stage + convert-to-sarif: # optional + # Add 'Upload sarif' and 'Convert to sarif' stage + upload-sarif: # optional + # Add 'Check sqg' stage + check-sqg: # optional + # GitHub token for uploading results to Github Code Scanning + github-token: # optional, default is ${{ github.token }} + - name: Probely DAST for Security Scans + # You may pin to the exact commit or the version. + # uses: Probely/probely-github-action@57264e5cafb33371168943413f91d1fd3792b210 + uses: Probely/probely-github-action@v1.0.0 + with: + # Probely's API Key + api-key: + # Probely's identifier of the target to scan + target-id: + # Probely's region where your account is hosted + region: # optional, default is eu + # The scan profile to use in the scan. If not defined, the scan profile will be the one defined in the target settings. + scan-profile: # optional, default is + # Add additional options to the MSBuild command line here (like platform or verbosity level). + # See https://docs.microsoft.com/visualstudio/msbuild/msbuild-command-line-reference + run: msbuild /m /p:Configuration=${{env.BUILD_CONFIGURATION}} ${{env.SOLUTION_FILE_PATH}} From c0006e1abe33c1c6155296e4ae43c58d4fc7f3cc Mon Sep 17 00:00:00 2001 From: JoftheV <162925657+JoftheV@users.noreply.github.com> Date: Wed, 11 Sep 2024 16:25:14 -0500 Subject: [PATCH 2/2] Create codeql.yml Getting started with a workflow To help you get started, this guide shows you some basic examples. 
For the full GitHub Actions documentation on workflows, see "Configuring workflows." Customizing when workflow runs are triggered Set your workflow to run on push events to the main and release/* branches on: push: branches: - main - release/* Set your workflow to run on pull_request events that target the main branch on: pull_request: branches: - main Set your workflow to run every day of the week from Monday to Friday at 2:00 UTC on: schedule: - cron: "0 2 * * 1-5" For more information, see "Events that trigger workflows." Manually running a workflow To manually run a workflow, you can configure your workflow to use the workflow_dispatch event. This enables a "Run workflow" button on the Actions tab. on: workflow_dispatch: For more information, see "Manually running a workflow." Running your jobs on different operating systems GitHub Actions provides hosted runners for Linux, Windows, and macOS. To set the operating system for your job, specify the operating system using runs-on: jobs: my_job: name: deploy to staging runs-on: ubuntu-22.04 The available virtual machine types are: ubuntu-latest, ubuntu-22.04, or ubuntu-20.04 windows-latest, windows-2022, or windows-2019 macos-latest, macos-13, or macos-12 For more information, see "Virtual environments for GitHub Actions." Using an action Actions are reusable units of code that can be built and distributed by anyone on GitHub. You can find a variety of actions in GitHub Marketplace, and also in the official Actions repository. To use an action, you must specify the repository that contains the action. We also recommend that you specify a Git tag to ensure you are using a released version of the action. - name: Setup Node uses: actions/setup-node@v4 with: node-version: '20.x' For more information, see "Workflow syntax for GitHub Actions." Running a command You can run commands on the job's virtual machine. - name: Install Dependencies run: npm install For more information, see "Workflow syntax for GitHub Actions." 
Running a job across a matrix of operating systems and runtime versions You can automatically run a job across a set of different values, such as different versions of code libraries or operating systems. For example, this job uses a matrix strategy to run across 3 versions of Node and 3 operating systems: jobs: test: name: Test on node ${{ matrix.node_version }} and ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: matrix: node_version: ['18.x', '20.x'] os: [ubuntu-latest, windows-latest, macOS-latest] steps: - uses: actions/checkout@v4 - name: Use Node.js ${{ matrix.node_version }} uses: actions/setup-node@v4 with: node-version: ${{ matrix.node_version }} - name: npm install, build and test run: | npm install npm run build --if-present npm test For more information, see "Workflow syntax for GitHub Actions." Running steps or jobs conditionally GitHub Actions supports conditions on steps and jobs using data present in your workflow context. For example, to run a step only as part of a push and not in a pull_request, you can specify a condition in the if: property based on the event name: steps: - run: npm publish if: github.event_name == 'push' For more information, see "Contexts and expression syntax for GitHub Actions." --- .github/workflows/codeql.yml | 558 +++++++++++++++++++++++++++++++++++ 1 file changed, 558 insertions(+) create mode 100644 .github/workflows/codeql.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 000000000..4bd88fe8d --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,558 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. 
Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL Advanced" + +on: + push: + branches: [ "master" ] + pull_request: + branches: [ "master" ] + schedule: + - cron: '34 20 * * 1' + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners (GitHub.com only) + # Consider using larger runners or machines with greater resources for possible analysis time improvements. + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + permissions: + # required for all workflows + security-events: write + + # required to fetch internal or private CodeQL packs + packages: read + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + include: + - language: c-cpp + build-mode: autobuild + # CodeQL supports the following values keywords for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' + # Use `c-cpp` to analyze code written in C, C++ or both + # Use 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, + # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. 
+ # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how + # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + # If the analyze step fails for one of the languages you are analyzing with + # "We were unable to automatically build your code", modify the matrix above + # to set the build mode to "manual" for that language. Then modify this step + # to build your code. + # ℹ️ Command-line programs to run using the OS shell. 
+ # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + - if: matrix.build-mode == 'manual' + shell: bash + run: | + echo 'If you are using a "manual" build mode for one or more of the' \ + 'languages you are analyzing, replace this with the commands to build' \ + 'your code, for example:' + echo ' make bootstrap' + echo ' make release' + exit 1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" + - name: Lightstep Pre-Deploy Check + # You may pin to the exact commit or the version. + # uses: lightstep/lightstep-action-predeploy@22bec553a6d0fb3de5026acb1159085601f76408 + uses: lightstep/lightstep-action-predeploy@v0.2.6 + with: + # The organization associated with your Lightstep account (usually your company name) + lightstep_organization: # optional + # The Lightstep project associated with this repository + lightstep_project: # optional + # The key to access the Lightstep Public API + lightstep_api_key: # optional + # The token to access the Rollbar API + rollbar_api_token: # optional + # The token to access the PagerDuty API + pagerduty_api_token: # optional + # If set to true, collapse all conditions to a single table row + rollup_conditions: # optional + # If set to true, will not add a comment to pull-requests + disable_comment: # optional + - name: elmah.io Upload Source Map Action + # You may pin to the exact commit or the version. + # uses: elmahio/github-upload-source-map-action@02d3e0a71cb7741a98860405f287c4abf95f62bc + uses: elmahio/github-upload-source-map-action@v1 + with: + # An API key with permission to upload source maps. + apiKey: + # The ID of the log which should contain the minified JavaScript and source map. + logId: + # An URL to the online minified JavaScript file. The URL can be absolute or relative but will always be converted to a relative path (no protocol, domain, and query parameters). 
elmah.io uses this path to lookup any lines in a JS stack trace that will need de-minification. + path: + # A path to the source map file. Only files with an extension of .map and content type of application/json will be accepted. + sourceMap: + # A path to the minified JavaScript file. Only files with an extension of .js and content type of text/javascript will be accepted. + minifiedJavaScript: + - name: Publish event + # You may pin to the exact commit or the version. + # uses: fiberplane/publish-event@63e125ddca44bfb308eec949bcb22f80230394b0 + uses: fiberplane/publish-event@v1.2 + with: + # API token used to access the Fiberplane API with + api-token: + # Title of the newly created event + title: # default is GitHub Action + # Labels to add to the event. +Format: key=value|key=value|key=value + + labels: + # Time at which the event occurred. Defaults to current time. Format should be a RFC 3339 formatted string + time: # optional + # ID of the workspace to which the event should be posted + workspace-id: + # Base URL of the Fiberplane API + fp-base-url: # default is https://studio.fiberplane.com + - name: rootly-pulse + # You may pin to the exact commit or the version. + # uses: rootlyhq/pulse-action@7aa3a8baf889ff8b37a489dde53edece73b24a64 + uses: rootlyhq/pulse-action@v1.1.1 + with: + # Summary of the pulse + summary: + # A API key for rootly + api_key: + # Environments associated with the pulse. Separate with commas. + environments: # optional + # Services associated with the pulse. Separate with commas. + services: # optional + # Labels associated with the pulse. Separate with commas and separate key-value pair with = (no spaces before or after =). + labels: # optional + # Source of the pulse + source: # optional + # Refs associated with the pulse. Separate with commas and separate key-value pair with = (no spaces before or after =). + refs: # optional + - name: Sync Templates + # You may pin to the exact commit or the version. 
+ # uses: fiberplane/sync-templates@e35786a91f4d6ec8f9b4df0ccfa66770cff78083 + uses: fiberplane/sync-templates@v1 + with: + # API token used to access the Fiberplane API with + api-token: + # ID of the workspace to which the templates should be uploaded to + workspace-id: + # Base URL of the Fiberplane API + fp-base-url: # optional, default is https://studio.fiberplane.com + # Custom directory that should be monitored for Template JSONNET files (default: .fiberplane/templates/) + templates-directory: # optional, default is .fiberplane/templates/ + # Version of the Fiberplane CLI to use (latest by default) + fp-version: # optional, default is latest + - name: Deploy Prometheus and Grafana + # You may pin to the exact commit or the version. + # uses: bitovi/github-actions-deploy-prometheus@60abab51796e327667fc11d63a8ac75b9e2834b9 + uses: bitovi/github-actions-deploy-prometheus@v0.1.0 + with: + # Specifies if this action should checkout the code + checkout: # optional, default is true + # AWS access key ID + aws_access_key_id: + # AWS secret access key + aws_secret_access_key: + # AWS session token, if you're using temporary credentials + aws_session_token: # optional + # AWS default region + aws_default_region: # default is us-east-1 + # Auto-generated by default so it's unique for org/repo/branch. Set to override with custom naming the unique AWS resource identifier for the deployment. Defaults to `${org}-${repo}-${branch}`. + aws_resource_identifier: # optional + # A list of additional tags that will be included on created resources. Example: `{"key1": "value1", "key2": "value2"}` + aws_extra_tags: # optional, default is {} + # Secret name to pull env variables from AWS Secret Manager, could be a comma separated list, read in order. Expected JSON content. 
+ env_aws_secret: # optional + # File containing environment variables to be used with the app + env_repo: # optional + # `.env` file to be used with the app from Github secrets + env_ghs: # optional + # `.env` file to be used with the app from Github variables + env_ghv: # optional + # The AWS EC2 instance type + aws_ec2_instance_type: # optional, default is t2.medium + # The AWS IAM instance profile to use for the EC2 instance. Use if you want to pass an AWS role with specific permissions granted to the instance + aws_ec2_instance_profile: # optional + # Creates a Secret in AWS secret manager to store a keypair + aws_ec2_create_keypair_sm: # optional + # Root disk size for the EC2 instance + aws_ec2_instance_vol_size: # optional, default is 10 + # A JSON object of additional tags that will be included on created resources. Example: `{"key1": "value1", "key2": "value2"}` + aws_ec2_additional_tags: # optional + # AMI filter to use when searching for an AMI to use for the EC2 instance. Defaults to `ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-*` + aws_ec2_ami_filter: # optional, default is ubuntu/images/hvm-ssd/ubuntu-focal-20.04-amd64-server-* + # Set to true to provision infrastructure (with Terraform) but skip the app deployment (with ansible) + infrastructure_only: # optional, default is false + # Path to the grafana datasource directory + grafana_datasource_dir: # optional, default is observability/grafana/datasources + # Path to the prometheus config file + prometheus_config: # optional, default is observability/prometheus/prometheus.yml + # Set to "true" to Destroy the created AWS infrastructure for this instance + tf_stack_destroy: # optional, default is false + # Change this to be anything you want to. Careful to be consistent here. A missing file could trigger recreation, or stepping over destruction of non-defined objects. + tf_state_file_name: # optional + # Append a string to the tf-state-file. 
Setting this to `unique` will generate `tf-state-aws-unique`. Can co-exist with the tf_state_file_name variable. + tf_state_file_name_append: # optional + # AWS S3 bucket to use for Terraform state. Defaults to `${org}-${repo}-{branch}-tf-state-aws` + tf_state_bucket: # optional + # Force purge and deletion of S3 tf_state_bucket defined. Any file contained there will be destroyed. `tf_stack_destroy` must also be `true` + tf_state_bucket_destroy: # optional + # Define the root domain name for the application. e.g. bitovi.com. If empty, ELB URL will be provided. + aws_domain_name: # optional + # Define the sub-domain part of the URL. Defaults to `${org}-${repo}-{branch}` + aws_sub_domain: # optional + # Deploy application to root domain. Will create root and www DNS records. Domain must exist in Route53. + aws_root_domain: # optional + # Existing certificate ARN to be used in the ELB. Use if you manage a certificate outside of this action. See https://docs.aws.amazon.com/acm/latest/userguide/gs-acm-list.html for how to find the certificate ARN. + aws_cert_arn: # optional + # Generates and manage the root certificate for the application to be used in the ELB. + aws_create_root_cert: # optional + # Generates and manage the sub-domain certificate for the application to be used in the ELB. + aws_create_sub_cert: # optional + # Set this to true if you want not to use a certificate in the ELB. + aws_no_cert: # optional + # Define if a VPC should be created + aws_vpc_create: # optional + # Set a specific name for the VPC + aws_vpc_name: # optional + # Define Base CIDR block which is divided into subnet CIDR blocks. Defaults to 10.0.0.0/16. + aws_vpc_cidr_block: # optional + # Comma separated list of public subnets. Defaults to 10.10.110.0/24 + aws_vpc_public_subnets: # optional + # Comma separated list of private subnets. If none, none will be created. + aws_vpc_private_subnets: # optional + # Comma separated list of availability zones. Defaults to `aws_default_region. 
+ aws_vpc_availability_zones: # optional + # AWS VPC ID. Accepts `vpc-###` values. + aws_vpc_id: # optional + # Specify a Subnet to be used with the instance. If none provided, will pick one. + aws_vpc_subnet_id: # optional + # A JSON object of additional tags that will be included on created resources. Example: `{"key1": "value1", "key2": "value2"}` + aws_vpc_additional_tags: # optional + - name: Annotate Nobl9 SLO + # You may pin to the exact commit or the version. + # uses: nobl9/action-annotate-slo@1b79aa07d76525d32ec2d468f8afd1858e160cc1 + uses: nobl9/action-annotate-slo@v0.1.0 + with: + # Annotation to add + annotation: + # SLO to annotate + slo: # optional + # Labels to annotate + labels: # optional + # Project containing SLO to annotate + project: + # sloctl version to use + sloctl_version: # optional, default is 0.0.99 + # Nobl9 client id + nobl9_client_id: + # Nobl9 client secret + nobl9_client_secret: + # Nobl9 okta org url + nobl9_okta_org_url: # optional, default is https://accounts.nobl9.com + # Nobl9 okta auth server + nobl9_okta_auth_server: # optional, default is auseg9kiegWKEtJZC416 + # Nobl9 URL + nobl9_url: # optional, default is https://app.nobl9.com/api + - name: Autometrics Report + # You may pin to the exact commit or the version. + # uses: autometrics-dev/diff-metrics@778b3281f8446790af0afd766d5bb236defb5dde + uses: autometrics-dev/diff-metrics@v2.1.0 + with: + # Github token to use + gh-token: + # The list of rust project roots to check. One path per line + rs-roots: # optional + # The list of typescript project roots to check. One path per line + ts-roots: # optional + # The list of golang project roots to check. One path per line + go-roots: # optional + # The list of python project roots to check. One path per line + py-roots: # optional + # The number of days to keep the artifacts for. 
Defaults to 0 (inherits the policy from the repository) + retention-days: # optional, default is 0 + # The version of am to download, skip patch or minor to act as a wildcard. "0.2" means ">=0.2.0 && <0.3.0", "1" means ">=1.0.0 && <2.0.0", etc. + am-version: # optional + - name: Instrument pipeline + # You may pin to the exact commit or the version. + # uses: autometrics-dev/instrument-pipeline@142e4e6cbc109bb37c705daa856b6462689b3ef8 + uses: autometrics-dev/instrument-pipeline@v0.1.0 + with: + # URL to the aggregation gateway, for example `http://localhost:9091` + pushgateway: + # Type of the aggregation gateway, one of `prometheus`, `gravel`, or `zapier`. Currently only changes the url format in case of prometheus + gatewaytype: # optional + # Comma separated list of buckets for duration histogram, with or without the brackets [] + buckets: # optional + - name: Lightstep Services Change Report + # You may pin to the exact commit or the version. + # uses: lightstep/lightstep-action-snapshot@166ec5f31d611858ebe9ed3437848e8fe675fb89 + uses: lightstep/lightstep-action-snapshot@v0.2.1 + with: + # The organization associated with your Lightstep account (usually your company name) + lightstep_organization: # optional + # The Lightstep project associated with this repository + lightstep_project: # optional + # Only show services in the snapshot from this comma-separated list + lightstep_service_filter: # optional + # The query to use when taking a snapshot + lightstep_snapshot_query: # optional + # The Lightstep snapshot id to summarize + lightstep_snapshot_id: # optional + # The Lightstep snapshot id to compare with lightstep_snapshot_id + lightstep_snapshot_compare_id: # optional + # The key to access the Lightstep Public API + lightstep_api_key: # optional + # Github API Token + github_token: # optional + # If set to true, will not add a comment to pull-requests + disable_comment: # optional + - name: Setup Nsolid environment + # You may pin to the exact commit or the 
version. + # uses: nodesource/setup-nsolid@1ca68d2589d3d56ecd3881dfe6ffa87eeda9c939 + uses: nodesource/setup-nsolid@v1.0.1 + with: + # Node to use, if no values specified we will setup the major version available for the nsolid-version. E.g: 18.x, 20.x. + node-version: # optional + # Nsolid version to use. E.g: 5.0.5, 4.10.0, latest. + nsolid-version: + # Target operating system for Nsolid to use. E.g: linux, darwin, win32. Will use linux by default. + platform: # optional + # Target architecture for Node to use. + arch: # optional + - name: elmah.io Create Deployment Action + # You may pin to the exact commit or the version. + # uses: elmahio/github-create-deployment-action@132611db9161ecebb1b07db6510d4ea5e0d2d415 + uses: elmahio/github-create-deployment-action@v1 + with: + # An API key with permission to create deployments. + apiKey: + # The version number of this deployment. The value of version can be a SemVer compliant string or any other syntax that you are using as your version numbering scheme. + version: + # Optional description of this deployment. Can be markdown or clear text. + description: # optional + # The name of the person responsible for creating this deployment. This can be the name taken from your deployment server. + userName: # optional + # The email of the person responsible for creating this deployment. This can be the email taken from your deployment server. + userEmail: # optional + # As default, deployments are attached all logs of the organization. If you want a deployment to attach to a single log only, set this to the ID of that log. + logId: # optional + - name: Nobl9 sloctl action + # You may pin to the exact commit or the version. 
+ # uses: nobl9/nobl9-action@b921770c1ed8d80a3dc04924074717683ea0ffa8 + uses: nobl9/nobl9-action@v0.2.8 + with: + # Client ID + client_id: + # Client Secret + client_secret: + # The path or glob pattern to the configuration in YAML format + sloctl_yml: + # Submits server-side request without persisting the configured resources + dry_run: # optional, default is false + - name: Nobl9 sloctl action + # You may pin to the exact commit or the version. + # uses: nobl9/nobl9-action@b921770c1ed8d80a3dc04924074717683ea0ffa8 + uses: nobl9/nobl9-action@v0.2.8 + with: + # Client ID + client_id: + # Client Secret + client_secret: + # The path or glob pattern to the configuration in YAML format + sloctl_yml: + # Submits server-side request without persisting the configured resources + dry_run: # optional, default is false + - name: Datadog JUnitXML Upload + # You may pin to the exact commit or the version. + # uses: DataDog/junit-upload-github-action@c4b57b587ae0e3ed618a1f0e7a7d260cfde53032 + uses: DataDog/junit-upload-github-action@v1.4.0 + with: + # (Deprecated) Datadog API key to use to upload the junit files. + api-key: # optional + # Datadog API key to use to upload the junit files. + api_key: # optional + # Service name to use with the uploaded test results. + service: + # (Deprecated) The Datadog site to upload the files to. + datadog-site: # optional, default is datadoghq.com + # The Datadog site to upload the files to. + site: # optional, default is datadoghq.com + # JUnit files to upload. + files: # default is . + # Controls the maximum number of concurrent file uploads. + concurrency: # default is 20 + # The node version used to install datadog-ci + node-version: # default is 20 + # Datadog tags to associate with the uploaded test results. + tags: # optional + # Datadog env to use for the tests. + env: # optional + # Set to "true" to enable forwarding content from XML reports as logs. + logs: # optional + # The version of the @datadog/datadog-ci package to use. 
It defaults to the latest release (`latest`). + datadog-ci-version: # optional, default is latest + # Extra args to be passed to the datadog-ci cli. + extra-args: # optional, default is + - name: Push Workflow Data to Tinybird + # You may pin to the exact commit or the version. + # uses: localstack/tinybird-workflow-push@518790fd8ad2665b06419c2588c744da355bb970 + uses: localstack/tinybird-workflow-push@v3.3.1 + with: + # Github token for receiving start and end time of the workflow + github_token: + # The token to authenticate with Tinybird + tinybird_token: + # The Tinybird datasource to which to push the data to + tinybird_datasource: # optional, default is ci_workflows + # The id of the workflow + workflow_id: # optional + # Optional input to manually override the outcome reported to Tinybird. By default the outcome is calculated using the worst outcome of all jobs in the current workflow run attempt. + + outcome: # optional + - name: PagerDuty Change Events + # You may pin to the exact commit or the version. + # uses: PagerDuty/pagerduty-change-events-action@ec2c5d5cff79059924d663a7427733785626c3bf + uses: PagerDuty/pagerduty-change-events-action@v1.3.0 + with: + # The integration key that identifies the service the change was made to. + integration-key: + # Custom event summary. If provided the GitHub event type is ignored and the given summary used. A link to the run is included in the event. + custom-event: # optional + - name: rss-to-issues + # You may pin to the exact commit or the version. 
+ # uses: git-for-windows/rss-to-issues@60a6a47582d79d434bd18c6d3af3d9ab7356cf56 + uses: git-for-windows/rss-to-issues@v0.0.9 + with: + # The GITHUB_TOKEN secret + github-token: + # URL of the RSS/Atom feed + feed: + # Only look at feed items younger than this + max-age: + # Prefix added to the created issues' titles + prefix: # optional + # Labels to add, comma separated + labels: # optional + # Log issue creation but do nothing + dry-run: # optional + # Aggregate all items in a single issue + aggregate: # optional + # Limit the issue contents' size + character-limit: # optional + # Limit to feed items whose titles match this regular expression + title-pattern: # optional + # Limit to feed items whose contents match this regular expression + content-pattern: # optional + - name: Yor GitHub Action + # You may pin to the exact commit or the version. + # uses: bridgecrewio/yor-action@04bf3da0c4e8619a307c023ce8f0d196a2d8a4ee + uses: bridgecrewio/yor-action@0.3.0 + with: + # directory with infrastructure code to scan + directory: # optional, default is . + # Run scan on all checks but a specific check identifier (comma separated) + tag_groups: # optional + # comma delimited list of yor tags to apply + tag: # optional + # comma delimited list of yor tags to not apply + skip_tags: # optional + # comma delimited list of paths for yor to skip tagging of + skip_dirs: # optional + # comma delimited list of paths to external (custom) tags & tag groups plugins + custom_tags: # optional + # The format of the output. cli, json + output_format: # optional + # log level + log_level: # optional + # Choose whether the action will commit changes. Changes will be committed if this is exactly "YES" + commit_changes: # optional, default is true + - name: Honeycomb Buildevents + # You may pin to the exact commit or the version. 
+ # uses: honeycombio/gha-buildevents@e891e91ad0fcd80b71430a97ebf5a9baecac388a + uses: honeycombio/gha-buildevents@v3.0.0 + with: + # A Honeycomb API key - needed to send traces. + apikey: + # Defaults to https://api.honeycomb.io + apihost: # optional, default is https://api.honeycomb.io + # The Honeycomb dataset to send traces to. + dataset: + # Status of the job or worfklow. Setting this signals when to end the trace. + status: # optional + # Unix timestamp to represent when the trace started. Not necessary for single job workflows. Send in final use of the action for multi-job workflows. + trace-start: # optional + # Set this to a key unique for this matrix cell, only useful when using a build matrix. + matrix-key: # optional + # Deprecated value - please use status instead + job-status: # optional + # (true/false) Whether to send an event representing the setup of this action. + send-init-event: # optional, default is true + - name: Load runner information + # You may pin to the exact commit or the version. + # uses: devops-actions/load-runner-info@7f8c07227aa6176e94e4eeb912016bb0a9d33796 + uses: devops-actions/load-runner-info@v1.0.10 + with: + # Slug of the organization to analyze. + organization: + # Slug of the repository to analyze. + repo: # optional + # Access token to use for analysis with either admin:org or repo owner if you run it against a repo + accessToken: + + + + + + + + + + + + + + + + + + + + + +