From b6a3fd7cd3a486a151a4f516d39c876b93a21ef7 Mon Sep 17 00:00:00 2001 From: Jorge Romero Date: Fri, 6 Feb 2026 12:10:09 +0100 Subject: [PATCH 1/8] Feature/quickstarter automatic tests improvements (#1362) --- .../continuous-integration-workflow.yml | 6 +- .gitignore | 1 + CHANGELOG.md | 2 +- jenkins/master/Dockerfile.ubi8 | 2 +- tests/Makefile | 4 +- tests/README.md | 37 + tests/create-projects/jenkinsfile_test.go | 7 +- tests/dev-test.sh | 200 +++ tests/go.mod | 43 +- tests/go.sum | 85 +- tests/quickstarter-test.sh | 132 +- tests/quickstarter/QUICKSTARTERS_TESTS.md | 1135 +++++++++++++++++ tests/quickstarter/TODO.md | 22 + tests/quickstarter/bitbucket.go | 33 - tests/quickstarter/diagnostics/diagnostics.go | 172 +++ tests/quickstarter/docs/adding-step-types.md | 238 ++++ tests/quickstarter/docs/steps.md | 147 +++ tests/quickstarter/docs/steps/bitbucket.md | 215 ++++ tests/quickstarter/docs/steps/build.md | 97 ++ .../quickstarter/docs/steps/expose-service.md | 147 +++ tests/quickstarter/docs/steps/http.md | 149 +++ tests/quickstarter/docs/steps/inspect.md | 146 +++ tests/quickstarter/docs/steps/provision.md | 437 +++++++ tests/quickstarter/docs/steps/run.md | 205 +++ tests/quickstarter/docs/steps/upload.md | 178 +++ tests/quickstarter/docs/steps/wait.md | 143 +++ tests/quickstarter/logger/logger.go | 220 ++++ tests/quickstarter/openshift.go | 38 - tests/quickstarter/quickstarter_test.go | 436 ++----- tests/quickstarter/reporting/export.go | 97 ++ tests/quickstarter/reporting/reporting.go | 148 +++ .../resources/Jenkinsfile-create-jobs | 160 +++ tests/quickstarter/resources/Jenkinsfile-qs | 189 +++ tests/quickstarter/resources/README.md | 87 ++ .../resources/quickstarter-test.env.template | 62 + .../resources/scripts/create_jobs.sh | 430 +++++++ .../resources/scripts/job_template.xml | 78 ++ .../resources/scripts/run_all.xml | 233 ++++ tests/quickstarter/sonarqube.go | 54 - tests/quickstarter/steps.go | 130 +- tests/quickstarter/steps/bitbucket.go | 754 +++++++++++ 
tests/quickstarter/steps/build.go | 56 + tests/quickstarter/steps/executor.go | 167 +++ tests/quickstarter/steps/expose_service.go | 62 + tests/quickstarter/steps/golden.go | 45 + tests/quickstarter/steps/http.go | 279 ++++ tests/quickstarter/steps/inspect.go | 270 ++++ tests/quickstarter/steps/openshift.go | 141 ++ tests/quickstarter/steps/portforward.go | 291 +++++ tests/quickstarter/steps/provision.go | 181 +++ tests/quickstarter/steps/registry.go | 219 ++++ tests/quickstarter/steps/run.go | 83 ++ tests/quickstarter/steps/template.go | 89 ++ tests/quickstarter/steps/types.go | 394 ++++++ tests/quickstarter/steps/upload.go | 14 + tests/quickstarter/steps/url_resolver.go | 258 ++++ tests/quickstarter/steps/verification.go | 250 ++++ tests/quickstarter/steps/wait.go | 295 +++++ .../delete-files-from-bitbucket-with-git.sh | 154 +++ tests/scripts/free-unused-resources.sh | 18 +- .../scripts/get-artifact-from-jenkins-run.sh | 17 +- tests/scripts/print-jenkins-log.sh | 2 +- .../scripts/print-jenkins-unittest-results.sh | 2 +- tests/scripts/print-sonar-scan-run.sh | 10 +- .../upload-file-to-bitbucket-with-git.sh | 143 +++ tests/scripts/upload-file-to-bitbucket.sh | 75 +- tests/utils/configmaps.go | 17 + tests/utils/constants.go | 7 - tests/utils/environment.go | 10 + tests/utils/filter-quickstarters.go | 55 + tests/utils/jenkins.go | 9 +- tests/utils/ods-env.go | 5 + tests/utils/openshift-client.go | 15 +- tests/utils/project_names.go | 15 + tests/utils/projects.go | 16 + tests/utils/provisioning.go | 6 +- tests/utils/resources.go | 183 ++- tests/utils/role-bindings.go | 28 - tests/utils/routes.go | 17 + tests/utils/secrets.go | 17 + tests/utils/serviceaccounts.go | 17 + tests/utils/types.go | 29 +- 82 files changed, 10277 insertions(+), 783 deletions(-) create mode 100755 tests/dev-test.sh create mode 100644 tests/quickstarter/QUICKSTARTERS_TESTS.md create mode 100644 tests/quickstarter/TODO.md delete mode 100644 tests/quickstarter/bitbucket.go create mode 100644 
tests/quickstarter/diagnostics/diagnostics.go create mode 100644 tests/quickstarter/docs/adding-step-types.md create mode 100644 tests/quickstarter/docs/steps.md create mode 100644 tests/quickstarter/docs/steps/bitbucket.md create mode 100644 tests/quickstarter/docs/steps/build.md create mode 100644 tests/quickstarter/docs/steps/expose-service.md create mode 100644 tests/quickstarter/docs/steps/http.md create mode 100644 tests/quickstarter/docs/steps/inspect.md create mode 100644 tests/quickstarter/docs/steps/provision.md create mode 100644 tests/quickstarter/docs/steps/run.md create mode 100644 tests/quickstarter/docs/steps/upload.md create mode 100644 tests/quickstarter/docs/steps/wait.md create mode 100644 tests/quickstarter/logger/logger.go delete mode 100644 tests/quickstarter/openshift.go create mode 100644 tests/quickstarter/reporting/export.go create mode 100644 tests/quickstarter/reporting/reporting.go create mode 100644 tests/quickstarter/resources/Jenkinsfile-create-jobs create mode 100644 tests/quickstarter/resources/Jenkinsfile-qs create mode 100644 tests/quickstarter/resources/README.md create mode 100644 tests/quickstarter/resources/quickstarter-test.env.template create mode 100755 tests/quickstarter/resources/scripts/create_jobs.sh create mode 100644 tests/quickstarter/resources/scripts/job_template.xml create mode 100644 tests/quickstarter/resources/scripts/run_all.xml delete mode 100644 tests/quickstarter/sonarqube.go create mode 100644 tests/quickstarter/steps/bitbucket.go create mode 100644 tests/quickstarter/steps/build.go create mode 100644 tests/quickstarter/steps/executor.go create mode 100644 tests/quickstarter/steps/expose_service.go create mode 100644 tests/quickstarter/steps/golden.go create mode 100644 tests/quickstarter/steps/http.go create mode 100644 tests/quickstarter/steps/inspect.go create mode 100644 tests/quickstarter/steps/openshift.go create mode 100644 tests/quickstarter/steps/portforward.go create mode 100644 
tests/quickstarter/steps/provision.go create mode 100644 tests/quickstarter/steps/registry.go create mode 100644 tests/quickstarter/steps/run.go create mode 100644 tests/quickstarter/steps/template.go create mode 100644 tests/quickstarter/steps/types.go create mode 100644 tests/quickstarter/steps/upload.go create mode 100644 tests/quickstarter/steps/url_resolver.go create mode 100644 tests/quickstarter/steps/verification.go create mode 100644 tests/quickstarter/steps/wait.go create mode 100755 tests/scripts/delete-files-from-bitbucket-with-git.sh create mode 100755 tests/scripts/upload-file-to-bitbucket-with-git.sh create mode 100644 tests/utils/configmaps.go delete mode 100644 tests/utils/constants.go create mode 100644 tests/utils/environment.go create mode 100644 tests/utils/filter-quickstarters.go create mode 100644 tests/utils/project_names.go delete mode 100644 tests/utils/role-bindings.go create mode 100644 tests/utils/routes.go create mode 100644 tests/utils/secrets.go create mode 100644 tests/utils/serviceaccounts.go diff --git a/.github/workflows/continuous-integration-workflow.yml b/.github/workflows/continuous-integration-workflow.yml index 5465d18ac..eb37c7b32 100644 --- a/.github/workflows/continuous-integration-workflow.yml +++ b/.github/workflows/continuous-integration-workflow.yml @@ -180,10 +180,10 @@ jobs: name: Checkout repository uses: actions/checkout@v4.1.6 - - name: Setup Go 1.18 + name: Setup Go 1.24 uses: actions/setup-go@v5 with: - go-version: 1.18 + go-version: 1.24 - name: Verify all Go files are formatted with gofmt working-directory: tests @@ -201,7 +201,7 @@ jobs: name: Verify all Go tests pass linting uses: golangci/golangci-lint-action@v6 with: - version: v1.49.0 + version: v1.64.7 working-directory: tests args: --timeout=10m - diff --git a/.gitignore b/.gitignore index 9f39fe1bb..76df78b6f 100644 --- a/.gitignore +++ b/.gitignore @@ -21,3 +21,4 @@ output-vmware-iso prov-app-config.txt headers.txt response.txt +.cache/ \ No newline 
at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 58fba0ec8..7191c3c11 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,7 @@ ### Added ### Changed - +- Improved automatic tests for the quickstarters ([#1362](https://github.com/opendevstack/ods-core/pull/1362)) ### Fixed ## [4.11.1] - 2025-12-05 diff --git a/jenkins/master/Dockerfile.ubi8 b/jenkins/master/Dockerfile.ubi8 index 40639289b..318a75c1a 100644 --- a/jenkins/master/Dockerfile.ubi8 +++ b/jenkins/master/Dockerfile.ubi8 @@ -18,7 +18,7 @@ USER root COPY yum.repos.d/ubi.repo /etc/yum.repos.d/ubi.repo COPY ./scripts_for_usr-local-bin/* /usr/local/bin/ -RUN rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io.key \ +RUN rpm --import https://pkg.jenkins.io/redhat-stable/jenkins.io-2023.key \ && disable_yum_repository.sh /etc/yum.repos.d/ci-rpm-mirrors.repo \ /etc/yum.repos.d/localdev-* /etc/yum.repos.d/epel.repo \ && ensure_java_jre_is_adequate.sh master \ diff --git a/tests/Makefile b/tests/Makefile index 56fa45058..2dc681b46 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -9,6 +9,7 @@ QS := ods-quickstarters/... ### By default we do not parallelize tests PARALLEL := 1 +PROJECT := unitt ## Full test of existing ODS core installation. Caution: Creates UNITT project and ODSVERIFY project. test: smoketest verify test-create-projects @@ -35,12 +36,13 @@ test-create-projects: ## Run quickstarter tests within existing ODS installation. Depends on UNITT project. test-quickstarter: - @(./quickstarter-test.sh $(QS) $(PARALLEL)) + @(./quickstarter-test.sh -p $(PROJECT) -q $(QS) -pa $(PARALLEL)) .PHONY: test-quickstarter ## Install tools required for tests.
prep-tools: which go-junit-report || go get github.com/jstemmer/go-junit-report + which golangci-lint || curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(shell go env GOPATH)/bin v1.64.7 .PHONY: prep-tools ## Lint diff --git a/tests/README.md b/tests/README.md index 9292aaa3c..467dddd0a 100644 --- a/tests/README.md +++ b/tests/README.md @@ -25,5 +25,42 @@ Run `make test` [in this directory](Makefile), which will execute project creati ## Running the quickstarter tests Run `make test-quickstarter` [in this directory](Makefile). By default, this will test all quickstarters in `ods-quickstarters` located next to `ods-core`. You can run just one specific quickstarter test with `make test-quickstarter QS=be-golang-plain` or run tests located in a custom directory like this: `make test-quickstarter QS=my-quickstarters/...` or `make test-quickstarter QS=my-quickstarters/foobar`. By default all tests run sequentially. To run some in parallel, use e.g. `make test-quickstarter PARALLEL=3`. +### Keeping resources after tests +By default, the quickstarter tests clean up all created resources (OpenShift resources, Helm releases, etc.) after each test completes. To keep the resources for debugging or inspection purposes, set the `KEEP_RESOURCES` environment variable: + +```bash +KEEP_RESOURCES=true make test-quickstarter QS=be-python-flask +``` + +Or for the shell script: +```bash +KEEP_RESOURCES=true ./dev-test.sh be-python-flask e2etsqs +``` + +**Note:** Port-forward cleanup is handled separately and will still occur to prevent resource leaks. + ## Authoring quickstarter tests Quickstarters must have a `testdata` directory, which needs to contain a `steps.yml` file describing which test steps to execute in YAML format. The allowed fields are defined by https://pkg.go.dev/github.com/opendevstack/ods-core/tests/quickstarter.
Typically, the `testdata` directory will also contain a `golden` folder with JSON files describing the expected results. See https://github.com/opendevstack/ods-quickstarters/tree/master/be-golang-plain/testdata as an example. + +### Specifying namespace for OpenShift resource verification + +When verifying OpenShift resources, you can optionally specify a custom namespace. If not specified, the verification defaults to the `{{.ProjectID}}-dev` namespace. + +Example in `testdata/steps.yml`: + +```yaml +- type: provision + provisionParams: + verify: + openShiftResources: + namespace: "test" # Will check in {{.ProjectID}}-test + services: + - "{{.ComponentID}}" + deploymentConfigs: + - "{{.ComponentID}}" +``` + +You can also specify a full namespace (with project prefix): +```yaml + namespace: "{{.ProjectID}}-cd" # Explicit full namespace +``` diff --git a/tests/create-projects/jenkinsfile_test.go b/tests/create-projects/jenkinsfile_test.go index a642f484d..73ea5f1fe 100644 --- a/tests/create-projects/jenkinsfile_test.go +++ b/tests/create-projects/jenkinsfile_test.go @@ -5,8 +5,9 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" + "os" "path" "runtime" "strings" @@ -101,7 +102,7 @@ func TestCreateProjectThruWebhookProxyJenkinsFile(t *testing.T) { defer reponse.Body.Close() - bodyBytes, _ := ioutil.ReadAll(reponse.Body) + bodyBytes, _ := io.ReadAll(reponse.Body) if reponse.StatusCode >= http.StatusAccepted { t.Fatalf("Could not post request: %s", string(bodyBytes)) @@ -154,7 +155,7 @@ func TestCreateProjectThruWebhookProxyJenkinsFile(t *testing.T) { t.Fatal(err) } - expected, err := ioutil.ReadFile("golden/jenkins-create-project-stages.json") + expected, err := os.ReadFile("golden/jenkins-create-project-stages.json") if err != nil { t.Fatal(err) } diff --git a/tests/dev-test.sh b/tests/dev-test.sh new file mode 100755 index 000000000..91751f5aa --- /dev/null +++ b/tests/dev-test.sh @@ -0,0 +1,200 @@ +#!/usr/bin/env bash +set -eu -o 
pipefail + +# Developer-friendly test runner for quickstarters +# This script provides a simple interface to run quickstarter tests +# with automatic environment detection and helpful output + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +ODS_CORE_DIR="${SCRIPT_DIR}/.." + +# Default values +QUICKSTARTER="${1:-be-python-flask}" +PROJECT="${2:-devtest}" +PARALLEL="${3:-1}" + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +function print_header() { + echo -e "\n${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}" + echo -e "${BLUE} $1${NC}" + echo -e "${BLUE}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━${NC}\n" +} + +function print_info() { + echo -e "${BLUE}ℹ️ $1${NC}" +} + +function print_success() { + echo -e "${GREEN}✓ $1${NC}" +} + +function print_warning() { + echo -e "${YELLOW}⚠️ $1${NC}" +} + +function print_error() { + echo -e "${RED}✗ $1${NC}" +} + +function check_prerequisites() { + local missing_tools=() + + # Check for required tools + if ! command -v oc &> /dev/null; then + missing_tools+=("oc (OpenShift CLI)") + fi + + if ! command -v go &> /dev/null; then + missing_tools+=("go") + fi + + if ! command -v jq &> /dev/null; then + missing_tools+=("jq") + fi + + if [ ${#missing_tools[@]} -ne 0 ]; then + print_error "Missing required tools:" + for tool in "${missing_tools[@]}"; do + echo " - $tool" + done + echo "" + echo "Please install the missing tools and try again." + exit 1 + fi + + # Check if logged into OpenShift + if ! oc whoami &> /dev/null; then + print_error "Not logged into OpenShift" + echo "Please run 'oc login' first." 
+ exit 1 + fi + + print_success "All prerequisites met" +} + +function detect_environment() { + print_header "Environment Detection" + + # Check if running in cluster + if [ -n "${KUBERNETES_SERVICE_HOST:-}" ]; then + print_info "Execution environment: Inside Kubernetes/OpenShift cluster" + print_info "Network strategy: Service DNS (no port-forwards needed)" + else + print_info "Execution environment: Local development machine" + print_info "Network strategy: Automatic (routes > port-forward > service DNS)" + print_warning "Port-forwards will be set up automatically as needed" + fi + + # Show current OpenShift context + current_user=$(oc whoami 2>/dev/null || echo "unknown") + current_server=$(oc whoami --show-server 2>/dev/null || echo "unknown") + + print_info "OpenShift user: $current_user" + print_info "OpenShift server: $current_server" +} + +function show_test_info() { + print_header "Test Configuration" + + echo " Quickstarter: $QUICKSTARTER" + echo " Project: $PROJECT" + echo " Parallelism: $PARALLEL" + echo "" +} + +function run_tests() { + print_header "Running Tests" + + cd "$ODS_CORE_DIR/tests" + + # Run the quickstarter test + print_info "Executing: make test-quickstarter QS=$QUICKSTARTER PROJECT=$PROJECT PARALLEL=$PARALLEL" + echo "" + + if make test-quickstarter QS="$QUICKSTARTER" PROJECT="$PROJECT" PARALLEL="$PARALLEL"; then + print_success "Tests passed!" + return 0 + else + print_error "Tests failed!" + return 1 + fi +} + +function show_usage() { + cat << EOF +Usage: $0 [QUICKSTARTER] [PROJECT] [PARALLEL] + +Developer-friendly test runner for ODS quickstarters. + +Arguments: + QUICKSTARTER Quickstarter to test (default: be-python-flask) + Examples: + - be-python-flask (single quickstarter) + - ods-quickstarters/... 
(all quickstarters) + - be-golang-plain (another single quickstarter) + + PROJECT OpenShift project name for testing (default: devtest) + + PARALLEL Number of tests to run in parallel (default: 1) + +Examples: + # Test be-python-flask in 'devtest' project + $0 + + # Test specific quickstarter in custom project + $0 be-golang-plain myproject + + # Test all quickstarters with parallelism + $0 ods-quickstarters/... testproj 3 + +Features: + ✓ Automatic environment detection (in-cluster vs local) + ✓ Smart URL resolution (routes > port-forward > service DNS) + ✓ Automatic port-forward setup for local development + ✓ Automatic cleanup on exit or interrupt (Ctrl+C) + ✓ Clear, colorful output + +Network Access: + When running locally, the test framework will automatically: + 1. Try to use OpenShift routes if they exist (fastest, most reliable) + 2. Set up port-forwards for services without routes + 3. Fall back to service DNS if running inside the cluster + + You don't need to manually set up port-forwards - it's all automatic! + +EOF +} + +# Main execution +main() { + # Show usage if help requested + if [[ "${1:-}" == "-h" || "${1:-}" == "--help" ]]; then + show_usage + exit 0 + fi + + print_header "ODS Quickstarter Test Runner" + + check_prerequisites + detect_environment + show_test_info + + if run_tests; then + print_header "Test Summary" + print_success "All tests completed successfully!" + echo "" + exit 0 + else + print_header "Test Summary" + print_error "Some tests failed. Check the output above for details." 
+ exit 1 + fi +} + +main "$@" diff --git a/tests/go.mod b/tests/go.mod index 2a00fafd6..90538e5a3 100644 --- a/tests/go.mod +++ b/tests/go.mod @@ -1,33 +1,58 @@ module github.com/opendevstack/ods-core/tests -go 1.13 +go 1.24.0 require ( + github.com/charmbracelet/log v0.3.1 github.com/ghodss/yaml v1.0.0 + github.com/google/go-cmp v0.6.0 + github.com/jstemmer/go-junit-report v0.9.1 + github.com/openshift/api v0.0.0-20180801171038-322a19404e37 + github.com/openshift/client-go v3.9.0+incompatible + github.com/tidwall/gjson v1.17.0 + k8s.io/api v0.0.0-20190222213804-5cb15d344471 + k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 + k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 +) + +require ( + github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect + github.com/charmbracelet/lipgloss v0.9.1 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.4.2 // indirect github.com/google/btree v1.0.0 // indirect - github.com/google/go-cmp v0.5.1 + github.com/google/gofuzz v1.0.0 // indirect github.com/googleapis/gnostic v0.4.0 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/imdario/mergo v0.3.8 // indirect github.com/json-iterator/go v1.1.8 // indirect - github.com/jstemmer/go-junit-report v0.9.1 github.com/kr/pretty v0.2.1 // indirect + github.com/lucasb-eyer/go-colorful v1.2.0 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.15 // indirect + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 // indirect github.com/modern-go/reflect2 v1.0.1 // indirect - github.com/openshift/api v0.0.0-20180801171038-322a19404e37 - github.com/openshift/client-go v3.9.0+incompatible + github.com/muesli/reflow v0.3.0 // indirect + github.com/muesli/termenv v0.15.2 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/rivo/uniseg v0.2.0 // indirect 
github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/crypto v0.17.0 // indirect + github.com/tidwall/match v1.1.1 // indirect + github.com/tidwall/pretty v1.2.0 // indirect + golang.org/x/crypto v0.47.0 // indirect + golang.org/x/exp v0.0.0-20260112195511-716be5621a96 // indirect + golang.org/x/net v0.49.0 // indirect golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 // indirect + golang.org/x/sys v0.40.0 // indirect + golang.org/x/term v0.39.0 // indirect + golang.org/x/text v0.33.0 // indirect golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect + google.golang.org/appengine v1.4.0 // indirect + google.golang.org/protobuf v1.23.0 // indirect gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.2.7 // indirect - k8s.io/api v0.0.0-20190222213804-5cb15d344471 - k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 - k8s.io/client-go v0.0.0-20190228174230-b40b2a5939e4 k8s.io/klog v1.0.0 // indirect sigs.k8s.io/yaml v1.1.0 // indirect ) diff --git a/tests/go.sum b/tests/go.sum index 0e4ed6ee5..3ea026248 100644 --- a/tests/go.sum +++ b/tests/go.sum @@ -1,10 +1,18 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k= +github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8= +github.com/charmbracelet/lipgloss v0.9.1 h1:PNyd3jvaJbg4jRHKWXnCj1akQm4rh8dbEzN1p/u1KWg= +github.com/charmbracelet/lipgloss v0.9.1/go.mod h1:1mPmG4cxScwUQALAAnacHaigiiHB9Pmr+v1VEawJl6I= +github.com/charmbracelet/log v0.3.1 h1:TjuY4OBNbxmHWSwO3tosgqs5I3biyY8sQPny/eCMTYw= +github.com/charmbracelet/log v0.3.1/go.mod h1:OR4E1hutLsax3ZKpXbgUqPtTjQfrh1pG3zwHGWuuq8g= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -24,8 +32,8 @@ github.com/google/gnostic v0.4.0/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrA github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= @@ -43,11 +51,22 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 
h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= +github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s= +github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8= +github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo= +github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8= github.com/openshift/api v0.0.0-20180801171038-322a19404e37 h1:05irGU4HK4IauGGDbsk+ZHrm1wOzMLYjMlfaiqMrBYc= github.com/openshift/api v0.0.0-20180801171038-322a19404e37/go.mod h1:dh9o4Fs58gpFXGSYfnVxGR9PnV53I8TW84pQaJDdGiY= github.com/openshift/client-go v3.9.0+incompatible h1:13k3Ok0B7TA2hA3bQW2aFqn6y04JaJWdk7ITTyg+Ek0= @@ -56,79 +75,67 @@ 
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/tidwall/gjson v1.17.0 h1:/Jocvlh98kcTfpN2+JzGQWQcqrPQwDrVEMApx/M5ZwM= +github.com/tidwall/gjson v1.17.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8= +golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96 h1:Z/6YuSHTLOHfNFdb8zVZomZr7cqNgTJvA8+Qz75D8gU= +golang.org/x/exp v0.0.0-20260112195511-716be5621a96/go.mod h1:nzimsREAkjBCIEFtHiYkrJyT+2uy9YZJB7H1k68CXZU= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net 
v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o= +golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ= +golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY= +golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod 
h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE= +golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -147,6 +154,8 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= 
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.0.0-20190222213804-5cb15d344471 h1:MzQGt8qWQCR+39kbYRd0uQqsvSidpYqJLFeWiJ9l4OE= k8s.io/api v0.0.0-20190222213804-5cb15d344471/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/apimachinery v0.0.0-20190221213512-86fb29eff628 h1:UYfHH+KEF88OTg+GojQUwFTNxbxwmoktLwutUzR0GPg= diff --git a/tests/quickstarter-test.sh b/tests/quickstarter-test.sh index ff78c1db4..b099021de 100755 --- a/tests/quickstarter-test.sh +++ b/tests/quickstarter-test.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -set -eu +set -u set -o pipefail export CGO_ENABLED=0 @@ -7,58 +7,98 @@ THIS_SCRIPT="$(basename $0)" # By default we run all quickstarter tests, otherwise just the quickstarter # passed as the first argument to this script. -QUICKSTARTER=${1-"ods-quickstarters/..."} -PARALLEL=${2-"1"} +BITBUCKET_TEST_PROJECT="unitt" +QUICKSTARTER="ods-quickstarters/..." +PARALLEL="1" SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" ODS_CORE_DIR=${SCRIPT_DIR%/*} -if ! oc whoami &> /dev/null; then - echo "${THIS_SCRIPT}: You need to login to OpenShift to run the tests" - echo "${THIS_SCRIPT}: Returning with exit code 1" - exit 1 -fi +function check_already_logged_in_openshift(){ + if ! oc whoami &> /dev/null; then + echo "${THIS_SCRIPT}: You need to login to OpenShift to run the tests" + echo "${THIS_SCRIPT}: Returning with exit code 1" + exit 1 + fi +} -if [ -f test-quickstarter-results.txt ]; then - rm test-quickstarter-results.txt -fi +function cleanup_workspace(){ + if [ -f test-quickstarter-results.txt ]; then + rm test-quickstarter-results.txt + fi +} -BITBUCKET_TEST_PROJECT="unitt" -echo "Setup Bitbucket test project ${BITBUCKET_TEST_PROJECT} ..." 
-BITBUCKET_URL=$(${ODS_CORE_DIR}/scripts/get-config-param.sh BITBUCKET_URL) -CD_USER_ID=$(${ODS_CORE_DIR}/scripts/get-config-param.sh CD_USER_ID) -CD_USER_PWD_B64=$(${ODS_CORE_DIR}/scripts/get-config-param.sh CD_USER_PWD_B64) -./scripts/setup-bitbucket-test-project.sh \ - --bitbucket=${BITBUCKET_URL} \ - --user=${CD_USER_ID} \ - --password=$(base64 -d - <<< ${CD_USER_PWD_B64}) \ - --project=${BITBUCKET_TEST_PROJECT} - -echo " " -echo "${THIS_SCRIPT}: Cleaning a little bit the host machine to not suffer from limitated resources... " -echo " " -if [ -f ./scripts/free-unused-resources.sh ]; then - chmod +x ./scripts/free-unused-resources.sh - ./scripts/free-unused-resources.sh || true -else - echo "Not found script ./scripts/free-unused-resources.sh " -fi +function generate_results(){ + echo "Process results" + cd $ODS_CORE_DIR/tests + if [ -f test-quickstarter-results.txt ]; then + go-junit-report < test-quickstarter-results.txt > test-quickstarter-report.xml + cat -v test-quickstarter-results.txt > test-output + go-junit-report < test-output > test-quickstarter-report.xml + csplit -z test-quickstarter-results.txt '/=== CONT/' {*} + rm xx00 + for file in xx*; do + newName=$(grep -oP -m 1 'TestQuickstarter/\K\w+.*' $file) + mv $file $newName.txt + done + fi +} + +function run_test(){ + echo " " + echo "${THIS_SCRIPT}: Running tests (${QUICKSTARTER}). Output will take a while to arrive ..." 
+ echo " " + + + # Should fix error " panic: test timed out after " + echo "${THIS_SCRIPT}: go test -v -count=1 -timeout 30h -parallel ${PARALLEL} github.com/opendevstack/ods-core/tests/quickstarter -args ${QUICKSTARTER}" + go test -v -count=1 -timeout 30h -parallel ${PARALLEL} github.com/opendevstack/ods-core/tests/quickstarter -args ${QUICKSTARTER} ${BITBUCKET_TEST_PROJECT} | tee test-quickstarter-results.txt 2>&1 + + exitcode="${PIPESTATUS[0]}" + + echo " " + echo " " + echo "${THIS_SCRIPT}: Returning with exit code ${exitcode}" + echo " " + echo " " -echo " " -echo "${THIS_SCRIPT}: Running tests (${QUICKSTARTER}). Output will take a while to arrive ..." -echo " " + generate_results -# Should fix error " panic: test timed out after " -echo "${THIS_SCRIPT}: go test -v -count=1 -timeout 30h -parallel ${PARALLEL} github.com/opendevstack/ods-core/tests/quickstarter -args ${QUICKSTARTER}" -go test -v -count=1 -timeout 30h -parallel ${PARALLEL} github.com/opendevstack/ods-core/tests/quickstarter -args ${QUICKSTARTER} | tee test-quickstarter-results.txt 2>&1 -exitcode="${PIPESTATUS[0]}" -if [ -f test-quickstarter-results.txt ]; then - go-junit-report < test-quickstarter-results.txt > test-quickstarter-report.xml + exit $exitcode +} + + +function usage { + printf "Run quickstarters tests.\n\n" + printf "\t-h |--help\t\tPrint usage\n" + printf "\t-p |--project\t\tBitbucket project (* mandatory)\n" + printf "\t-pa|--parallel\t\tNumber of test executed in parallel\n" + printf "\t-q |--quickstarter\tQuickStarter to test or Quickstarter folder (default:ods-quickstarters/...)\n" +} + +while [[ "$#" -gt 0 ]]; do + case $1 in + + -h|--help) usage; exit 0;; + + -pa|--parallel) PARALLEL="$2"; shift;; + -pa=*|--parallel=*) PARALLEL="${1#*=}";; + + -q|--quickstarter) QUICKSTARTER="$2"; shift;; + -q=*|--quickstarter=*) QUICKSTARTER="${1#*=}";; + + -p|--project) BITBUCKET_TEST_PROJECT="$2"; shift;; + -p=*|--project=*) BITBUCKET_TEST_PROJECT="${1#*=}";; + + *) echo_error "Unknown 
parameter passed: $1"; exit 1;; +esac; shift; done + +if [ -z "${BITBUCKET_TEST_PROJECT}" ]; then + echo "--project is mandatory" + usage + exit 1 fi -echo " " -echo " " -echo "${THIS_SCRIPT}: Returning with exit code ${exitcode}" -echo " " -echo " " -exit $exitcode +check_already_logged_in_openshift +cleanup_workspace +run_test diff --git a/tests/quickstarter/QUICKSTARTERS_TESTS.md b/tests/quickstarter/QUICKSTARTERS_TESTS.md new file mode 100644 index 000000000..1597f391a --- /dev/null +++ b/tests/quickstarter/QUICKSTARTERS_TESTS.md @@ -0,0 +1,1135 @@ +# Quickstarters Test Framework + +## Index +- [Overview](#overview) +- [Prerequisites](#prerequisites) +- [Environment Setup](resources/README.md) +- [How to Run](#how-to-run) +- [Logging and Output](#logging-and-output) +- [Templates and Variables](#templates-and-variables) +- [Testdata Layout](#testdata-layout) +- [Step Types (How to Use)](#step-types-how-to-use) + - [provision](#provision) + - [build](#build) + - [upload](#upload) + - [wait](#wait) + - [http](#http) + - [inspect](#inspect) + - [expose-service](#expose-service) + - [run](#run) +- [Advanced Features](#advanced-features) + - [Test Lifecycle Hooks](#test-lifecycle-hooks) + - [Step Execution Control](#step-execution-control) + - [Retry Logic](#retry-logic) + - [Test Reporting](#test-reporting) +- [Complete Example (steps.yml + run script)](#complete-example-stepsyml--run-script) +- [Service URL Resolution](#service-url-resolution) +- [Migration Guidance](#migration-guidance) +- [Developing Custom Step Types](#developing-custom-step-types) +- [Troubleshooting](#troubleshooting) +- [Best Practices](#best-practices) + +## Overview +Step-based tests driven by `testdata/steps.yml` to validate quickstarters. 
+ +A test is a sequence of steps such as: +- provision a quickstarter +- upload config/fixtures +- build/deploy +- wait for readiness +- expose service URLs +- call HTTP endpoints +- inspect logs/env/resources +- run an end-to-end shell script + +## Prerequisites +- Logged in to the target OpenShift cluster (`oc whoami`). +- `oc`, `curl`, and `jq` available locally. +- Quickstarter repo contains a `testdata` folder with `steps.yml`. + +## How to Run +- Wrapper (recommended): + ```bash + cd ods-core/tests + ./dev-test.sh + ``` +- Make: + ```bash + cd ods-core/tests + make test-quickstarter QS= PROJECT_NAME= + ``` + +## Logging and Output + +### Structured Logging with Colors and Emojis + +The test framework uses [charmbracelet/log](https://github.com/charmbracelet/log) to provide structured, readable logging with colors and emojis for better visibility and ease of following test execution. + +#### Output Features: +- **🚀 Sections**: Major test milestones are marked with visual section headers +- **📋 Sub-sections**: Logical groupings within a test use sub-section headers +- **▶️ Step Markers**: Each step execution is prefixed with the step number and type +- **✅ Success Messages**: Completed operations are marked with green checkmarks +- **❌ Error Messages**: Failed operations are marked with red X symbols +- **⚙️ Running Operations**: Ongoing operations show a gear symbol +- **⏳ Waiting Indicators**: Operations in waiting states show a hourglass +- **⚠️ Warnings**: Important warnings use the warning symbol + +#### Example Log Output: +``` +🚀 ╔═════════════════════════════════════════════════════════════╗ + 🚀 Starting Quickstarter Test Framework +🚀 ╚═════════════════════════════════════════════════════════════╝ + +🚀 ╔═════════════════════════════════════════════════════════════╗ + 🚀 Test Paths +🚀 ╚═════════════════════════════════════════════════════════════╝ + + • Found 2 quickstarter(s) to test: + • ./quickstarters/be-java-springboot + • ./quickstarters/fe-angular 
+ +🚀 ╔═════════════════════════════════════════════════════════════╗ + 🚀 Testing Quickstarter: be-java-springboot +🚀 ╚═════════════════════════════════════════════════════════════╝ + +┌───────────────────────────────────────────────────────────────── + 📋 Component: myapp +┌───────────────────────────────────────────────────────────────── + + • Total steps to execute: 3 + +▶️ Step 1/3 [provision]: Provision quickstarter + +⚙️ Running: Provision for test-myapp +✅ Success: BitBucket repo created/updated + +⏳ Waiting: Jenkins pipeline execution +✅ Success: Build triggered with name jenkins-1234 + +▶️ Step 2/3 [build]: Trigger build pipeline + +... +``` + +#### Environment Variable Support: + +You can control logging verbosity by setting the `LOG_LEVEL` environment variable: +```bash +LOG_LEVEL=debug go test -v -run TestQuickstarter -timeout 30m \ + -args -quickstarter= -project= +``` + +#### Color Legend: +- **Cyan** (#00d7ff): Information messages +- **Yellow** (#ffaf00): Warning messages +- **Red** (#ff005f): Error messages +- **Green** (#00ff00): Success indicators + +## Templates and Variables +All string fields in `steps.yml` support Go-template rendering. + +Common template variables: +- `{{.ProjectID}}`: the project passed to the test +- `{{.ComponentID}}`: the component passed to the test (or overridden per step via `componentID`) + +Common environment variables passed to `run` scripts: +- `PROJECT_ID`, `COMPONENT_ID`, `NAMESPACE` (defaults to `-dev`) +- `ODS_NAMESPACE`, `ODS_GIT_REF`, `ODS_IMAGE_TAG` (when available) +- `{ALIAS}_SERVICE_URL` for each entry under `runParams.services` (e.g. 
`API_SERVICE_URL`) + +## Testdata Layout +Typical structure in a quickstarter repo: + +```text +testdata/ +├── steps.yml +├── golden/ +│ ├── jenkins-provision-stages.json +│ ├── jenkins-build-stages.json +│ └── sonar-scan.json +└── functional/ + ├── api/ + │ └── health-response.json + └── integration/ + └── e2e_test.sh +``` + +Key principles: +- Use templates and internal service DNS in `steps.yml` URLs. +- Avoid hardcoding localhost and manual port-forwarding. +- For `run` steps that need URLs, declare services in `runParams.services` and consume `{ALIAS}_SERVICE_URL`. + +## Step Types (How to Use) +The YAML file is a list under `steps:`: + +```yaml +steps: + - type: + description: Optional human-friendly description + componentID: Optional override for this step + Params: + ... +``` + +### provision +Provision via ODS; optionally verify Jenkins provision stages. + +Minimal example: +```yaml +- type: provision + provisionParams: + quickstarter: be-python-flask +``` + +With common options: +```yaml +- type: provision + description: Provision quickstarter + provisionParams: + quickstarter: be-python-flask + pipeline: "{{.ProjectID}}-{{.ComponentID}}" + branch: "master" + env: + - key: SOME_PARAM + value: "some-value" + verify: + strategy: fail-fast + jenkinsStages: golden/jenkins-provision-stages.json +``` + +### build +Build/deploy; optionally verify Jenkins stages, Sonar scan, test results, and OpenShift resources. + +Minimal example: +```yaml +- type: build + buildParams: {} +``` + +With verification: +```yaml +- type: build + description: Build and deploy + buildParams: + verify: + strategy: aggregate + jenkinsStages: golden/jenkins-build-stages.json + sonarScan: golden/sonar-scan.json + runAttachments: ["metadata.json"] + testResults: 5 + openShiftResources: + deployments: ["{{.ComponentID}}"] + services: ["{{.ComponentID}}", "{{.ComponentID}}-backend"] + routes: ["{{.ComponentID}}"] +``` + +### upload +Add a file into the created repository. 
+ +```yaml +- type: upload + description: Upload config into repo + uploadParams: + file: fixtures/app-config.json + filename: config/app-config.json + render: true +``` + +Notes: +- `file` is relative to `testdata/`. +- `filename` is the destination path inside the provisioned repository. +- `render: true` applies templating to the file contents. + +### wait +Poll for readiness/conditions. + +Supported conditions: +- `pod-ready` (resource: selector like `-l app=...` or a pod name) +- `deployment-complete` (resource: `deployment/` or `dc/`) +- `job-complete` (resource: `job/`) +- `route-accessible` (resource: `route/`) +- `http-accessible` (url: `...`) +- `log-contains` (resource: `pod/`, `deployment/`, `dc/`; message: `...`) + +Examples: + +Deployment rollout: +```yaml +- type: wait + waitParams: + condition: deployment-complete + resource: "deployment/{{.ComponentID}}" + timeout: "10m" + interval: "5s" +``` + +Pod readiness by label: +```yaml +- type: wait + waitParams: + condition: pod-ready + resource: "-l app={{.ProjectID}}-{{.ComponentID}}" + timeout: "5m" + interval: "5s" +``` + +Log message appears: +```yaml +- type: wait + waitParams: + condition: log-contains + resource: "deployment/{{.ComponentID}}" + message: "Server listening" + timeout: "5m" + interval: "10s" +``` + +Route exists and is reachable: +```yaml +- type: wait + waitParams: + condition: route-accessible + resource: "route/{{.ComponentID}}" + timeout: "5m" + interval: "5s" +``` + +Arbitrary URL becomes reachable: +```yaml +- type: wait + waitParams: + condition: http-accessible + url: "http://{{.ComponentID}}.{{.ProjectID}}-dev.svc.cluster.local:8080/health" + timeout: "5m" + interval: "2s" +``` + +Job completes: +```yaml +- type: wait + waitParams: + condition: job-complete + resource: "job/{{.ProjectID}}-{{.ComponentID}}-migration" + timeout: "10m" + interval: "5s" +``` + +### http +Call endpoints with status/body/assertions and optional retries. 
+ +Status + golden JSON body: +```yaml +- type: http + description: Health endpoint returns expected JSON + httpParams: + url: "http://{{.ComponentID}}.{{.ProjectID}}-dev.svc.cluster.local:8080/health" + method: GET + headers: + Accept: application/json + expectedStatus: 200 + expectedBody: functional/api/health-response.json + retry: + attempts: 10 + delay: "2s" +``` + +Assertions (JSONPath via `path`): +```yaml +- type: http + description: Assert JSON fields + httpParams: + url: "http://{{.ComponentID}}.{{.ProjectID}}-dev.svc.cluster.local:8080/health" + expectedStatus: 200 + assertions: + - path: "status" + equals: "ok" + - path: "version" + exists: true + - path: "message" + contains: "ready" + - path: "commit" + matches: "^[a-f0-9]{7,}$" +``` + +POST with JSON body and custom timeout: +```yaml +- type: http + description: Create resource + httpParams: + url: "http://{{.ComponentID}}.{{.ProjectID}}-dev.svc.cluster.local:8080/api/v1/items" + method: POST + headers: + Content-Type: application/json + Accept: application/json + body: '{"name":"example"}' + expectedStatus: 201 + timeout: 60 +``` + +### inspect +Check logs/env/resources for a resource. + +```yaml +- type: inspect + description: Verify runtime signals + inspectParams: + resource: "deployment/{{.ComponentID}}" + namespace: "{{.ProjectID}}-dev" + checks: + logs: + contains: ["Server listening on :8080"] + notContains: ["Traceback", "panic:"] + matches: ["Listening on.*8080"] + env: + APP_ENV: "dev" + ODS_PROJECT: "{{.ProjectID}}" + resources: + limits: + cpu: "500m" + memory: "512Mi" + requests: + cpu: "100m" + memory: "128Mi" +``` + +### expose-service +Resolve stable URLs for one or more services and make them available to later `http` / `run` steps. 
+ +```yaml +- type: expose-service + description: Expose services for local/Jenkins runs + exposeServiceParams: + services: + - serviceName: "{{.ComponentID}}" + namespace: "{{.ProjectID}}-dev" + port: "8080" + - serviceName: "{{.ComponentID}}-backend" + # namespace defaults to "-dev" if omitted + # port defaults to 8080 if omitted +``` + +Notes: +- Use one entry per Kubernetes/OpenShift Service. +- If you use `runParams.services`, ensure those service names are exposed here first. + +### run +Execute a shell script. If `runParams.services` is set, the script receives one env var per alias: `{ALIAS}_SERVICE_URL`. + +```yaml +- type: run + description: End-to-end tests + runParams: + file: functional/integration/e2e_test.sh + services: + api: "{{.ComponentID}}" + backend: "{{.ComponentID}}-backend" +``` + +Minimal script pattern: +```bash +#!/usr/bin/env bash +set -euo pipefail + +: "${API_SERVICE_URL:?missing API_SERVICE_URL}" +: "${BACKEND_SERVICE_URL:?missing BACKEND_SERVICE_URL}" + +curl -fsS "$API_SERVICE_URL/health" | jq -e '.status == "ok"' >/dev/null +curl -fsS "$BACKEND_SERVICE_URL/metrics" >/dev/null +``` + +## Advanced Features + +### Test Lifecycle Hooks + +Each step can execute shell scripts before and after execution. This is useful for setup, cleanup, or custom validation logic. + +#### beforeStep Hook +Executes a script before the main step. Useful for setup operations: + +```yaml +- type: build + description: Build and deploy with custom setup + beforeStep: "hooks/pre-build-setup.sh" + buildParams: + verify: + jenkinsStages: golden/jenkins-build-stages.json +``` + +Example `testdata/hooks/pre-build-setup.sh`: +```bash +#!/usr/bin/env bash +set -euo pipefail + +echo "Setting up build environment..." +export CUSTOM_BUILD_VAR="custom-value" +# Additional setup logic +``` + +#### afterStep Hook +Executes a script after the main step, even if the step fails. 
Useful for cleanup: + +```yaml +- type: http + description: Call API endpoint + afterStep: "hooks/post-http-cleanup.sh" + httpParams: + url: "http://{{.ComponentID}}.{{.ProjectID}}-dev.svc.cluster.local:8080/health" + expectedStatus: 200 +``` + +Example `testdata/hooks/post-http-cleanup.sh`: +```bash +#!/usr/bin/env bash +set -euo pipefail + +echo "Cleaning up HTTP test artifacts..." +# Cleanup logic that always runs +``` + +**Notes:** +- Hooks are executed relative to the `testdata/` directory. +- Hooks receive environment variables from template data. +- Hook failures in `beforeStep` will prevent the main step from executing. +- Hook failures in `afterStep` are logged but don't fail the test (useful for cleanup). +- Hooks support full bash scripting, including conditional logic. + +### Step Execution Control + +#### Skip Steps Conditionally + +You can skip steps based on static conditions or template expressions: + +**Static Skip:** +```yaml +- type: inspect + description: Optional diagnostic step (skipped in CI) + skip: true + inspectParams: + resource: "deployment/{{.ComponentID}}" +``` + +**Conditional Skip (Template Expression):** +```yaml +- type: build + description: Only build in non-production environments + skipIf: "{{eq .Environment \"production\"}}" + buildParams: {} +``` + +Template variables can be any standard Go template expression. 
Examples: +```yaml +- skipIf: "{{.IsProduction}}" # Boolean variable +- skipIf: "{{eq .Environment \"ci\"}}" # Environment comparison +- skipIf: "{{gt .Replicas 1}}" # Numeric comparison +``` + +#### Step-Level Timeout + +Override the default timeout for individual steps: + +```yaml +- type: wait + description: Wait for slow deployment + timeout: 900 # seconds (overrides default) + waitParams: + condition: deployment-complete + resource: "deployment/{{.ComponentID}}" +``` + +### Retry Logic + +Automatically retry steps on failure with configurable behavior: + +#### Basic Retry +```yaml +- type: http + description: API call with retry + retry: + attempts: 5 # Retry up to 5 times + delay: "2s" # Wait 2 seconds between attempts + httpParams: + url: "http://{{.ComponentID}}.{{.ProjectID}}-dev.svc.cluster.local:8080/health" + expectedStatus: 200 +``` + +#### Smart Transient Error Retry +Only retry on transient errors (timeouts, connection issues): + +```yaml +- type: wait + description: Wait with smart retry + retry: + attempts: 10 + delay: "1s" + onlyTransient: true # Skip retries for permanent errors + waitParams: + condition: http-accessible + url: "http://{{.ComponentID}}.{{.ProjectID}}-dev.svc.cluster.local:8080" + timeout: "5m" +``` + +**Transient Errors:** The framework automatically detects: +- Connection timeouts +- Connection refused +- Temporary unavailability +- EOF and broken pipes +- I/O timeouts + +### Test Reporting + +The test framework generates structured reports with execution metrics and can export to multiple formats. 
+ +#### Automatic Report Generation + +Test reports are automatically generated and printed to console: + +``` +Test Report: be-java-springboot + Total Steps: 5 + Passed: 5 + Failed: 0 + Skipped: 0 + Success Rate: 100.00% + Total Duration: 2m30s + Avg Per Step: 30s +``` + +#### Export Reports to File + +Enable report export by setting an environment variable: + +```bash +EXPORT_TEST_REPORTS=true go test -v -run TestQuickstarter -timeout 30m \ + -args -quickstarter= -project= +``` + +This generates a `test-report-.json` file with detailed metrics: + +```json +{ + "startTime": "2024-01-21T10:30:00Z", + "endTime": "2024-01-21T10:32:30Z", + "totalDuration": 150000000000, + "quickstarterID": "be-java-springboot", + "steps": [ + { + "index": 0, + "type": "provision", + "description": "Provision quickstarter", + "startTime": "2024-01-21T10:30:00Z", + "endTime": "2024-01-21T10:30:30Z", + "duration": 30000000000, + "status": "passed", + "error": null, + "context": {} + } + ], + "summary": { + "totalSteps": 5, + "passedSteps": 5, + "failedSteps": 0, + "skippedSteps": 0, + "successRate": 100.0, + "averageDuration": 30000000000 + } +} +``` + +#### Report Contents + +Each report includes: +- **Execution Timeline:** Start/end times and duration for each step +- **Step Status:** Passed, failed, or skipped +- **Error Details:** Full error messages for failed steps +- **Context Information:** Pod logs, events, and environment at time of failure +- **Aggregate Statistics:** Pass rate, timing averages, counts by status + +#### CI/CD Integration + +Reports can be processed by CI/CD systems for: +- Trend analysis (run-to-run metrics) +- Performance regression detection +- Test flakiness tracking +- Automated failure notifications + +## Complete Example (steps.yml + run script) +Example `testdata/steps.yml` using all step types with advanced features: + +```yaml +steps: + - type: provision + description: Provision quickstarter + beforeStep: "hooks/pre-provision.sh" + provisionParams: + 
quickstarter: be-python-flask + branch: master + verify: + jenkinsStages: golden/jenkins-provision-stages.json + + - type: upload + description: Add runtime config + uploadParams: + file: fixtures/app-config.json + filename: config/app-config.json + render: true + + - type: build + description: Build and deploy + retry: + attempts: 2 + delay: "5s" + buildParams: + verify: + jenkinsStages: golden/jenkins-build-stages.json + sonarScan: golden/sonar-scan.json + testResults: 1 + openShiftResources: + deployments: ["{{.ComponentID}}"] + services: ["{{.ComponentID}}", "{{.ComponentID}}-backend"] + + - type: wait + description: Wait for rollout + waitParams: + condition: deployment-complete + resource: "deployment/{{.ComponentID}}" + timeout: 10m + interval: 5s + + - type: expose-service + description: Resolve external/local URLs for tests + exposeServiceParams: + services: + - serviceName: "{{.ComponentID}}" + port: "8080" + - serviceName: "{{.ComponentID}}-backend" + port: "8080" + + - type: http + description: Healthcheck with retry and assertions + retry: + attempts: 10 + delay: 2s + onlyTransient: true + httpParams: + url: "http://{{.ComponentID}}.{{.ProjectID}}-dev.svc.cluster.local:8080/health" + expectedStatus: 200 + assertions: + - path: "status" + equals: "ok" + + - type: inspect + description: Verify logs and env + inspectParams: + resource: "deployment/{{.ComponentID}}" + checks: + logs: + notContains: ["Traceback", "panic:"] + env: + ODS_PROJECT: "{{.ProjectID}}" + + - type: run + description: End-to-end shell test + runParams: + file: functional/integration/e2e_test.sh + services: + api: "{{.ComponentID}}" + backend: "{{.ComponentID}}-backend" + + # Optional: diagnostic step (skipped by default in CI) + - type: inspect + description: Diagnostic pod inspection (optional) + skip: true + inspectParams: + resource: "deployment/{{.ComponentID}}" +``` + +Example `testdata/functional/integration/e2e_test.sh`: + +```bash +#!/usr/bin/env bash +set -euo pipefail + +: 
"${PROJECT_ID:?missing PROJECT_ID}" +: "${COMPONENT_ID:?missing COMPONENT_ID}" +: "${NAMESPACE:?missing NAMESPACE}" + +: "${API_SERVICE_URL:?missing API_SERVICE_URL}" +: "${BACKEND_SERVICE_URL:?missing BACKEND_SERVICE_URL}" + +echo "Project: $PROJECT_ID" +echo "Component: $COMPONENT_ID" +echo "Namespace: $NAMESPACE" +echo "API: $API_SERVICE_URL" +echo "Backend: $BACKEND_SERVICE_URL" + +curl -fsS "$API_SERVICE_URL/health" | jq -e '.status == "ok"' >/dev/null +curl -fsS "$API_SERVICE_URL/api/v1/info" | jq -e '.name != null' >/dev/null +curl -fsS "$BACKEND_SERVICE_URL/metrics" >/dev/null + +echo "OK" +``` + +Example `testdata/hooks/pre-provision.sh`: + +```bash +#!/usr/bin/env bash +set -euo pipefail + +echo "Pre-provisioning checks..." +# Verify cluster connectivity +oc whoami > /dev/null || exit 1 +# Any custom setup logic +echo "Pre-provisioning checks passed" +``` + +## Service URL Resolution +Priority: +1) Route exists → use route URL (https/http). +2) In-cluster (Jenkins) → use service DNS. +3) Local → automatic `oc port-forward` on 8000–8009 with cleanup and reuse. + +## Migration Guidance +- Replace hardcoded localhost URLs with templated service DNS in `steps.yml`. +- Add an `expose-service` step for every service you need to access from local runs. +- Remove manual port-forwarding from scripts. +- In `run` steps, declare services under `runParams.services` and consume `{ALIAS}_SERVICE_URL`. +- Validate with `./dev-test.sh `. + +## Developing Custom Step Types + +The quickstarter test framework uses a **registry pattern** that makes it easy to add new step types without modifying the core test execution logic. This section explains how to implement and register custom step types. 
+ +### Architecture Overview + +The framework consists of: +- **StepHandler Interface**: Defines the contract all step types must implement +- **StepRegistry**: Maps step type names to their handler implementations +- **ExecutionParams**: Consolidates all context needed for step execution +- **Handler Implementations**: Individual step type logic (upload, build, http, etc.) + +### Step 1: Define the Step Type Constant + +Add your new step type constant to `steps/types.go`: + +```go +const ( + StepTypeUpload = "upload" + StepTypeRun = "run" + // ... existing types ... + StepTypeMyCustom = "my-custom" // Add your new type here +) +``` + +### Step 2: Add Step Parameters to TestStep + +In `steps/types.go`, add a parameters struct for your step if needed: + +```go +// MyCustomParams defines parameters for the my-custom step type +type MyCustomParams struct { + // Add your custom fields here + Target string `json:"target"` + Options []string `json:"options"` + RetryCount int `json:"retryCount"` +} +``` + +Then add a field to the `TestStep` struct: + +```go +type TestStep struct { + Type string `json:"type"` + Description string `json:"description"` + // ... existing params ... + MyCustomParams *MyCustomParams `json:"myCustomParams,omitempty"` +} +``` + +### Step 3: Implement the Execution Logic + +Create a new file `steps/my_custom.go` with your step implementation: + +```go +package steps + +import ( + "fmt" + "testing" +) + +// ExecuteMyCustom handles the my-custom step type. +// This function contains the actual logic for your step. 
+func ExecuteMyCustom(t *testing.T, step TestStep, testdataPath string, + tmplData TemplateData, projectName string) { + + // Validate parameters + if step.MyCustomParams == nil { + t.Fatalf("Missing my-custom parameters") + } + + params := step.MyCustomParams + + // Implement your step logic here + fmt.Printf("Executing custom step with target: %s\n", params.Target) + + // Example: Run some operation + for _, option := range params.Options { + fmt.Printf("Processing option: %s\n", option) + // Your custom logic here + } + + // Use template data for dynamic values + renderedTarget := RenderTemplate(t, params.Target, tmplData) + fmt.Printf("Rendered target: %s\n", renderedTarget) + + // Fail the test if something goes wrong + if someCondition { + t.Fatalf("Custom step failed: %v", err) + } +} +``` + +### Step 4: Create a Handler Adapter + +In `steps/registry.go`, add a handler struct that implements the `StepHandler` interface: + +```go +// MyCustomHandler implements the handler for my-custom steps. +type MyCustomHandler struct{} + +func (h *MyCustomHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteMyCustom(t, *step, params.TestdataPath, params.TmplData, params.ProjectName) + return nil +} +``` + +**Note**: The handler adapter maps between the `StepHandler` interface (which receives `ExecutionParams`) and your specific execution function signature. + +### Step 5: Register the Handler + +In `steps/registry.go`, add your handler to the `registerDefaultHandlers()` function: + +```go +func registerDefaultHandlers() { + defaultRegistry.Register(StepTypeUpload, &UploadHandler{}) + defaultRegistry.Register(StepTypeRun, &RunHandler{}) + // ... existing registrations ... 
+ defaultRegistry.Register(StepTypeMyCustom, &MyCustomHandler{}) // Add this line +} +``` + +### Step 6: Add Documentation + +Create `docs/steps/my-custom.md` documenting your new step type: + +```markdown +# my-custom Step + +## Purpose +Brief description of what this step does. + +## Parameters + +- `target` (string, required): Description of target parameter +- `options` (array, optional): Description of options +- `retryCount` (int, optional): Number of retry attempts + +## Example + +\```yaml +- type: my-custom + description: "Execute custom operation" + myCustomParams: + target: "{{.ComponentID}}" + options: + - "verbose" + - "debug" + retryCount: 3 +\``` + +## Common Use Cases +- Use case 1 +- Use case 2 +``` + +Update `docs/steps.md` to include your new step in the overview table. + +### Step 7: Write Unit Tests + +Create `steps/my_custom_test.go` to test your handler: + +```go +package steps + +import ( + "testing" +) + +func TestMyCustomHandler(t *testing.T) { + registry := DefaultRegistry() + + handler, err := registry.Get(StepTypeMyCustom) + if err != nil { + t.Fatalf("Expected my-custom handler to be registered: %v", err) + } + + if handler == nil { + t.Fatal("Handler should not be nil") + } + + // Test execution (may need mocking for complex steps) + step := &TestStep{ + Type: StepTypeMyCustom, + MyCustomParams: &MyCustomParams{ + Target: "test-target", + Options: []string{"opt1"}, + }, + } + + params := &ExecutionParams{ + TestdataPath: "/tmp/testdata", + TmplData: TemplateData{}, + ProjectName: "test-project", + } + + // Test that handler executes without panic + // (Actual behavior testing may require more setup) +} +``` + +### Step 8: Use Your New Step in Tests + +Add your step to any `testdata/steps.yml`: + +```yaml +componentID: my-component + +steps: + - type: provision + # ... provision step ... 
+ + - type: my-custom + description: "Run my custom operation" + myCustomParams: + target: "{{.ComponentID}}-resource" + options: + - "enable-feature-x" + - "debug-mode" + retryCount: 2 +``` + +### Best Practices for Custom Steps + +1. **Keep Steps Focused**: Each step should do one thing well +2. **Use Template Data**: Leverage `{{.Variable}}` syntax for dynamic values +3. **Fail Fast**: Use `t.Fatalf()` for unrecoverable errors +4. **Add Logging**: Use `fmt.Printf()` or the logger package for visibility +5. **Parameter Validation**: Always validate required parameters at the start +6. **Error Context**: Provide clear error messages with context +7. **Idempotency**: Consider making steps idempotent when possible +8. **Documentation**: Document all parameters and provide examples + +### Example: Complete Custom Step + +Here's a complete example of a custom step that validates database connectivity: + +```go +// steps/database.go +package steps + +import ( + "fmt" + "testing" + "database/sql" + _ "github.com/lib/pq" +) + +type DatabaseParams struct { + ConnectionString string `json:"connectionString"` + Query string `json:"query"` + ExpectedRows int `json:"expectedRows"` +} + +func ExecuteDatabase(t *testing.T, step TestStep, testdataPath string, + tmplData TemplateData, projectName string) { + if step.DatabaseParams == nil { + t.Fatalf("Missing database parameters") + } + + params := step.DatabaseParams + connStr := RenderTemplate(t, params.ConnectionString, tmplData) + query := RenderTemplate(t, params.Query, tmplData) + + db, err := sql.Open("postgres", connStr) + if err != nil { + t.Fatalf("Failed to connect to database: %v", err) + } + defer db.Close() + + rows, err := db.Query(query) + if err != nil { + t.Fatalf("Query failed: %v", err) + } + defer rows.Close() + + count := 0 + for rows.Next() { + count++ + } + + if count != params.ExpectedRows { + t.Fatalf("Expected %d rows, got %d", params.ExpectedRows, count) + } + + fmt.Printf("✅ Database validation 
passed: %d rows\n", count) +} + +// steps/registry.go - add this handler +type DatabaseHandler struct{} + +func (h *DatabaseHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteDatabase(t, *step, params.TestdataPath, params.TmplData, params.ProjectName) + return nil +} + +// In registerDefaultHandlers(): +// defaultRegistry.Register("database", &DatabaseHandler{}) +``` + +### Advanced: Handler Parameters + +If your step needs additional context beyond `ExecutionParams`, you can: + +1. **Add to ExecutionParams**: Extend the struct if the parameter is commonly needed +2. **Use TestStep Fields**: Store step-specific data in your params struct +3. **Access Global Config**: Use the `config` map in ExecutionParams + +### Registry Pattern Benefits + +- ✅ **No Switch Statements**: Add steps without modifying test runner code +- ✅ **Plugin Architecture**: External packages can register custom steps +- ✅ **Testability**: Individual handlers can be unit tested in isolation +- ✅ **Type Safety**: Go compiler ensures all handlers implement the interface +- ✅ **Maintainability**: Step logic is cleanly separated and organized + +### Troubleshooting Custom Steps + +**Handler not found**: Ensure you've registered it in `registerDefaultHandlers()` +**Parameters nil**: Check YAML structure and JSON tags match +**Template errors**: Verify template syntax and that variables exist in `TemplateData` +**Import cycles**: Keep step implementations in the `steps` package +**Test failures**: Check parameter validation and error handling + +## Troubleshooting +- Login: `oc whoami`. +- Port-forwards: `ps aux | grep "oc port-forward" | grep -v grep`; kill with `pkill -f "oc port-forward"`. +- Ports in use: `lsof -i :8000-8009`. +- Resources: `oc get svc -n -dev`, `oc get pods -n -dev`. +- Add/extend `wait` steps if endpoints are not ready. + +## Best Practices +- Add `wait` before `http`/`run` to avoid races. +- Use retries for early endpoints. 
+- Keep scripts small; fail fast when expected `{ALIAS}_SERVICE_URL` is missing. +- Prefer templates for names/namespaces; avoid hardcoded hostnames. diff --git a/tests/quickstarter/TODO.md b/tests/quickstarter/TODO.md new file mode 100644 index 000000000..cdb45bbaf --- /dev/null +++ b/tests/quickstarter/TODO.md @@ -0,0 +1,22 @@ +Plan: Enhance Quickstarters Testing Framework +TL;DR: The ODS quickstarters testing framework is well-designed but lacks key capabilities for enterprise-grade functional testing. The framework needs structured reporting, test lifecycle hooks, better error diagnostics, and test data utilities to become more robust and easier to use. Enhancements should focus on observability, maintainability, and developer experience without breaking the current YAML-based configuration model. + +Steps +[X] Add test lifecycle hooks — Implement before/after step and component setup/teardown mechanisms in steps/types.go and expand quickstarter_test.go to support optional hook execution. + +[X] Build structured test reporting — Create a new reporting/ package with metrics collection (execution time per step, pass/fail counts, resource utilization), and export JUnit XML natively instead of relying on external tools. + +[X] Enhance error diagnostics — Extend error handling in verification.go and step implementations to capture context (pod logs, events, previous states) and provide actionable suggestions on common failures. + +[ ] Add test data utilities — Create a fixtures/ package with builders for common test objects (namespaces, deployments, ConfigMaps) and a cleanup policy system to handle data rollback after tests. + +[X] Implement execution control — Add YAML schema validation, conditional step execution (skip if conditions), and step-level retry logic in steps.go and relevant step files. 
+ +[X] Improve extensibility — Refactor step registration from switch statements to a plugin/handler registry pattern, and document step authoring guidelines in QUICKSTARTERS_TESTS.md. + +Further Considerations +Backward compatibility — All changes should remain backward compatible with existing YAML test definitions; new features should be optional fields. + +Reporting scope — Focus on actionable metrics (timing, failures, resource states) vs. comprehensive performance profiling (which may be overkill); prioritize JUnit XML natively for CI/CD. + +Hook complexity trade-off — Hooks should be simple (shell scripts or templates) rather than requiring Go code, to keep YAML-based tests maintainable. \ No newline at end of file diff --git a/tests/quickstarter/bitbucket.go b/tests/quickstarter/bitbucket.go deleted file mode 100644 index c962ba236..000000000 --- a/tests/quickstarter/bitbucket.go +++ /dev/null @@ -1,33 +0,0 @@ -package quickstarter - -import ( - "encoding/base64" - "fmt" - - "github.com/opendevstack/ods-core/tests/utils" -) - -func recreateBitbucketRepo(config map[string]string, project string, repo string) error { - - password, err := base64.StdEncoding.DecodeString(config["CD_USER_PWD_B64"]) - if err != nil { - return fmt.Errorf("Error decoding cd_user password: %w", err) - } - - stdout, stderr, err := utils.RunScriptFromBaseDir("tests/scripts/recreate-bitbucket-repo.sh", []string{ - fmt.Sprintf("--bitbucket=%s", config["BITBUCKET_URL"]), - fmt.Sprintf("--user=%s", config["CD_USER_ID"]), - fmt.Sprintf("--password=%s", password), - fmt.Sprintf("--project=%s", project), - fmt.Sprintf("--repository=%s", repo), - }, []string{}) - - if err != nil { - return fmt.Errorf( - "Execution of `recreate-bitbucket-repo.sh` failed: \nStdOut: %s\nStdErr: %s\nErr: %w", - stdout, - stderr, - err) - } - return nil -} diff --git a/tests/quickstarter/diagnostics/diagnostics.go b/tests/quickstarter/diagnostics/diagnostics.go new file mode 100644 index 000000000..7411f15df --- 
/dev/null +++ b/tests/quickstarter/diagnostics/diagnostics.go @@ -0,0 +1,172 @@ +package diagnostics + +import ( + "context" + "fmt" + "time" +) + +// FailureContext captures diagnostic information about a test failure. +type FailureContext struct { + StepIndex int + StepType string + Message string + Timestamp time.Time + Pod *PodInfo + Events []EventInfo + Logs []string + Environment map[string]string + Suggestion string +} + +// PodInfo contains information about a Kubernetes pod. +type PodInfo struct { + Name string + Namespace string + Phase string + ContainerStates []ContainerState + Conditions []PodCondition +} + +// ContainerState represents the state of a container +type ContainerState struct { + Name string + State string + Message string +} + +// PodCondition represents a pod condition +type PodCondition struct { + Type string + Status string + Reason string + Message string +} + +// EventInfo represents a Kubernetes event +type EventInfo struct { + Name string + Namespace string + Type string + Reason string + Message string + Timestamp time.Time + Count int32 +} + +// DiagnosticsCollector collects diagnostic information on test failures. +type DiagnosticsCollector struct { + ctx context.Context +} + +// NewDiagnosticsCollector creates a new diagnostics collector. +func NewDiagnosticsCollector(ctx context.Context) *DiagnosticsCollector { + return &DiagnosticsCollector{ctx: ctx} +} + +// CaptureFailureContext captures all available diagnostic information about a failure. +// This is a placeholder implementation - actual integration would require kubectl/oc calls. +func (dc *DiagnosticsCollector) CaptureFailureContext( + stepIndex int, + stepType string, + namespace string, + resourceName string, + err error, +) *FailureContext { + ctx := &FailureContext{ + StepIndex: stepIndex, + StepType: stepType, + Message: err.Error(), + Timestamp: time.Now(), + Environment: make(map[string]string), + } + + // In a real implementation, this would: + // 1. 
Query pod status and conditions
+	// 2. Retrieve recent events
+	// 3. Capture container logs
+	// 4. Analyze failure patterns and suggest fixes
+
+	ctx.Suggestion = dc.suggestFix(stepType, err)
+
+	return ctx
+}
+
+// suggestFix provides actionable suggestions based on failure type and step type.
+func (dc *DiagnosticsCollector) suggestFix(stepType string, err error) string {
+	_ = err.Error() // Reserved for future error pattern matching
+
+	suggestions := map[string][]string{
+		"provision": {
+			"Check Jenkins logs: oc logs -f <jenkins-pod>",
+			"Verify Bitbucket credentials and repository access",
+			"Ensure the quickstarter is properly configured",
+		},
+		"build": {
+			"Check Jenkins build logs for details",
+			"Verify build resources are available",
+			"Check container image availability and pull secrets",
+		},
+		"http": {
+			"Verify the service is running: kubectl get pods -l app=<name>",
+			"Check service endpoints: kubectl get endpoints <service>",
+			"Verify network policies are not blocking access",
+			"Check firewall rules if accessing from external network",
+		},
+		"wait": {
+			"Check pod status: kubectl describe pod <pod>",
+			"Review pod events: kubectl get events --sort-by='.lastTimestamp'",
+			"Check resource requests/limits match available resources",
+			"Increase timeout if deployment takes longer",
+		},
+		"inspect": {
+			"Verify the resource exists: kubectl get <resource>",
+			"Check container logs: kubectl logs <pod>",
+			"Inspect resource configuration: kubectl describe <resource>",
+		},
+	}
+
+	if tips, ok := suggestions[stepType]; ok { // "tips" avoids shadowing the map above
+		msg := "Troubleshooting steps:"; for _, tip := range tips { msg += "\n - " + tip }; return msg // %v on a []string would render "[a b c]"
+	}
+
+	return "Check step configuration and related resource status"
+}
+
+// IsTransientError determines if an error is likely transient (can be retried).
+func IsTransientError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	errMsg := err.Error()
+
+	// Common transient error patterns
+	transientPatterns := []string{
+		"timeout",
+		"temporarily unavailable",
+		"connection refused",
+		"connection reset",
+		"EOF",
+		"broken pipe",
+		"i/o timeout",
+	}
+
+	for _, pattern := range transientPatterns {
+		if contains(errMsg, pattern) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// contains checks if a string contains a substring (case-sensitive).
+func contains(s, substr string) bool {
+	for i := 0; i <= len(s)-len(substr); i++ {
+		if s[i:i+len(substr)] == substr {
+			return true
+		}
+	}
+	return false
+}
diff --git a/tests/quickstarter/docs/adding-step-types.md b/tests/quickstarter/docs/adding-step-types.md
new file mode 100644
index 000000000..569933225
--- /dev/null
+++ b/tests/quickstarter/docs/adding-step-types.md
@@ -0,0 +1,238 @@
+# Adding New Step Types - Quick Reference
+
+This is a condensed guide for adding new step types to the quickstarter test framework. For comprehensive documentation, see the "Developing Custom Step Types" section in [QUICKSTARTERS_TESTS.md](../QUICKSTARTERS_TESTS.md).
+
+## Quick Start Checklist
+
+- [ ] 1. Add step type constant to `steps/types.go`
+- [ ] 2. Add parameters struct to `steps/types.go` (if needed)
+- [ ] 3. Add params field to `TestStep` struct in `steps/types.go`
+- [ ] 4. Create `steps/my_step.go` with `ExecuteMyStep()` function
+- [ ] 5. Add handler adapter to `steps/registry.go`
+- [ ] 6. Register handler in `registerDefaultHandlers()` in `steps/registry.go`
+- [ ] 7. Create documentation in `docs/steps/my-step.md`
+- [ ] 8. Update step overview in `docs/steps.md`
+- [ ] 9. Write tests in `steps/my_step_test.go`
+- [ ] 10. Test in a real `testdata/steps.yml`
+
+## Code Templates
+
+### 1. Step Type Constant (`steps/types.go`)
+
+```go
+const (
+    // ... existing types ...
+    StepTypeMyStep = "my-step"
+)
+```
+
+### 2.
Parameters Struct (`steps/types.go`) + +```go +// MyStepParams defines parameters for the my-step step type. +type MyStepParams struct { + Target string `json:"target"` + Options []string `json:"options,omitempty"` + Timeout int `json:"timeout,omitempty"` +} +``` + +### 3. Add to TestStep (`steps/types.go`) + +```go +type TestStep struct { + // ... existing fields ... + MyStepParams *MyStepParams `json:"myStepParams,omitempty"` +} +``` + +### 4. Implementation (`steps/my_step.go`) + +```go +package steps + +import ( + "fmt" + "testing" +) + +// ExecuteMyStep handles the my-step step type. +func ExecuteMyStep(t *testing.T, step TestStep, testdataPath string, + tmplData TemplateData, projectName string) { + + // Validate parameters + if step.MyStepParams == nil { + t.Fatalf("Missing my-step parameters") + } + + params := step.MyStepParams + + // Render templates + target := RenderTemplate(t, params.Target, tmplData) + + // Implement logic + fmt.Printf("Executing my-step on target: %s\n", target) + + // ... your implementation here ... + + // Fail on error + if err != nil { + t.Fatalf("my-step failed: %v", err) + } +} +``` + +### 5. Handler Adapter (`steps/registry.go`) + +Add this handler struct with other handlers in the file: + +```go +// MyStepHandler implements the handler for my-step steps. +type MyStepHandler struct{} + +func (h *MyStepHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteMyStep(t, *step, params.TestdataPath, params.TmplData, params.ProjectName) + return nil +} +``` + +### 6. Registration (`steps/registry.go`) + +Add to `registerDefaultHandlers()`: + +```go +func registerDefaultHandlers() { + // ... existing registrations ... + defaultRegistry.Register(StepTypeMyStep, &MyStepHandler{}) +} +``` + +### 7. 
YAML Usage (`testdata/steps.yml`) + +```yaml +steps: + - type: my-step + description: "Execute my custom step" + myStepParams: + target: "{{.ComponentID}}-resource" + options: + - "verbose" + - "debug" + timeout: 30 +``` + +## Parameter Mapping Guide + +Your handler receives `ExecutionParams` which contains: + +| Field | Description | Use For | +|-------|-------------|---------| +| `TestdataPath` | Path to testdata directory | Loading test files | +| `TmplData` | Template variables | Rendering dynamic values | +| `RepoName` | Repository name | Git operations | +| `QuickstarterRepo` | QS repo name | Component info | +| `QuickstarterName` | QS name | Component info | +| `Config` | Configuration map | Global settings | +| `ProjectName` | OpenShift project | Resource namespace | + +Map these to your `ExecuteMyStep()` function signature as needed. + +## Common Patterns + +### Template Rendering +```go +target := RenderTemplate(t, params.Target, tmplData) +``` + +### OpenShift Commands +```go +namespace := fmt.Sprintf("%s-dev", projectName) +cmd := []string{"oc", "get", "pods", "-n", namespace} +stdout, stderr, err := utils.RunCommand("oc", cmd[1:], []string{}) +``` + +### File Operations +```go +filePath := fmt.Sprintf("%s/%s", testdataPath, params.FileName) +content, err := os.ReadFile(filePath) +``` + +### Conditional Logic with Templates +```go +if step.MyStepParams.Condition != "" { + shouldRun := RenderTemplate(t, step.MyStepParams.Condition, tmplData) + if shouldRun == "false" { + return // Skip execution + } +} +``` + +## Testing Your Step + +### Unit Test Template (`steps/my_step_test.go`) + +```go +package steps + +import "testing" + +func TestMyStepHandler(t *testing.T) { + registry := DefaultRegistry() + + handler, err := registry.Get(StepTypeMyStep) + if err != nil { + t.Fatalf("Handler not registered: %v", err) + } + + if handler == nil { + t.Fatal("Handler is nil") + } + + // Additional tests for your step logic +} +``` + +### Integration Test + +Create a 
test quickstarter with `testdata/steps.yml`: + +```bash +cd /path/to/test-quickstarter +mkdir -p testdata +cat > testdata/steps.yml << 'EOF' +componentID: test-component +steps: + - type: my-step + myStepParams: + target: "test-target" +EOF + +# Run the test +cd ods-core/tests +./dev-test.sh ../test-quickstarter test-project +``` + +## Troubleshooting + +| Issue | Solution | +|-------|----------| +| "unknown step type" | Check registration in `registerDefaultHandlers()` | +| Parameters are nil | Verify JSON tags and YAML structure match | +| Template errors | Ensure variables exist in `TemplateData` | +| Import cycle | Keep all step code in `steps` package | +| Handler not found in tests | Call `DefaultRegistry()` to trigger registration | + +## Examples + +See existing step implementations for reference: +- Simple step: `steps/http.go` +- Complex step: `steps/provision.go` +- With verification: `steps/inspect.go` +- With retries: Use `retry` in step YAML + +## Additional Resources + +- Full guide: [QUICKSTARTERS_TESTS.md](../QUICKSTARTERS_TESTS.md#developing-custom-step-types) +- Step types overview: [docs/steps.md](steps.md) +- Individual step docs: [docs/steps/](steps/) +- Registry pattern code: [steps/registry.go](../steps/registry.go) diff --git a/tests/quickstarter/docs/steps.md b/tests/quickstarter/docs/steps.md new file mode 100644 index 000000000..316475351 --- /dev/null +++ b/tests/quickstarter/docs/steps.md @@ -0,0 +1,147 @@ +# Test Steps Documentation + +This guide documents all available test step types used to build comprehensive end-to-end tests for ODS components. 
+ +## Step Types Overview + +The following test step types are available: + +| Step Type | Purpose | Use Case | +|-----------|---------|----------| +| [expose-service](steps/expose-service.md) | Make Kubernetes services accessible | Access service URLs in tests | +| [wait](steps/wait.md) | Wait for conditions to be met | Wait for deployments, pods, endpoints | +| [run](steps/run.md) | Execute shell scripts | Run custom test scripts | +| [upload](steps/upload.md) | Upload files to Bitbucket | Commit test artifacts to repos | +| [build](steps/build.md) | Trigger Jenkins pipeline builds | Test build/deployment processes | +| [http](steps/http.md) | Test HTTP endpoints | Validate APIs and services | +| [inspect](steps/inspect.md) | Inspect container configuration | Verify logs, environment variables | +| [bitbucket](steps/bitbucket.md) | Manage Bitbucket repositories | Recreate repos, approve PRs | +| [provision](steps/provision.md) | Prepare test environment | Set up resources for tests | + +## Quick Start + +### Basic Test Workflow + +```yaml +# 1. Provision resources and run pipeline +- type: provision + componentID: "my-app" + provisionParams: + quickstarter: "docker" + +# 2. Wait for deployment to complete +- type: wait + waitParams: + condition: "deployment-complete" + resource: "deployment/my-app" + +# 3. Expose the service +- type: expose-service + exposeServiceParams: + services: + - serviceName: "my-app" + port: "8080" + +# 4. Test the API +- type: http + httpParams: + url: "http://my-app.example.com/health" + expectedStatus: 200 + +# 5. Run test scripts +- type: run + componentID: "my-app" + runParams: + file: "test-suite.sh" + +# 6. 
Inspect container +- type: inspect + componentID: "my-app" + inspectParams: + resource: "deployment/my-app" + checks: + logs: + contains: + - "Application started" +``` + +## Common Patterns + +### Testing a Quickstarter + +```yaml +- type: provision + componentID: "test-component" + provisionParams: + quickstarter: "python" + verify: + testResults: 25 + openShiftResources: + deployments: + - "test-component" +``` + +### Service Testing + +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "api" + - serviceName: "database" + +- type: http + httpParams: + url: "http://api.example.com/api/health" + expectedStatus: 200 +``` + +### Resource Verification + +```yaml +- type: provision + componentID: "app" + provisionParams: + verify: + openShiftResources: + deployments: + - "{{.ComponentID}}" + services: + - "{{.ComponentID}}" + routes: + - "{{.ComponentID}}" +``` + +## Template Variables + +All steps support Go template rendering with these variables: + +- **`{{.ProjectID}}`**: Project identifier +- **`{{.ComponentID}}`**: Component identifier +- **`{{.OdsNamespace}}`**: ODS cluster namespace +- **`{{.OdsGitRef}}`**: ODS Git reference +- **`{{.OdsImageTag}}`**: ODS image tag + +## Best Practices + +1. **Use wait steps**: Always wait for resources to be ready before testing +2. **Clear verification rules**: Use verification to ensure expected outcomes +3. **Template variables**: Use template variables for dynamic test configuration +4. **Service isolation**: Expose services in separate steps for clarity +5. **Sequential execution**: Steps execute in order; arrange them logically +6. **Error handling**: Each step fails the test if it encounters an error +7. **Cleanup**: Resources are automatically cleaned up after tests + +## Detailed Documentation + +For detailed information about each step type, see the links in the table above or navigate to the `steps/` directory. 
+ +Each step documentation includes: +- Configuration examples +- Parameter descriptions +- How the step works +- Practical examples +- Common scenarios +- Best practices + +--- diff --git a/tests/quickstarter/docs/steps/bitbucket.md b/tests/quickstarter/docs/steps/bitbucket.md new file mode 100644 index 000000000..a593b15ba --- /dev/null +++ b/tests/quickstarter/docs/steps/bitbucket.md @@ -0,0 +1,215 @@ +# bitbucket Step + +Interacts with Bitbucket repositories: recreate repositories, upload/delete files, and manage pull requests with optional content verification. + +## Basic Usage + +```yaml +- type: bitbucket + description: "Manage Bitbucket resources" + componentID: "my-component" + bitbucketParams: + action: "recreate-repo" # required – see actions below + repository: "my-repo" # repo slug; templating supported + project: "{{.ProjectID}}" # optional project key +``` + +## Common Parameters (bitbucketParams) + +- `action` (string, required): One of `recreate-repo`, `approve-pr`, `get-pullrequest`, `delete-files`, `upload-file`. +- `repository` (string, optional): Repository slug. If omitted for some actions, defaults to `-` in code paths where needed. +- `project` (string, optional): Bitbucket project key. Defaults to current test project. +- `verify` (object, optional): For PR-related actions; supports `prChecks` map of JSON paths → expected values. + +Verification format (for PR actions): + +```yaml +verify: + prChecks: + .title: "Feature: Add new API endpoint" + .state: "OPEN" + .fromRef.displayId: "contains:feature/" +``` + +Notes: +- JSON paths use a simple dot notation (e.g. `.author.user.name`). +- Expected values support exact match or prefix `contains:` for substring checks. + +--- + +## Action: `recreate-repo` + +Deletes an existing repository (waits until removal if scheduled) and creates a fresh one with the same slug. 
+ +Parameters: +- `repository` (required) +- `project` (optional) + +Example: +```yaml +- type: bitbucket + componentID: "my-component" + bitbucketParams: + action: recreate-repo + repository: "custom-repo-name" +``` + +--- + +## Action: `delete-files` + +Delete one or more files/folders from a repository via Git. + +Parameters: + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `repository` | string | Yes | Repository slug (templating supported). | +| `project` | string | No | Project key (defaults to current). | +| `paths` | array[string] | Yes | Relative paths in the repo; files or folders. | +| `commitMessage` | string | No | Commit message (default: "Remove files/folders"). | + +Examples: + +```yaml +- type: bitbucket + description: "Delete configuration file from repository" + bitbucketParams: + action: delete-files + repository: my-repo + paths: + - "config/old-settings.yaml" +``` + +```yaml +- type: bitbucket + description: "Clean up deprecated files" + bitbucketParams: + action: delete-files + repository: "{{.ProjectID}}-app" + project: "{{.ProjectID}}" + paths: + - "src/deprecated/old-component.ts" + - "tests/legacy-test.spec.ts" + - "docs/outdated-guide.md" + - "legacy-tests/" + commitMessage: "Clean up deprecated code and documentation" +``` + +How it works: +1. Clones the repo +2. Removes the specified paths +3. Commits and pushes the change + +--- + +## Action: `upload-file` + +Uploads a local file from the quickstarter testdata fixtures into the target repository using Git. + +Parameters: + +| Parameter | Type | Required | Description | +|-----------|------|----------|-------------| +| `file` | string | Yes | Source file path relative to the quickstarter `testdata` folder. | +| `filename` | string | No | Target path in the repository; defaults to the basename of `file`. Paths are created if needed. | +| `repository` | string | No | Target repository slug; defaults to `-`. 
|
+| `project` | string | No | Project key (defaults to current). |
+| `render` | bool | No | If true, renders the source file as a Go template with the test template data. |
+
+Example:
+
+```yaml
+- type: bitbucket
+  componentID: "my-component"
+  bitbucketParams:
+    action: upload-file
+    file: "fixtures/pipeline/Jenkinsfile"
+    repository: "{{.ProjectID}}-my-component"
+    filename: "Jenkinsfile"
+    render: true
+```
+
+Notes:
+- If `filename` contains directories (e.g., `tests/acceptance/spec.cy.ts`), the step ensures the path exists before copying.
+
+---
+
+## Action: `get-pullrequest`
+
+Fetches a pull request and optionally validates its content via `verify.prChecks`.
+
+Parameters:
+- `repository` (required)
+- `project` (optional)
+- `pullRequestID` (required)
+- `verify.prChecks` (optional) – map of JSONPath → expected value
+
+Example:
+```yaml
+- type: bitbucket
+  componentID: "my-component"
+  bitbucketParams:
+    action: get-pullrequest
+    repository: my-repo
+    pullRequestID: "42"
+    verify:
+      prChecks:
+        .title: "Feature: Add new API endpoint"
+        .state: "OPEN"
+        .description: "contains:This is the expected text..."
+```
+
+---
+
+## Action: `approve-pr`
+
+Adds a reviewer (defaults to the CD user if not provided) and approves the PR.
+
+Parameters:
+- `repository` (required)
+- `project` (optional)
+- `pullRequestID` (required)
+- `reviewer` (optional)
+- `verify.prChecks` (optional) – validations run before approval
+
+Example:
+```yaml
+- type: bitbucket
+  componentID: "my-component"
+  bitbucketParams:
+    action: approve-pr
+    repository: my-repo
+    pullRequestID: "42"
+    reviewer: "j.doe"
+```
+
+---
+
+## General Behavior
+
+1. Authentication via `CD_USER_ID` and `CD_USER_PWD_B64` from the test configuration
+2. Action-specific behavior as described above
+3. Optional verification for PRs using simple JSON path assertions
+4.
Meaningful error messages on failure (HTTP status, script output) + +## End-to-End Workflow Example + +```yaml +- type: bitbucket + componentID: "feature-component" + bitbucketParams: + action: recreate-repo + +- type: build + componentID: "feature-component" + buildParams: + branch: "feature/test" + +- type: bitbucket + componentID: "feature-component" + bitbucketParams: + action: approve-pr + repository: feature-repo + pullRequestID: "1" +``` diff --git a/tests/quickstarter/docs/steps/build.md b/tests/quickstarter/docs/steps/build.md new file mode 100644 index 000000000..272281894 --- /dev/null +++ b/tests/quickstarter/docs/steps/build.md @@ -0,0 +1,97 @@ +# build Step + +Triggers a Jenkins pipeline build for testing component build and deployment processes. Similar to provision but for testing existing repositories without extensive cleanup. + +## Configuration + +```yaml +- type: build + description: "Build component" + componentID: "my-component" + buildParams: + branch: "develop" # optional, defaults to "master" + repository: "my-repo" # optional, defaults to component repository + pipeline: "Jenkinsfile" # optional, Jenkins pipeline file name + env: # optional, environment variables + - name: "BUILD_ENV" + value: "test" + verify: # optional, verification configuration + testResults: 20 + openShiftResources: + deployments: + - "{{.ComponentID}}" +``` + +## Parameters + +### `branch` (optional) +Git branch to use for the pipeline. Defaults to `master`. +Supports Go template rendering (e.g., `{{.ComponentID}}`). + +### `repository` (optional) +The Bitbucket repository to build. Defaults to the component's repository. +Supports Go template rendering. + +### `pipeline` (optional) +The Jenkins pipeline file to execute. Defaults to `Jenkinsfile`. +Supports Go template rendering. + +### `env` (optional) +Array of environment variables to pass to the Jenkins pipeline. 
Each entry has: +- **`name`** (required): Environment variable name +- **`value`** (required): Environment variable value (supports Go template rendering) + +### `verify` (optional) +Verification configuration for the pipeline run. See the provision step's verification section for available verification options. + +## How It Works + +1. **Repository Setup**: Uses the specified repository and branch +2. **Pipeline Execution**: Runs the Jenkins pipeline with the provided configuration +3. **Verification**: Checks the pipeline execution against specified verification criteria +4. **Error Handling**: Fails the test if the pipeline execution fails + +## Examples + +**Minimal build:** +```yaml +- type: build + componentID: "my-app" + buildParams: {} +``` + +**Build specific branch:** +```yaml +- type: build + componentID: "app" + buildParams: + branch: "feature/new-feature" +``` + +**Build with custom environment:** +```yaml +- type: build + componentID: "service" + buildParams: + branch: "main" + env: + - name: "ENVIRONMENT" + value: "staging" + - name: "DEBUG" + value: "true" +``` + +**Build with verification:** +```yaml +- type: build + componentID: "app" + buildParams: + branch: "develop" + verify: + testResults: 30 + openShiftResources: + deployments: + - "{{.ComponentID}}" + services: + - "{{.ComponentID}}" +``` diff --git a/tests/quickstarter/docs/steps/expose-service.md b/tests/quickstarter/docs/steps/expose-service.md new file mode 100644 index 000000000..b217b134c --- /dev/null +++ b/tests/quickstarter/docs/steps/expose-service.md @@ -0,0 +1,147 @@ +# expose-service Step + +Makes Kubernetes services accessible to test steps by resolving their URLs and setting up port-forwards for local testing. 
+ +## Configuration + +```yaml +- type: expose-service + description: "Expose service with defaults" + exposeServiceParams: + services: + - serviceName: "{{.ComponentID}}" + port: "8080" + namespace: "{{.ProjectID}}-dev" # optional, defaults to {{.ProjectID}}-dev +``` + +## Parameters + +### `services` (required) +Array of services to expose. Each service object contains: + +- **`serviceName`** (required, string): The Kubernetes service name. Supports Go template rendering (e.g., `{{.ComponentID}}`). +- **`port`** (optional, string): Service port number. Defaults to `8080`. +- **`namespace`** (optional, string): Kubernetes namespace where the service runs. Defaults to `{{.ProjectID}}-dev`. Supports Go template rendering. + +## How It Works + +1. **Service Lookup**: Waits for the service to be ready (up to 120 seconds) +2. **URL Resolution**: + - In cluster: `http://service-name.namespace.svc.cluster.local:port` + - Locally: Sets up `kubectl port-forward` → `http://localhost:forwarded_port` +3. **Storage**: URLs stored as `ExposedService_` in template data + +## Accessing Services in Later Steps + +### Single Service in `run` steps + +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "api" + +- type: run + runParams: + file: "test.sh" +``` + +Access via `$SERVICE_URL`: +```bash +curl -s "$SERVICE_URL/health" +``` + +### Multiple Services in `run` steps + +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "api" + port: "8080" + - serviceName: "backend" + port: "9000" + +- type: run + runParams: + file: "test.sh" + services: + api: "api" + backend: "backend" +``` + +Access via named environment variables: +```bash +curl -s "$API_SERVICE_URL/health" +curl -s "$BACKEND_SERVICE_URL/metrics" +``` + +The service alias is converted to uppercase and suffixed with `_SERVICE_URL`. 
+ +### In `http` steps + +Use Kubernetes DNS directly: +```yaml +- type: http + httpParams: + url: "http://api.{{.ProjectID}}-dev.svc.cluster.local:8080/health" +``` + +## Examples + +**Single service:** +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "app" + port: "8080" + +- type: run + runParams: + file: "test.sh" +``` + +**Multiple services:** +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "api" + port: "8080" + - serviceName: "db" + port: "5432" + +- type: run + runParams: + file: "integration_test.sh" + services: + api: "api" + db: "db" +``` + +Script receives: `$API_SERVICE_URL` and `$DB_SERVICE_URL` + +## Cleanup + +Port-forwards are automatically cleaned up when tests complete, fail, or are interrupted. + +## Common Scenarios + +**Custom namespace:** +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "my-service" + port: "3000" + namespace: "custom-namespace" +``` + +**Template variables in service names:** +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "{{.ComponentID}}-api" +``` diff --git a/tests/quickstarter/docs/steps/http.md b/tests/quickstarter/docs/steps/http.md new file mode 100644 index 000000000..bbb1ba704 --- /dev/null +++ b/tests/quickstarter/docs/steps/http.md @@ -0,0 +1,149 @@ +# http Step + +Tests HTTP endpoints by making requests and validating responses. Supports retries, method variations, request/response body validation, and header checks. 
+ +## Configuration + +```yaml +- type: http + description: "Test API endpoint" + httpParams: + url: "http://{{.ComponentID}}.example.com/health" + method: "GET" # optional, defaults to GET + timeout: 30 # optional, timeout in seconds, defaults to 30 + headers: # optional, HTTP headers + Authorization: "Bearer token123" + body: '{"key": "value"}' # optional, request body + retry: # optional, retry configuration + attempts: 3 + delay: "5s" + expectedStatus: 200 # optional, expected HTTP status code + expectedBody: "golden/health.json" # optional, path to a golden file for response body validation + assertions: # optional, assertions to run on the response + - path: "status" + equals: "UP" + - path: "components.db.status" + equals: "UP" + - path: "body.message" + contains: "Hello" + - path: "timestamp" + matches: "\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}Z" + - path: "optionalField" + exists: false +``` + +## Parameters + +### `url` (required) +The HTTP endpoint URL to test. Supports Go template rendering (e.g., `{{.ComponentID}}`). + +### `method` (optional) +HTTP method to use. Defaults to `GET`. +Allowed values: `GET`, `POST`, `PUT`, `DELETE`, `PATCH`, `HEAD` + +### `timeout` (optional) +Request timeout in seconds. Defaults to `30`. + +### `headers` (optional) +Map of HTTP headers to include in the request. + +### `body` (optional) +Request body as a string (typically JSON). +Supports Go template rendering. + +### `retry` (optional) +Retry configuration with: +- **`attempts`** (optional): Number of retry attempts. Defaults to 1. +- **`delay`** (optional): Delay between retries (Go duration format like `5s`, `1m`). + +### `expectedStatus` (optional) +Expected HTTP status code. If not specified, accepts any 2xx or 3xx response. + +### `expectedBody` (optional) +Path to a "golden file" containing the expected response body. The response body will be compared against the content of this file. This is typically used for validating JSON responses. 
+ +### `assertions` (optional) +A list of assertions to validate the response body. This is especially useful for JSON responses. Each assertion can have the following fields: +- **`path`** (required for most assertions): A [GJSON path](https://github.com/tidwall/gjson/blob/master/SYNTAX.md) to extract a value from the JSON response body. +- **`equals`** (optional): Checks if the value at `path` is equal to the given value. +- **`exists`** (optional): Checks if the given `path` exists in the JSON response. The value should be `true` or `false`. +- **`contains`** (optional): Checks if the value at `path` (if provided) or the whole body contains the given string. +- **`matches`** (optional): Checks if the value at `path` (if provided) or the whole body matches the given regular expression. + +## How It Works + +1. **URL Resolution**: Resolves the URL (handles route names, in-cluster DNS, port-forwards) +2. **Request Building**: Constructs the HTTP request with headers, body, and method +3. **Retry Logic**: Retries the request on failure up to specified attempts +4. **Response Validation**: Checks status code, compares body against a golden file (if `expectedBody` is set), and runs assertions (if `assertions` are set). +5. 
**Error Handling**: Fails the test if validation fails + +## Examples + +**Simple GET request:** +```yaml +- type: http + httpParams: + url: "http://localhost:8080/health" + expectedStatus: 200 +``` + +**POST with request body:** +```yaml +- type: http + httpParams: + url: "http://api.example.com/users" + method: "POST" + headers: + Content-Type: "application/json" + Authorization: "Bearer token123" + body: '{"name": "test", "email": "test@example.com"}' + expectedStatus: 201 +``` + +**Validate response body with assertions:** +```yaml +- type: http + httpParams: + url: "http://{{.ComponentID}}.example.com/api/status" + expectedStatus: 200 + assertions: + - path: "status" + equals: "healthy" + - path: "details.version" + matches: '"\\d+\\.\\d+\\.\\d+"' + - path: "message" + contains: "service is running" +``` + +**Validate response against a golden file:** +```yaml +- type: http + httpParams: + url: "http://api.example.com/data" + expectedBody: "golden/api-data.json" +``` + +**HTTP request with retries:** +```yaml +- type: http + httpParams: + url: "http://{{.ComponentID}}.example.com/ready" + timeout: 10 + retry: + attempts: 5 + delay: "3s" + expectedStatus: 200 + assertions: + - path: "status" + equals: "ready" +``` + +**DELETE request:** +```yaml +- type: http + httpParams: + url: "http://api.example.com/resource/123" + method: "DELETE" + expectedStatus: 204 +``` diff --git a/tests/quickstarter/docs/steps/inspect.md b/tests/quickstarter/docs/steps/inspect.md new file mode 100644 index 000000000..469c074c6 --- /dev/null +++ b/tests/quickstarter/docs/steps/inspect.md @@ -0,0 +1,146 @@ +# inspect Step + +Inspects container behavior by checking logs, environment variables, and resource metrics. Useful for validating that containers are running with correct configuration and logging expected messages. 
+ +## Configuration + +```yaml +- type: inspect + description: "Inspect container configuration" + componentID: "my-component" + inspectParams: + resource: "pod/{{.ComponentID}}-xyz" # pod or deployment name + namespace: "{{.ProjectID}}-dev" # optional, defaults to {{.ProjectID}}-dev + checks: + logs: # optional, validate log output + contains: + - "Application started" + notContains: + - "ERROR" + - "FATAL" + matches: + - "version: \\d+\\.\\d+\\.\\d+" + env: # optional, validate environment variables + - name: "APP_ENV" + expectedValue: "production" + - name: "DEBUG_MODE" + expectedValue: "false" + resources: # optional, validate resource requests/limits + requestsCPU: "100m" + requestsMemory: "256Mi" + limitsCPU: "500m" + limitsMemory: "512Mi" +``` + +## Parameters + +### `resource` (required) +The Kubernetes resource to inspect. Can be: +- Pod name: `my-pod` +- Deployment name: `my-deployment` +- Full resource: `pod/my-pod` + +Supports Go template rendering (e.g., `{{.ComponentID}}`). + +### `namespace` (optional) +Kubernetes namespace where the resource runs. Defaults to `{{.ProjectID}}-dev`. +Supports Go template rendering. 
+ +### `checks` (optional) +Object containing inspection checks: + +**`logs`** (optional): Validate container logs +- **`contains`** (optional): Array of strings that must appear in logs +- **`notContains`** (optional): Array of strings that must not appear in logs +- **`matches`** (optional): Array of regex patterns that must match in logs + +**`env`** (optional): Validate environment variables +- Array of objects with: + - **`name`** (required): Environment variable name + - **`expectedValue`** (required): Expected value (supports Go template rendering) + +**`resources`** (optional): Validate resource requests and limits +- **`requestsCPU`** (optional): Expected CPU request (e.g., `100m`) +- **`requestsMemory`** (optional): Expected memory request (e.g., `256Mi`) +- **`limitsCPU`** (optional): Expected CPU limit (e.g., `500m`) +- **`limitsMemory`** (optional): Expected memory limit (e.g., `512Mi`) + +## How It Works + +1. **Resource Lookup**: Finds the specified pod or deployment +2. **Log Inspection**: Retrieves logs and validates against patterns +3. **Environment Check**: Inspects environment variables inside the container +4. **Resource Validation**: Checks resource requests and limits +5. 
**Error Handling**: Fails if any check fails + +## Examples + +**Inspect logs:** +```yaml +- type: inspect + componentID: "app" + inspectParams: + resource: "{{.ComponentID}}" + namespace: "{{.ProjectID}}-dev" + checks: + logs: + contains: + - "Application started successfully" + - "Listening on port" + notContains: + - "ERROR" +``` + +**Inspect environment variables:** +```yaml +- type: inspect + componentID: "service" + inspectParams: + resource: "deployment/{{.ComponentID}}" + checks: + env: + - name: "ENVIRONMENT" + expectedValue: "production" + - name: "LOG_LEVEL" + expectedValue: "info" +``` + +**Inspect resource requests/limits:** +```yaml +- type: inspect + componentID: "app" + inspectParams: + resource: "{{.ComponentID}}" + checks: + resources: + requestsCPU: "100m" + requestsMemory: "256Mi" + limitsCPU: "500m" + limitsMemory: "512Mi" +``` + +**Comprehensive inspection:** +```yaml +- type: inspect + componentID: "api" + inspectParams: + resource: "pod/{{.ComponentID}}-deployment" + namespace: "{{.ProjectID}}-dev" + checks: + logs: + contains: + - "API server started" + - "Database connected" + matches: + - "version: \\d+\\.\\d+\\.\\d+" + env: + - name: "APP_MODE" + expectedValue: "production" + - name: "DATABASE_URL" + expectedValue: "postgresql://db:5432" + resources: + requestsCPU: "200m" + requestsMemory: "512Mi" + limitsCPU: "1000m" + limitsMemory: "1Gi" +``` diff --git a/tests/quickstarter/docs/steps/provision.md b/tests/quickstarter/docs/steps/provision.md new file mode 100644 index 000000000..eae602302 --- /dev/null +++ b/tests/quickstarter/docs/steps/provision.md @@ -0,0 +1,437 @@ +# provision Step + +Prepares the environment for testing by recreating Bitbucket repositories, cleaning up OpenShift resources, and executing Jenkins pipelines with customizable configuration. 
+
+## Configuration
+
+```yaml
+- type: provision
+  description: "Provision component resources"
+  componentID: "my-component"
+  provisionParams:
+    quickstarter: "docker" # optional, defaults to quickstarter under test
+    pipeline: "Jenkinsfile" # optional, Jenkins pipeline file
+    branch: "main" # optional, Git branch, defaults to ODS_GIT_REF
+    agentImageTag: "latest" # optional, image tag for agent, defaults to ODS_IMAGE_TAG
+    sharedLibraryRef: "master" # optional, shared library reference, defaults to agentImageTag
+    env: # optional, additional environment variables
+      - name: "CUSTOM_VAR"
+        value: "custom-value"
+    testResourcesCleanUp: # optional, resources to clean before provisioning
+      - resourceType: "pod"
+        resourceName: "old-pod"
+        namespace: "dev" # optional, defaults to "dev"
+    verify: # optional, pipeline verification configuration
+      testResults: 20
+      openShiftResources:
+        deployments:
+          - "{{.ComponentID}}"
+```
+
+## Parameters
+
+### `quickstarter` (optional)
+The quickstarter to use for provisioning. Can be:
+- Simple name: `docker`
+- With repository: `quickstarters/docker`
+- Defaults to the quickstarter under test
+
+Supports Go template rendering (e.g., `{{.ComponentID}}`).
+
+### `pipeline` (optional)
+The Jenkins pipeline file to execute. Defaults to `Jenkinsfile`.
+
+### `branch` (optional)
+Git branch to use for the pipeline. Defaults to the value of `ODS_GIT_REF` from configuration.
+Supports Go template rendering.
+
+### `agentImageTag` (optional)
+Docker image tag for the Jenkins agent. Defaults to `ODS_IMAGE_TAG` from configuration.
+Supports Go template rendering.
+
+### `sharedLibraryRef` (optional)
+Git reference for the shared library. Defaults to the value of `agentImageTag`.
+Supports Go template rendering.
+
+### `env` (optional)
+Array of additional environment variables to pass to the Jenkins pipeline. 
Each entry has: +- **`name`** (required): Environment variable name +- **`value`** (required): Environment variable value (supports Go template rendering) + +### `testResourcesCleanUp` (optional) +Array of OpenShift resources to clean up before provisioning. Each entry has: +- **`resourceType`** (required): Kubernetes resource type (e.g., `pod`, `deployment`, `job`) +- **`resourceName`** (required): Name of the resource to delete +- **`namespace`** (optional): Namespace suffix (defaults to `dev`). Full namespace is `{PROJECT_ID}-{namespace}` + +### `verify` (optional) +Verification configuration for the Jenkins pipeline run. See [Verification Configuration](#verification-configuration). + +## Verification Configuration + +The `verify` section allows checking that the Jenkins pipeline executed successfully by validating various aspects: + +### `jenkinsStages` (optional) +Path to a JSON golden file containing expected Jenkins pipeline stages. +The build stages are compared against this file to ensure the pipeline structure matches expectations. + +```yaml +verify: + jenkinsStages: "expected-stages.json" +``` + +### `sonarScan` (optional) +Path to a JSON golden file containing expected SonarQube analysis results. +Verifies that code quality metrics match expected values. + +```yaml +verify: + sonarScan: "expected-sonar-results.json" +``` + +### `runAttachments` (optional) +Array of artifact names that should be attached to the Jenkins build. +Verifies that specific build artifacts (logs, reports, binaries) were created and attached. + +```yaml +verify: + runAttachments: + - "{{.ComponentID}}-artifact.jar" + - "build-report.html" + - "test-results.xml" +``` + +Each attachment name supports Go template rendering. + +### `testResults` (optional) +Minimum number of unit tests that should have been executed. +Verifies that at least this many unit tests were run during the build. 
+ +```yaml +verify: + testResults: 50 # Expect at least 50 unit tests +``` + +### `openShiftResources` (optional) +Configuration for verifying OpenShift/Kubernetes resources created by the pipeline. + +**Namespace** (optional): +Target namespace for resource verification. Defaults to `{PROJECT_ID}-dev`. +Supports Go template rendering. + +**Resource Types** (optional): +Arrays of resource names to verify exist in the namespace. Supported types: + +- **`buildConfigs`**: BuildConfig resources +- **`deploymentConfigs`**: DeploymentConfig resources +- **`deployments`**: Kubernetes Deployments +- **`statefulSets`**: StatefulSet resources +- **`daemonSets`**: DaemonSet resources +- **`replicaSets`**: ReplicaSet resources +- **`services`**: Kubernetes Services +- **`imageStreams`**: OpenShift ImageStream resources +- **`routes`**: OpenShift Route resources +- **`ingresses`**: Kubernetes Ingress resources +- **`configMaps`**: ConfigMap resources +- **`secrets`**: Secret resources +- **`persistentVolumeClaims`**: PVC resources +- **`serviceAccounts`**: ServiceAccount resources +- **`roles`**: RBAC Role resources +- **`roleBindings`**: RBAC RoleBinding resources +- **`networkPolicies`**: NetworkPolicy resources +- **`jobs`**: Kubernetes Job resources +- **`cronJobs`**: CronJob resources +- **`pods`**: Pod resources +- **`horizontalPodAutoscalers`**: HPA resources + +Each resource name supports Go template rendering. + +```yaml +verify: + openShiftResources: + namespace: "{{.ProjectID}}-dev" + deployments: + - "{{.ComponentID}}" + - "{{.ComponentID}}-secondary" + services: + - "{{.ComponentID}}" + - "{{.ComponentID}}-api" + routes: + - "{{.ComponentID}}" + configMaps: + - "{{.ComponentID}}-config" +``` + +### Verification Strategies + +By default, verification uses the **aggregate** strategy, which collects all verification failures and reports them together. 
You can also use **fail-fast** strategy to stop at the first failure: + +```yaml +verify: + strategy: "fail-fast" # or "aggregate" (default) + jenkinsStages: "expected-stages.json" + testResults: 25 +``` + +## How It Works + +1. **Bitbucket Preparation**: Recreates the Bitbucket repository for the component +2. **Resource Cleanup**: Deletes existing OpenShift resources in dev, test, and cd namespaces +3. **Test Cleanup**: Removes any test-specific resources if specified +4. **Cleanup Registration**: Registers automatic cleanup to run after the test completes +5. **Pipeline Execution**: Runs the Jenkins pipeline with merged configuration +6. **Verification**: Checks the pipeline execution against specified verification criteria +7. **Resource Preservation** (optional): If `KEEP_RESOURCES=true` environment variable is set, skips final cleanup + +## Automatically Provided Environment Variables + +The Jenkins pipeline automatically receives: + +- **`ODS_NAMESPACE`**: ODS cluster namespace +- **`ODS_GIT_REF`**: ODS Git reference +- **`ODS_IMAGE_TAG`**: ODS image tag +- **`ODS_BITBUCKET_PROJECT`**: ODS Bitbucket project +- **`AGENT_IMAGE_TAG`**: Docker agent image tag +- **`SHARED_LIBRARY_REF`**: Shared library Git reference +- **`PROJECT_ID`**: Project identifier +- **`COMPONENT_ID`**: Component identifier +- **`GIT_URL_HTTP`**: HTTP Git URL to the Bitbucket repository +- **Custom `env` variables**: Any additional variables defined in provisionParams + +## Examples + +**Minimal provision:** +```yaml +- type: provision + componentID: "my-component" + provisionParams: {} +``` + +**Provision with specific quickstarter:** +```yaml +- type: provision + componentID: "backend" + provisionParams: + quickstarter: "go" + branch: "develop" +``` + +**Provision with custom agent and shared library:** +```yaml +- type: provision + componentID: "frontend" + provisionParams: + quickstarter: "node" + agentImageTag: "v1.2.3" + sharedLibraryRef: "release/1.2" +``` + +**Provision with 
custom environment variables:** +```yaml +- type: provision + componentID: "app" + provisionParams: + branch: "main" + env: + - name: "BUILD_TYPE" + value: "production" + - name: "DEPLOYMENT_REGION" + value: "us-east-1" +``` + +**Provision with resource cleanup:** +```yaml +- type: provision + componentID: "service" + provisionParams: + quickstarter: "python" + testResourcesCleanUp: + - resourceType: "pod" + resourceName: "old-service-pod" + - resourceType: "configmap" + resourceName: "legacy-config" + namespace: "test" +``` + +**Provision with Jenkins stages verification:** +```yaml +- type: provision + componentID: "backend" + provisionParams: + quickstarter: "go" + verify: + jenkinsStages: "golden/jenkins-stages.json" + testResults: 30 +``` + +**Provision with SonarQube verification:** +```yaml +- type: provision + componentID: "api" + provisionParams: + quickstarter: "java" + verify: + sonarScan: "golden/sonar-results.json" + testResults: 100 +``` + +**Provision with artifact verification:** +```yaml +- type: provision + componentID: "app" + provisionParams: + quickstarter: "docker" + verify: + runAttachments: + - "{{.ComponentID}}-build.log" + - "{{.ComponentID}}-image.tar" + - "test-results.xml" +``` + +**Provision with OpenShift resource verification:** +```yaml +- type: provision + componentID: "microservice" + provisionParams: + quickstarter: "python" + verify: + openShiftResources: + namespace: "{{.ProjectID}}-dev" + deployments: + - "{{.ComponentID}}" + services: + - "{{.ComponentID}}" + - "{{.ComponentID}}-api" + routes: + - "{{.ComponentID}}" + configMaps: + - "{{.ComponentID}}-config" + secrets: + - "{{.ComponentID}}-credentials" +``` + +**Provision with comprehensive verification:** +```yaml +- type: provision + componentID: "api" + provisionParams: + quickstarter: "java" + verify: + strategy: "fail-fast" + jenkinsStages: "golden/java-stages.json" + sonarScan: "golden/java-sonar.json" + testResults: 50 + runAttachments: + - "{{.ComponentID}}-*.jar" + 
- "coverage-report.html" + openShiftResources: + namespace: "{{.ProjectID}}-dev" + deployments: + - "{{.ComponentID}}" + services: + - "{{.ComponentID}}" + routes: + - "{{.ComponentID}}" +``` + +**Provision with repository override:** +```yaml +- type: provision + componentID: "shared-lib" + provisionParams: + quickstarter: "shared-libraries/groovy" +``` + +**Provision with all options:** +```yaml +- type: provision + componentID: "microservice" + provisionParams: + quickstarter: "node" + pipeline: "Jenkinsfile.prod" + branch: "release/2.0" + agentImageTag: "v2.0.0" + sharedLibraryRef: "release/2.0" + env: + - name: "ENVIRONMENT" + value: "production" + - name: "REPLICAS" + value: "3" + - name: "LOG_LEVEL" + value: "info" + testResourcesCleanUp: + - resourceType: "pvc" + resourceName: "temp-storage" + verify: + logs: + - "Provisioning started" + - "All tests passed" + - "Deployment successful" +``` + +## Common Scenarios + +**Test a quickstarter in isolation:** +```yaml +- type: provision + componentID: "test-component" + provisionParams: + quickstarter: "docker" +``` + +**Test with specific Git branch:** +```yaml +- type: provision + componentID: "component" + provisionParams: + branch: "feature/new-build-system" +``` + +**Test with custom Jenkins agent version:** +```yaml +- type: provision + componentID: "component" + provisionParams: + agentImageTag: "3.0.0" + sharedLibraryRef: "3.0" +``` + +**Provision and run tests:** +```yaml +- type: provision + componentID: "app" + provisionParams: + quickstarter: "python" + +- type: wait + waitParams: + condition: "deployment-complete" + resource: "deployment/app" + +- type: run + componentID: "app" + runParams: + file: "integration-tests.sh" +``` + +**Clean up specific test resources:** +```yaml +- type: provision + componentID: "service" + provisionParams: + testResourcesCleanUp: + - resourceType: "pod" + resourceName: "temporary-test-pod" + - resourceType: "configmap" + resourceName: "test-config" + namespace: "test" 
+```
+
+## Best Practices
+
+- **Verify pipeline results**: Use the `verify` section (e.g., `jenkinsStages`, `testResults`, `openShiftResources`) to ensure the pipeline completed successfully
+- **Clean test resources**: Specify any temporary resources to clean up before provisioning
+- **Use meaningful component IDs**: Choose descriptive component IDs that match your quickstarter type
+- **Set KEEP_RESOURCES for debugging**: Set environment variable `KEEP_RESOURCES=true` to preserve resources for investigation
+- **Version management**: Pin agent and shared library versions for reproducible builds
+- **Custom environment**: Use `env` parameter for environment-specific configuration
+- **Branch strategy**: Use feature branches to test new pipeline changes before merging to main
diff --git a/tests/quickstarter/docs/steps/run.md b/tests/quickstarter/docs/steps/run.md
new file mode 100644
index 000000000..1c346d3cf
--- /dev/null
+++ b/tests/quickstarter/docs/steps/run.md
@@ -0,0 +1,205 @@
+# run Step
+
+Executes shell scripts with access to test context, service URLs, and configuration variables as environment variables.
+
+## Configuration
+
+```yaml
+- type: run
+  description: "Run test script"
+  componentID: "my-component"
+  runParams:
+    file: "test.sh"
+    services:
+      api: "api-service" # optional, maps service aliases to service names
+      backend: "backend-service"
+```
+
+## Parameters
+
+### `file` (required)
+Path to the shell script to execute, relative to the test data directory.
+Supports Go template rendering (e.g., `scripts/{{.ComponentID}}_test.sh`).
+
+### `services` (optional)
+Map of service aliases to Kubernetes service names. Each alias is converted to an environment variable.
+- **Key**: The alias name (e.g., `api`, `backend`)
+- **Value**: The Kubernetes service name (supports Go template rendering)
+
+If no services map is defined, the `ComponentID` is automatically exported as `SERVICE_URL` for backward compatibility. 
+ +## Environment Variables + +Scripts automatically receive these environment variables: + +### Always Provided + +- **`COMPONENT_ID`**: The component ID from the test step +- **`PROJECT_ID`**: The project name +- **`NAMESPACE`**: The default namespace (`{PROJECT_ID}-dev`) + +### From Exposed Services + +When services are exposed via `expose-service` steps: + +- **Single service (backward compatible)**: `$SERVICE_URL` +- **Named services map**: `${ALIAS}_SERVICE_URL` (uppercase alias, e.g., `$API_SERVICE_URL`, `$BACKEND_SERVICE_URL`) + +### From Template Data + +- **`ODS_NAMESPACE`**: ODS cluster namespace if available +- **`ODS_GIT_REF`**: ODS Git reference if available +- **`ODS_IMAGE_TAG`**: ODS image tag if available + +## How It Works + +1. **Script Execution**: Runs the shell script at the specified path +2. **Environment Setup**: Injects all relevant environment variables before execution +3. **Service Resolution**: Looks up exposed service URLs from previous `expose-service` steps +4. **Error Handling**: Fails the test if the script exits with non-zero status +5. 
**Output Capture**: Captures and logs both stdout and stderr + +## Examples + +**Simple script execution:** +```yaml +- type: run + componentID: "my-app" + runParams: + file: "test.sh" +``` + +Script can use: +```bash +#!/bin/bash +echo "Component: $COMPONENT_ID" +echo "Project: $PROJECT_ID" +echo "Namespace: $NAMESPACE" +``` + +**With single exposed service:** +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "my-app" + port: "8080" + +- type: run + componentID: "my-app" + runParams: + file: "integration-test.sh" +``` + +Script can use: +```bash +#!/bin/bash +curl -s "$SERVICE_URL/health" +``` + +**With multiple named services:** +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "api" + port: "8080" + - serviceName: "database" + port: "5432" + +- type: run + componentID: "api" + runParams: + file: "full-integration-test.sh" + services: + api: "api" + database: "database" +``` + +Script can use: +```bash +#!/bin/bash +# Test API health +curl -s "$API_SERVICE_URL/health" + +# Test database connectivity +nc -zv "$DATABASE_SERVICE_URL" 5432 +``` + +**With template variables:** +```yaml +- type: run + componentID: "{{.ComponentID}}" + runParams: + file: "tests/{{.ComponentID}}_validation.sh" + services: + component: "{{.ComponentID}}" +``` + +**Accessing ODS configuration:** +```yaml +- type: run + componentID: "my-component" + runParams: + file: "ods-config-test.sh" +``` + +Script can use: +```bash +#!/bin/bash +echo "ODS Namespace: $ODS_NAMESPACE" +echo "ODS Git Ref: $ODS_GIT_REF" +echo "ODS Image Tag: $ODS_IMAGE_TAG" +``` + +## Common Scenarios + +**Verify service connectivity:** +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: "api" + port: "8080" + +- type: run + componentID: "api" + runParams: + file: "verify-connectivity.sh" +``` + +**Run multiple test scripts in sequence:** +```yaml +- type: expose-service + exposeServiceParams: + services: + - serviceName: 
"app" + +- type: run + runParams: + file: "unit-tests.sh" + +- type: run + runParams: + file: "integration-tests.sh" + +- type: run + runParams: + file: "smoke-tests.sh" +``` + +**Test with component-specific script:** +```yaml +- type: run + componentID: "{{.ComponentID}}" + runParams: + file: "test-{{.ComponentID}}.sh" +``` + +## Best Practices + +- **Fail on errors**: Start scripts with `set -e` to fail immediately on errors +- **Log output**: Use `set -x` for debugging script execution +- **Clean paths**: Use absolute paths when referencing test data files +- **Exit codes**: Ensure scripts return 0 on success and non-zero on failure +- **Service URLs**: Always check if `$SERVICE_URL` or named service URLs are set before using them diff --git a/tests/quickstarter/docs/steps/upload.md b/tests/quickstarter/docs/steps/upload.md new file mode 100644 index 000000000..c66a51902 --- /dev/null +++ b/tests/quickstarter/docs/steps/upload.md @@ -0,0 +1,178 @@ +# upload Step + +Uploads files to a Bitbucket repository with optional template rendering support. Useful for committing test artifacts, configuration files, or generated content back to the repository. + +## Configuration + +```yaml +- type: upload + description: "Upload test artifact to repository" + uploadParams: + file: "artifacts/test-output.json" + filename: "test-results.json" # optional, defaults to basename of file + repository: "{{.ComponentID}}" # optional, defaults to component repository + render: true # optional, renders file as Go template before upload +``` + +## Parameters + +### `file` (required) +Path to the file to upload, relative to the test data directory. +Supports Go template rendering (e.g., `outputs/{{.ComponentID}}_result.txt`). + +### `filename` (optional) +The target filename in the repository. If not specified, defaults to the basename of the source file. +Example: `test-output.json` → uploaded as `test-output.json` + +### `repository` (optional) +The Bitbucket repository to upload to. 
Defaults to the component's repository. +Supports Go template rendering (e.g., `{{.ComponentID}}-tests`). + +### `render` (optional) +Whether to render the file as a Go template before uploading. Defaults to `false`. +When `true`, the file is processed using Go's `text/template` package with access to template data (project ID, component ID, etc.). + +## How It Works + +1. **File Resolution**: Locates the file in the test data directory +2. **Template Rendering** (if enabled): Processes the file as a Go template with test context +3. **Bitbucket Upload**: Uploads the file to the target repository using git-based upload script +4. **Success Logging**: Reports the successful upload to the configured BitBucket URL +5. **Error Handling**: Fails the test if upload fails + +## Template Data Available + +When `render: true`, files have access to all template variables: + +- **`{{.ProjectID}}`**: The project identifier +- **`{{.ComponentID}}`**: The component identifier +- **`{{.OdsNamespace}}`**: The ODS namespace +- Custom variables from the test configuration + +## Examples + +**Upload a static file:** +```yaml +- type: upload + uploadParams: + file: "results/test-summary.txt" + filename: "test-summary.txt" +``` + +**Upload with custom filename:** +```yaml +- type: upload + uploadParams: + file: "output.json" + filename: "{{.ComponentID}}-test-results.json" +``` + +**Upload to different repository:** +```yaml +- type: upload + uploadParams: + file: "artifacts/deployment-log.txt" + repository: "shared-resources" + filename: "deployment-logs/{{.ComponentID}}-deployment.txt" +``` + +**Upload with template rendering:** +```yaml +- type: upload + uploadParams: + file: "templates/config.yml" + render: true + filename: "config/{{.ComponentID}}-config.yml" +``` + +The template file `templates/config.yml`: +```yaml +project: {{.ProjectID}} +component: {{.ComponentID}} +namespace: {{.OdsNamespace}} +``` + +**Upload test results:** +```yaml +- type: run + runParams: + file: 
"test-runner.sh" + +- type: upload + uploadParams: + file: "test-output/results.json" + filename: "test-results-{{.ComponentID}}.json" +``` + +**Upload rendered test report:** +```yaml +- type: upload + uploadParams: + file: "templates/report.html" + render: true + filename: "reports/{{.ComponentID}}-report-{{.Timestamp}}.html" +``` + +## Common Scenarios + +**Save build artifacts:** +```yaml +- type: run + runParams: + file: "build.sh" + +- type: upload + uploadParams: + file: "build-output/artifact.jar" + repository: "{{.ComponentID}}-builds" + filename: "builds/{{.ComponentID}}-{{.BuildNumber}}.jar" +``` + +**Upload configuration after generation:** +```yaml +- type: run + runParams: + file: "generate-config.sh" + +- type: upload + uploadParams: + file: "generated/config.yaml" + filename: "config/generated-{{.ComponentID}}.yaml" +``` + +**Upload test report:** +```yaml +- type: run + runParams: + file: "run-tests.sh" + +- type: upload + uploadParams: + file: "test-reports/report.xml" + filename: "reports/test-report-{{.ComponentID}}.xml" +``` + +**Upload with template rendering for environment-specific config:** +```yaml +- type: upload + uploadParams: + file: "config-template.properties" + render: true + filename: "config/{{.ComponentID}}.properties" +``` + +Template file content: +```properties +app.name={{.ComponentID}} +project={{.ProjectID}} +environment={{.Environment}} +``` + +## Best Practices + +- **Use descriptive filenames**: Include component ID, timestamp, or version in filenames +- **Organize uploads**: Use subdirectories in the filename (e.g., `reports/`, `artifacts/`) +- **Template rendering**: Only enable rendering if your file contains template variables +- **Error recovery**: Ensure uploaded files don't break the build if they contain errors +- **Repository organization**: Use separate repositories for different artifact types when possible +- **Cleanup**: Consider cleanup strategies for old uploads to avoid repository bloat diff --git 
a/tests/quickstarter/docs/steps/wait.md b/tests/quickstarter/docs/steps/wait.md new file mode 100644 index 000000000..15b5b782d --- /dev/null +++ b/tests/quickstarter/docs/steps/wait.md @@ -0,0 +1,143 @@ +# wait Step + +Waits for asynchronous operations to complete by polling for specific conditions on Kubernetes resources or HTTP endpoints. + +## Configuration + +```yaml +- type: wait + description: "Wait for pod to be ready" + waitParams: + condition: "pod-ready" + resource: "pod/{{.ComponentID}}" + namespace: "{{.ProjectID}}-dev" # optional, defaults to {{.ProjectID}}-dev + timeout: "300s" # optional, defaults to 300s + interval: "5s" # optional, defaults to 5s +``` + +## Parameters + +### `condition` (required) +The condition to wait for. Supported values: + +- **`pod-ready`**: Wait for a pod to reach Ready state +- **`deployment-complete`**: Wait for a deployment to complete its rollout +- **`job-complete`**: Wait for a Kubernetes job to complete +- **`route-accessible`**: Wait for an OpenShift route to be accessible +- **`http-accessible`**: Wait for an HTTP endpoint to respond with 2xx or 3xx status +- **`log-contains`**: Wait for a specific message to appear in logs + +### `resource` (required for pod, deployment, job, route, log conditions) +The Kubernetes resource to wait for. Format: `<kind>/<name>` or just `<name>`. +Supports Go template rendering (e.g., `{{.ComponentID}}`). + +### `url` (required for http-accessible) +The HTTP URL to wait for. Supports Go template rendering. + +### `message` (required for log-contains) +The log message to wait for. Supports Go template rendering. + +### `namespace` (optional) +Kubernetes namespace where the resource runs. Defaults to `{{.ProjectID}}-dev`. +Supports Go template rendering. + +### `timeout` (optional) +Maximum time to wait. Defaults to `300s`. Accepts Go duration format (e.g., `60s`, `5m`). + +### `interval` (optional) +How often to check the condition. Defaults to `5s`. Accepts Go duration format. + +## How It Works + +1. 
**Polling Loop**: Repeatedly checks the condition until timeout is reached +2. **Success**: Returns when the condition is met +3. **Failure**: Times out and fails the test if condition is not met within timeout period +4. **Retries**: Uses the interval to determine how often to check + +## Examples + +**Wait for pod to be ready:** +```yaml +- type: wait + waitParams: + condition: "pod-ready" + resource: "{{.ComponentID}}" + timeout: "120s" +``` + +**Wait for deployment to complete:** +```yaml +- type: wait + waitParams: + condition: "deployment-complete" + resource: "deployment/{{.ComponentID}}" + timeout: "300s" + interval: "10s" +``` + +**Wait for job to complete:** +```yaml +- type: wait + waitParams: + condition: "job-complete" + resource: "job/my-job" + namespace: "{{.ProjectID}}-dev" + timeout: "600s" +``` + +**Wait for route to be accessible:** +```yaml +- type: wait + waitParams: + condition: "route-accessible" + resource: "{{.ComponentID}}" + timeout: "180s" + interval: "5s" +``` + +**Wait for HTTP endpoint to be accessible:** +```yaml +- type: wait + waitParams: + condition: "http-accessible" + url: "http://{{.ComponentID}}.example.com/health" + timeout: "120s" + interval: "10s" +``` + +**Wait for log message:** +```yaml +- type: wait + waitParams: + condition: "log-contains" + resource: "{{.ComponentID}}" + message: "Application started successfully" + timeout: "60s" + interval: "5s" +``` + +## Common Scenarios + +**Wait after deployment with custom interval:** +```yaml +- type: wait + waitParams: + condition: "deployment-complete" + resource: "{{.ComponentID}}" + namespace: "{{.ProjectID}}-dev" + timeout: "300s" + interval: "15s" +``` + +**Chain multiple waits:** +```yaml +- type: wait + waitParams: + condition: "pod-ready" + resource: "{{.ComponentID}}" + +- type: wait + waitParams: + condition: "http-accessible" + url: "http://localhost:8080/health" +``` diff --git a/tests/quickstarter/logger/logger.go b/tests/quickstarter/logger/logger.go new file 
mode 100644 index 000000000..a6f55af71 --- /dev/null +++ b/tests/quickstarter/logger/logger.go @@ -0,0 +1,220 @@ +package logger + +import ( + "fmt" + "os" + + "github.com/charmbracelet/log" +) + +var logger *log.Logger + +// Init initializes the logger with charmbracelet/log +func Init() { + logger = log.New(os.Stderr) + logger.SetLevel(log.DebugLevel) + logger.SetReportCaller(false) + logger.SetReportTimestamp(true) +} + +// GetLogger returns the global logger instance +func GetLogger() *log.Logger { + if logger == nil { + Init() + } + return logger +} + +// Section prints a prominent section header +func Section(title string) { + if logger == nil { + Init() + } + logger.Infof("\n%s═══════════════════════════════════════════════════════════════════════════════%s\n %s%s%s\n%s═══════════════════════════════════════════════════════════════════════════════%s\n", + "╔", "╗", + "🚀 ", title, "", + "╚", "╝") +} + +// SubSection prints a sub-section header +func SubSection(title string) { + if logger == nil { + Init() + } + logger.Infof("\n%s─────────────────────────────────────────────────────────────────────────────────\n %s%s\n%s─────────────────────────────────────────────────────────────────────────────────\n", + "┌", "📋 ", title, "└") +} + +// Step logs the start of a test step +func Step(stepNumber, totalSteps int, stepType, description string) { + if logger == nil { + Init() + } + logger.Infof("\n%s┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄%s\n ▶️ Step %d/%d [%s]: %s\n%s┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄%s\n", + "┌", "┐", stepNumber, totalSteps, stepType, description, "└", "┘") +} + +// StepVerification logs a verification step +func StepVerification(description string) { + if logger == nil { + Init() + } + logger.Infof("\n%s┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄%s\n 🔍 Verifying: 
%s\n%s┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄┄%s\n", + "┌", "┐", description, "└", "┘") +} + +// StepSuccess logs successful step completion +func StepSuccess(stepType string) { + if logger == nil { + Init() + } + logger.Infof("✅ Step [%s] completed successfully", stepType) +} + +// StepFailed logs failed step completion +func StepFailed(stepType string, err error) { + if logger == nil { + Init() + } + logger.Errorf("❌ Step [%s] failed: %v", stepType, err) +} + +// Info logs informational messages +func Info(msg string, args ...interface{}) { + if logger == nil { + Init() + } + if len(args) > 0 { + logger.Infof(msg, args...) + } else { + logger.Info(msg) + } +} + +// Debug logs debug messages +func Debug(msg string, args ...interface{}) { + if logger == nil { + Init() + } + if len(args) > 0 { + logger.Debugf(msg, args...) + } else { + logger.Debug(msg) + } +} + +// Warn logs warning messages +func Warn(msg string, args ...interface{}) { + if logger == nil { + Init() + } + if len(args) > 0 { + logger.Warnf(msg, args...) + } else { + logger.Warn(msg) + } +} + +// Error logs error messages +func Error(msg string, args ...interface{}) { + if logger == nil { + Init() + } + if len(args) > 0 { + logger.Errorf(msg, args...) 
+ } else { + logger.Error(msg) + } +} + +// Running logs an operation that is running +func Running(operation string) { + if logger == nil { + Init() + } + logger.Infof("⚙️ Running: %s", operation) +} + +// Success logs a successful operation +func Success(operation string) { + if logger == nil { + Init() + } + logger.Infof("✅ Success: %s", operation) +} + +// Failure logs a failed operation +func Failure(operation string, err error) { + if logger == nil { + Init() + } + logger.Errorf("❌ Failure: %s - %v", operation, err) +} + +// Waiting logs a waiting operation +func Waiting(operation string) { + if logger == nil { + Init() + } + logger.Infof("⏳ Waiting: %s", operation) +} + +// Completed logs a completed operation +func Completed(operation string) { + if logger == nil { + Init() + } + logger.Infof("✓ Completed: %s", operation) +} + +// KeyValue logs key-value pairs for debugging +func KeyValue(key string, value interface{}) { + if logger == nil { + Init() + } + logger.Debugf(" 🔹 %s: %v", key, value) +} + +// List logs a list item +func List(item string) { + if logger == nil { + Init() + } + logger.Infof(" • %s", item) +} + +// Separator prints a visual separator +func Separator() { + if logger == nil { + Init() + } + fmt.Println("─────────────────────────────────────────────────────────────────────────────────") +} + +// TestSummary logs test summary information +func TestSummary(componentName string, totalSteps int, failedSteps int) { + if logger == nil { + Init() + } + status := "✅" + if failedSteps > 0 { + status = "❌" + } + logger.Infof("\n%s Test Summary for %s: %d/%d steps passed\n", status, componentName, totalSteps-failedSteps, totalSteps) +} + +// Interrupt logs an interrupt signal +func Interrupt() { + if logger == nil { + Init() + } + logger.Warnf("⚠️ Interrupt received, cleaning up...") +} + +// Exception logs an exception/panic condition +func Exception(msg string, err error) { + if logger == nil { + Init() + } + logger.Errorf("⚡ Exception: %s - 
%v", msg, err) +} diff --git a/tests/quickstarter/openshift.go b/tests/quickstarter/openshift.go deleted file mode 100644 index 521a9df98..000000000 --- a/tests/quickstarter/openshift.go +++ /dev/null @@ -1,38 +0,0 @@ -package quickstarter - -import ( - "bytes" - "fmt" - "os/exec" -) - -func deleteOpenShiftResources(projectID string, componentID string, namespace string) error { - fmt.Printf("-- starting cleanup for component: %s\n", componentID) - label := fmt.Sprintf("app=%s-%s", projectID, componentID) - fmt.Printf("-- delete resources labelled with: %s\n", label) - stdout, stderr, err := runOcCmd([]string{ - "-n", namespace, - "delete", "all", "-l", label, - }) - if err != nil { - return fmt.Errorf( - "Could not delete all resources labelled with %s: \nStdOut: %s\nStdErr: %s\n\nErr: %w", - label, - stdout, - stderr, - err, - ) - } - - fmt.Printf("-- cleaned up resources with label: %s\n", label) - return nil -} - -func runOcCmd(args []string) (string, string, error) { - cmd := exec.Command("oc", args...) - var stdout, stderr bytes.Buffer - cmd.Stdout = &stdout - cmd.Stderr = &stderr - err := cmd.Run() - return stdout.String(), stderr.String(), err -} diff --git a/tests/quickstarter/quickstarter_test.go b/tests/quickstarter/quickstarter_test.go index 2e7e72872..e2a259345 100644 --- a/tests/quickstarter/quickstarter_test.go +++ b/tests/quickstarter/quickstarter_test.go @@ -1,18 +1,17 @@ package quickstarter import ( - "bytes" - b64 "encoding/base64" "fmt" - "io/ioutil" "os" + "os/signal" "path/filepath" - "regexp" - "strconv" "strings" + "syscall" "testing" - "text/template" + "github.com/opendevstack/ods-core/tests/quickstarter/logger" + "github.com/opendevstack/ods-core/tests/quickstarter/reporting" + "github.com/opendevstack/ods-core/tests/quickstarter/steps" "github.com/opendevstack/ods-core/tests/utils" ) @@ -25,10 +24,30 @@ import ( // "ods-quickstarters". 
If the argument ends with "...", all directories with a // "testdata" directory are tested, otherwise only the given folder is run. func TestQuickstarter(t *testing.T) { + // Initialize the logger + logger.Init() + log := logger.GetLogger() + + log.Infof("🚀 Starting Quickstarter Test Framework\n") + + // Ensure cleanup of port-forwards even on panic or interrupt + defer steps.CleanupAllPortForwards() + + // Setup signal handler for graceful shutdown (Ctrl+C) + sigChan := make(chan os.Signal, 1) + signal.Notify(sigChan, os.Interrupt, syscall.SIGTERM) + go func() { + <-sigChan + logger.Interrupt() + steps.CleanupAllPortForwards() + os.Exit(1) + }() var quickstarterPaths []string odsCoreRootPath := "../.." - target := os.Args[len(os.Args)-1] + project := os.Args[len(os.Args)-1] + utils.Set_project_name(project) + target := os.Args[len(os.Args)-2] if strings.HasPrefix(target, ".") || strings.HasPrefix(target, "/") { if strings.HasSuffix(target, "...") { quickstarterPaths = collectTestableQuickstarters( @@ -51,226 +70,135 @@ func TestQuickstarter(t *testing.T) { quickstarterPaths = []string{fmt.Sprintf("%s/../%s", odsCoreRootPath, target)} } } + dir, err := os.Getwd() + if err != nil { + logger.Error("Failed to get working directory: %v", err) + return + } + quickstarterPaths = utils.RemoveExcludedQuickstarters(t, dir, quickstarterPaths) config, err := utils.ReadConfiguration() if err != nil { t.Fatal(err) } - cdUserPassword, err := b64.StdEncoding.DecodeString(config["CD_USER_PWD_B64"]) - if err != nil { - t.Fatalf("Error decoding cd_user password: %s", err) - } - fmt.Printf("\n\nRunning test steps found in the following directories:\n") + logger.Section("Test Paths") + logger.List(fmt.Sprintf("Found %d quickstarter(s) to test:", len(quickstarterPaths))) for _, quickstarterPath := range quickstarterPaths { - fmt.Printf("- %s\n", quickstarterPath) + logger.List(quickstarterPath) } - fmt.Printf("\n\n") for _, quickstarterPath := range quickstarterPaths { testdataPath := 
fmt.Sprintf("%s/testdata", quickstarterPath) quickstarterRepo := filepath.Base(filepath.Dir(quickstarterPath)) quickstarterName := filepath.Base(quickstarterPath) - fmt.Printf("\n\n\n\n") - fmt.Printf("Running tests for quickstarter %s\n", quickstarterName) - fmt.Printf("\n\n") - - freeUnusedResources(t) - restartAtlassianSuiteIfLicenseExpiresInLessThan(t) + logger.Section(fmt.Sprintf("Testing Quickstarter: %s", quickstarterName)) // Run each quickstarter test in a subtest to avoid exiting early // when t.Fatal is used. t.Run(quickstarterName, func(t *testing.T) { t.Parallel() + // Ensure port-forwards are cleaned up after each subtest + defer steps.CleanupAllPortForwards() + s, err := readSteps(testdataPath) if err != nil { t.Fatal(err) } + // Create test report for this quickstarter + report := reporting.NewTestReport(quickstarterName) + defer func() { + report.Finalize() + log := logger.GetLogger() + log.Infof("\n%s\n", report.String()) + + // Optionally export reports (can be controlled via env var) + if os.Getenv("EXPORT_TEST_REPORTS") == "true" { + reportFile := filepath.Join(testdataPath, fmt.Sprintf("test-report-%s.json", quickstarterName)) + if err := reporting.ExportJSON(report, reportFile); err != nil { + log.Warnf("Failed to export JSON report: %v", err) + } + } + }() + + // Create shared template data outside the loop so it persists across steps + // This allows steps like expose-service to store data for later steps to use + tmplData := steps.CreateTemplateData(config, s.ComponentID, "", utils.PROJECT_NAME) + + logger.SubSection(fmt.Sprintf("Component: %s", s.ComponentID)) + logger.List(fmt.Sprintf("Total steps to execute: %d", len(s.Steps))) + for i, step := range s.Steps { // Step might overwrite component ID if len(step.ComponentID) == 0 { step.ComponentID = s.ComponentID } - fmt.Printf( - "\n\nRun step #%d (%s) of quickstarter %s/%s ... 
%s\n", - (i + 1), + + // Check if step should be skipped + if steps.ShouldSkipStep(t, &step, tmplData) { + logger.Info("⊘ Skipping step %d: %s (skip=%v, skipIf=%q)", i+1, step.Type, step.Skip, step.SkipIf) + continue + } + + logger.Step( + i+1, + len(s.Steps), step.Type, - quickstarterRepo, - quickstarterName, step.Description, ) + report.RecordStepStart(i, step.Type, step.Description) repoName := fmt.Sprintf("%s-%s", strings.ToLower(utils.PROJECT_NAME), step.ComponentID) - if step.Type == "upload" { - if len(step.UploadParams.Filename) == 0 { - step.UploadParams.Filename = filepath.Base(step.UploadParams.File) - } - stdout, stderr, err := utils.RunScriptFromBaseDir("tests/scripts/upload-file-to-bitbucket.sh", []string{ - fmt.Sprintf("--bitbucket=%s", config["BITBUCKET_URL"]), - fmt.Sprintf("--user=%s", config["CD_USER_ID"]), - fmt.Sprintf("--password=%s", cdUserPassword), - fmt.Sprintf("--project=%s", utils.PROJECT_NAME), - fmt.Sprintf("--repository=%s", repoName), - fmt.Sprintf("--file=%s/%s", testdataPath, step.UploadParams.File), - fmt.Sprintf("--filename=%s", step.UploadParams.Filename), - }, []string{}) + // Execute the appropriate step based on type with error handling + var stepErr error + executor := steps.NewStepExecutor(testdataPath, tmplData) - if err != nil { - t.Fatalf( - "Execution of `upload-file-to-bitbucket.sh` failed: \nStdOut: %s\nStdErr: %s\nErr: %s\n", - stdout, - stderr, - err) - } else { - fmt.Printf("Uploaded file %s to %s\n", step.UploadParams.File, config["BITBUCKET_URL"]) - } - continue + // Get the handler from the registry + handler, err := steps.DefaultRegistry().Get(step.Type) + if err != nil { + t.Fatalf("Step %d failed: %v", i+1, err) } - var request utils.RequestBuild - var pipelineName string - var jenkinsfile string - var verify *TestStepVerify - tmplData := templateData(config, step.ComponentID, "") - if step.Type == "provision" { - // cleanup and create bb resources for this test - err = recreateBitbucketRepo(config, 
utils.PROJECT_NAME, repoName) - if err != nil { - t.Fatal(err) - } - err = deleteOpenShiftResources(utils.PROJECT_NAME, step.ComponentID, utils.PROJECT_NAME_DEV) - if err != nil { - t.Fatal(err) - } - branch := config["ODS_GIT_REF"] - if len(step.ProvisionParams.Branch) > 0 { - branch = renderTemplate(t, step.ProvisionParams.Branch, tmplData) - } - agentImageTag := config["ODS_IMAGE_TAG"] - if len(step.ProvisionParams.AgentImageTag) > 0 { - agentImageTag = renderTemplate(t, step.ProvisionParams.AgentImageTag, tmplData) - } - sharedLibraryRef := agentImageTag - if len(step.ProvisionParams.SharedLibraryRef) > 0 { - sharedLibraryRef = renderTemplate(t, step.ProvisionParams.SharedLibraryRef, tmplData) - } - env := []utils.EnvPair{ - { - Name: "ODS_NAMESPACE", - Value: config["ODS_NAMESPACE"], - }, - { - Name: "ODS_GIT_REF", - Value: config["ODS_GIT_REF"], - }, - { - Name: "ODS_IMAGE_TAG", - Value: config["ODS_IMAGE_TAG"], - }, - { - Name: "ODS_BITBUCKET_PROJECT", - Value: config["ODS_BITBUCKET_PROJECT"], - }, - { - Name: "AGENT_IMAGE_TAG", - Value: agentImageTag, - }, - { - Name: "SHARED_LIBRARY_REF", - Value: sharedLibraryRef, - }, - { - Name: "PROJECT_ID", - Value: utils.PROJECT_NAME, - }, - { - Name: "COMPONENT_ID", - Value: step.ComponentID, - }, - { - Name: "GIT_URL_HTTP", - Value: fmt.Sprintf("%s/%s/%s.git", config["REPO_BASE"], utils.PROJECT_NAME, repoName), - }, - } - request = utils.RequestBuild{ - Repository: quickstarterRepo, - Branch: branch, - Project: config["ODS_BITBUCKET_PROJECT"], - Env: append(env, step.ProvisionParams.Env...), - } - // If quickstarter is overwritten, use that value. Otherwise - // we use the quickstarter under test. 
- if len(step.ProvisionParams.Quickstarter) > 0 { - jenkinsfile = fmt.Sprintf("%s/Jenkinsfile", step.ProvisionParams.Quickstarter) - } else { - jenkinsfile = fmt.Sprintf("%s/Jenkinsfile", quickstarterName) - } - pipelineName = step.ProvisionParams.Pipeline - verify = step.ProvisionParams.Verify - } else if step.Type == "build" { - branch := "master" - if len(step.BuildParams.Branch) > 0 { - branch = renderTemplate(t, step.BuildParams.Branch, tmplData) - } - request = utils.RequestBuild{ - Repository: repoName, - Branch: branch, - Project: utils.PROJECT_NAME, - Env: step.BuildParams.Env, - } - jenkinsfile = "Jenkinsfile" - pipelineName = step.BuildParams.Pipeline - verify = step.BuildParams.Verify - } - buildName, err := utils.RunJenkinsPipeline(jenkinsfile, request, pipelineName) - if err != nil { - t.Fatal(err) + // Build execution parameters + params := &steps.ExecutionParams{ + TestdataPath: testdataPath, + TmplData: tmplData, + RepoName: repoName, + QuickstarterRepo: quickstarterRepo, + QuickstarterName: quickstarterName, + Config: config, + ProjectName: utils.PROJECT_NAME, } - verifyPipelineRun(t, step, verify, testdataPath, repoName, buildName, config) + // Execute the step with hooks + stepErr = executor.ExecuteWithHooks(t, &step, func() error { + return handler.Execute(t, &step, params) + }) + + if stepErr != nil { + report.RecordStepEnd(i, "failed", stepErr, nil) + t.Fatalf("Step %d failed: %v", i+1, stepErr) + } else { + report.RecordStepEnd(i, "passed", nil, nil) + logger.StepSuccess(step.Type) + } } - }) - } -} -func freeUnusedResources(t *testing.T) { - - // Run cleanup operations to ensure we always have enough resources. 
- stdout, stderr, err := utils.RunScriptFromBaseDir( - "tests/scripts/free-unused-resources.sh", - []string{}, []string{}, - ) + logger.Success(fmt.Sprintf("All steps completed for quickstarter %s", quickstarterName)) + }) - if err != nil { - t.Fatalf("Error cleaning up : \nStdOut: %s\nStdErr: %s\nErr: %s\n", stdout, stderr, err) - } else { - fmt.Printf("Cleaned cluster state.\n") } -} - -func restartAtlassianSuiteIfLicenseExpiresInLessThan(t *testing.T) { - // Run cleanup operations to ensure we always have enough resources. - stdout, stderr, err := utils.RunScriptFromBaseDir( - "ods-devenv/scripts/restart-atlassian-suite-if-license-expires-in-less-than.sh", - []string{"--hours-left", "2"}, []string{}, - ) - - if err != nil { - t.Fatalf("Error cleaning up : \nStdOut: %s\nStdErr: %s\nErr: %s\n", stdout, stderr, err) - } else { - fmt.Printf("Checked if needed to restart atlassian suite.\n") - } } // collectTestableQuickstarters collects all subdirs of "dir" that contain // a "testdata" directory. 
func collectTestableQuickstarters(t *testing.T, dir string) []string { testableQuickstarters := []string{} - files, err := ioutil.ReadDir(dir) + files, err := os.ReadDir(dir) if err != nil { t.Fatal(err) } @@ -286,159 +214,3 @@ func collectTestableQuickstarters(t *testing.T, dir string) []string { return utils.SortTestableQuickstarters(t, dir, testableQuickstarters) } - -func templateData(config map[string]string, componentID string, buildName string) TemplateData { - sanitizedOdsGitRef := strings.Replace(config["ODS_GIT_REF"], "/", "_", -1) - sanitizedOdsGitRef = strings.Replace(sanitizedOdsGitRef, "-", "_", -1) - var buildNumber string - if len(buildName) > 0 { - buildParts := strings.Split(buildName, "-") - buildNumber = buildParts[len(buildParts)-1] - } - return TemplateData{ - ProjectID: utils.PROJECT_NAME, - ComponentID: componentID, - OdsNamespace: config["ODS_NAMESPACE"], - OdsGitRef: config["ODS_GIT_REF"], - OdsImageTag: config["ODS_IMAGE_TAG"], - OdsBitbucketProject: config["ODS_BITBUCKET_PROJECT"], - SanitizedOdsGitRef: sanitizedOdsGitRef, - BuildNumber: buildNumber, - } -} - -// verifyPipelineRun checks that all expected values from the TestStepVerify -// definition are present. 
-func verifyPipelineRun(t *testing.T, step TestStep, verify *TestStepVerify, testdataPath string, repoName string, buildName string, config map[string]string) { - if verify == nil { - fmt.Println("Nothing to verify for", buildName) - return - } - - tmplData := templateData(config, step.ComponentID, buildName) - - if len(verify.JenkinsStages) > 0 { - fmt.Printf("Verifying Jenkins stages of %s ...\n", buildName) - stages, err := utils.RetrieveJenkinsBuildStagesForBuild(utils.PROJECT_NAME_CD, buildName) - if err != nil { - t.Fatal(err) - } - fmt.Printf("%s pipeline run for %s returned:\n%s", step.Type, step.ComponentID, stages) - err = utils.VerifyJenkinsStages( - fmt.Sprintf("%s/%s", testdataPath, verify.JenkinsStages), - stages, - ) - if err != nil { - t.Fatal(err) - } - } - - if len(verify.SonarScan) > 0 { - fmt.Printf("Verifying Sonar scan of %s ...\n", buildName) - sonarscan, err := retrieveSonarScan(repoName, config) - if err != nil { - t.Fatal(err) - } - err = verifySonarScan( - step.ComponentID, - fmt.Sprintf("%s/%s", testdataPath, verify.SonarScan), - sonarscan, - tmplData, - ) - if err != nil { - t.Fatal(err) - } - } - - if len(verify.RunAttachments) > 0 { - fmt.Printf("Verifying Jenkins run attachments of %s ...\n", buildName) - artifactsToVerify := []string{} - for _, a := range verify.RunAttachments { - artifactsToVerify = append( - artifactsToVerify, - renderTemplate(t, a, tmplData), - ) - } - err := utils.VerifyJenkinsRunAttachments(utils.PROJECT_NAME_CD, buildName, artifactsToVerify) - if err != nil { - t.Fatal(err) - } - } - - if verify.TestResults > 0 { - fmt.Printf("Verifying unit tests of %s ...\n", buildName) - stdout, stderr, err := utils.RunScriptFromBaseDir("tests/scripts/print-jenkins-unittest-results.sh", []string{ - utils.PROJECT_NAME_CD, - buildName, - }, []string{}) - if err != nil { - t.Fatalf("Could not find unit tests for build:%s\nstdout: %s\nstderr:%s\nerr: %s\n", - buildName, stdout, stderr, err) - } - - r := 
regexp.MustCompile("([0-9]+) tests") - match := r.FindStringSubmatch(stdout) - if match == nil { - t.Fatalf("Could not find any unit tests for build:%s\nstdout: %s\nstderr:%s\nerr: %s\n", - buildName, stdout, stderr, err) - } - foundTests, err := strconv.Atoi(match[1]) - if err != nil { - t.Fatalf("Could not convert number of unit tests to int: %s", err) - } - if foundTests < verify.TestResults { - t.Fatalf("Expected %d unit tests, but found only %d for build:%s\n", - verify.TestResults, foundTests, buildName) - } - } - - if verify.OpenShiftResources != nil { - var ocNamespace string - if len(verify.OpenShiftResources.Namespace) > 0 { - ocNamespace = renderTemplate(t, verify.OpenShiftResources.Namespace, tmplData) - } else { - ocNamespace = utils.PROJECT_NAME_DEV - } - fmt.Printf("Verifying OpenShift resources of %s in %s ...\n", step.ComponentID, ocNamespace) - imageTags := []utils.ImageTag{} - for _, it := range verify.OpenShiftResources.ImageTags { - imageTags = append( - imageTags, - utils.ImageTag{ - Name: renderTemplate(t, it.Name, tmplData), - Tag: renderTemplate(t, it.Tag, tmplData), - }, - ) - } - resources := utils.Resources{ - Namespace: ocNamespace, - ImageTags: imageTags, - BuildConfigs: renderTemplates(t, verify.OpenShiftResources.BuildConfigs, tmplData), - DeploymentConfigs: renderTemplates(t, verify.OpenShiftResources.DeploymentConfigs, tmplData), - Services: renderTemplates(t, verify.OpenShiftResources.Services, tmplData), - ImageStreams: renderTemplates(t, verify.OpenShiftResources.ImageStreams, tmplData), - } - utils.CheckResources(resources, t) - } -} - -func renderTemplates(t *testing.T, tpls []string, tmplData TemplateData) []string { - rendered := []string{} - for _, tpl := range tpls { - rendered = append(rendered, renderTemplate(t, tpl, tmplData)) - } - return rendered -} - -func renderTemplate(t *testing.T, tpl string, tmplData TemplateData) string { - var attachmentBuffer bytes.Buffer - tmpl, err := template.New("attachment").Parse(tpl) - 
if err != nil { - t.Fatalf("Error parsing template: %s", err) - } - tmplErr := tmpl.Execute(&attachmentBuffer, tmplData) - if tmplErr != nil { - t.Fatalf("Error rendering template: %s", tmplErr) - } - return attachmentBuffer.String() -} diff --git a/tests/quickstarter/reporting/export.go b/tests/quickstarter/reporting/export.go new file mode 100644 index 000000000..9d9a2632a --- /dev/null +++ b/tests/quickstarter/reporting/export.go @@ -0,0 +1,97 @@ +package reporting + +import ( + "encoding/json" + "fmt" + "os" +) + +// JUnitTestSuite represents a JUnit test suite +type JUnitTestSuite struct { + Name string `xml:"name,attr"` + Tests int `xml:"tests,attr"` + Failures int `xml:"failures,attr"` + Skipped int `xml:"skipped,attr"` + Time string `xml:"time,attr"` + TestCase []JUnitTestCase `xml:"testcase"` +} + +// JUnitTestCase represents a single test case in JUnit format +type JUnitTestCase struct { + Name string `xml:"name,attr"` + ClassName string `xml:"classname,attr"` + Time string `xml:"time,attr"` + Failure *JUnitFailure `xml:"failure,omitempty"` + Skipped *JUnitSkipped `xml:"skipped,omitempty"` + StdErr string `xml:"system-err,omitempty"` +} + +// JUnitFailure represents a test failure in JUnit format +type JUnitFailure struct { + Message string `xml:"message,attr"` + Text string `xml:",chardata"` +} + +// JUnitSkipped represents a skipped test in JUnit format +type JUnitSkipped struct { + Message string `xml:"message,attr"` +} + +// ExportJUnitXML exports the test report in JUnit XML format (simplified JSON-based approach). +// For full XML compatibility, consider using a dedicated XML library. 
+func ExportJUnitXML(report *TestReport, outputPath string) error { + suite := JUnitTestSuite{ + Name: report.QuickstarterID, + Tests: report.Summary.TotalSteps, + Failures: report.Summary.FailedSteps, + Skipped: report.Summary.SkippedSteps, + Time: fmt.Sprintf("%.2f", report.TotalDuration.Seconds()), + } + + for _, step := range report.Steps { + tc := JUnitTestCase{ + Name: step.Type, + ClassName: fmt.Sprintf("%s.%s", report.QuickstarterID, step.Description), + Time: fmt.Sprintf("%.2f", step.Duration.Seconds()), + } + + if step.Status == "failed" && step.Error != "" { + tc.Failure = &JUnitFailure{ + Message: step.Error, + Text: step.Error, + } + } else if step.Status == "skipped" { + tc.Skipped = &JUnitSkipped{ + Message: "Step skipped", + } + } + + suite.TestCase = append(suite.TestCase, tc) + } + + // Export as JSON for now (can be converted to XML later if needed) + data, err := json.MarshalIndent(suite, "", " ") + if err != nil { + return err + } + + if err := os.WriteFile(outputPath, data, 0644); err != nil { + return fmt.Errorf("failed to write JUnit report: %w", err) + } + + return nil +} + +// ExportJSON exports the test report as JSON. +func ExportJSON(report *TestReport, outputPath string) error { + data, err := report.ToJSON() + if err != nil { + return err + } + + if err := os.WriteFile(outputPath, data, 0644); err != nil { + return fmt.Errorf("failed to write JSON report: %w", err) + } + + return nil +} diff --git a/tests/quickstarter/reporting/reporting.go b/tests/quickstarter/reporting/reporting.go new file mode 100644 index 000000000..76529615c --- /dev/null +++ b/tests/quickstarter/reporting/reporting.go @@ -0,0 +1,148 @@ +package reporting + +import ( + "encoding/json" + "fmt" + "sync" + "time" +) + +// TestReport contains aggregated test execution metrics. 
+type TestReport struct { + StartTime time.Time `json:"startTime"` + EndTime time.Time `json:"endTime"` + TotalDuration time.Duration `json:"totalDuration"` + QuickstarterID string `json:"quickstarterID"` + Steps []StepReport `json:"steps"` + Summary TestSummary `json:"summary"` + mu sync.RWMutex `json:"-"` +} + +// StepReport contains execution metrics for a single test step. +type StepReport struct { + Index int `json:"index"` + Type string `json:"type"` + Description string `json:"description"` + StartTime time.Time `json:"startTime"` + EndTime time.Time `json:"endTime"` + Duration time.Duration `json:"duration"` + Status string `json:"status"` // "passed", "failed", "skipped" + Error string `json:"error,omitempty"` + Context map[string]interface{} `json:"context,omitempty"` // Pod logs, events, etc. on failure +} + +// TestSummary provides overall test execution statistics. +type TestSummary struct { + TotalSteps int `json:"totalSteps"` + PassedSteps int `json:"passedSteps"` + FailedSteps int `json:"failedSteps"` + SkippedSteps int `json:"skippedSteps"` + SuccessRate float64 `json:"successRate"` + AverageDuration time.Duration `json:"averageDuration"` +} + +// NewTestReport creates a new test report for a quickstarter. +func NewTestReport(quickstarterID string) *TestReport { + return &TestReport{ + StartTime: time.Now(), + QuickstarterID: quickstarterID, + Steps: []StepReport{}, + } +} + +// RecordStepStart records the start of a step execution. +func (tr *TestReport) RecordStepStart(index int, stepType, description string) { + tr.mu.Lock() + defer tr.mu.Unlock() + + step := StepReport{ + Index: index, + Type: stepType, + Description: description, + StartTime: time.Now(), + Status: "running", + } + tr.Steps = append(tr.Steps, step) +} + +// RecordStepEnd records the completion of a step execution. 
+func (tr *TestReport) RecordStepEnd(index int, status string, err error, context map[string]interface{}) { + tr.mu.Lock() + defer tr.mu.Unlock() + + if index >= len(tr.Steps) { + return + } + + tr.Steps[index].EndTime = time.Now() + tr.Steps[index].Duration = tr.Steps[index].EndTime.Sub(tr.Steps[index].StartTime) + tr.Steps[index].Status = status + if err != nil { + tr.Steps[index].Error = err.Error() + } + if context != nil { + tr.Steps[index].Context = context + } +} + +// Finalize calculates the summary statistics and marks the report as complete. +func (tr *TestReport) Finalize() { + tr.mu.Lock() + defer tr.mu.Unlock() + + tr.EndTime = time.Now() + tr.TotalDuration = tr.EndTime.Sub(tr.StartTime) + + summary := TestSummary{ + TotalSteps: len(tr.Steps), + } + + totalDuration := time.Duration(0) + + for _, step := range tr.Steps { + switch step.Status { + case "passed": + summary.PassedSteps++ + case "failed": + summary.FailedSteps++ + case "skipped": + summary.SkippedSteps++ + } + totalDuration += step.Duration + } + + if summary.TotalSteps > 0 { + summary.SuccessRate = float64(summary.PassedSteps) / float64(summary.TotalSteps) * 100 + summary.AverageDuration = totalDuration / time.Duration(summary.TotalSteps) + } + + tr.Summary = summary +} + +// ToJSON serializes the report to JSON. +func (tr *TestReport) ToJSON() ([]byte, error) { + return json.MarshalIndent(tr, "", " ") +} + +// String returns a human-readable summary of the test report. 
+func (tr *TestReport) String() string { + s := tr.Summary + return fmt.Sprintf( + "Test Report: %s\n"+ + " Total Steps: %d\n"+ + " Passed: %d\n"+ + " Failed: %d\n"+ + " Skipped: %d\n"+ + " Success Rate: %.2f%%\n"+ + " Total Duration: %s\n"+ + " Avg Per Step: %s", + tr.QuickstarterID, + s.TotalSteps, + s.PassedSteps, + s.FailedSteps, + s.SkippedSteps, + s.SuccessRate, + tr.TotalDuration, + s.AverageDuration, + ) +} diff --git a/tests/quickstarter/resources/Jenkinsfile-create-jobs b/tests/quickstarter/resources/Jenkinsfile-create-jobs new file mode 100644 index 000000000..e7ee3c99b --- /dev/null +++ b/tests/quickstarter/resources/Jenkinsfile-create-jobs @@ -0,0 +1,160 @@ +def dockerRegistry +def ODS_PROJECT +def odsNamespace +def QUICK_STARTERS_URL +def odsTag +def JENKINS_URL +def FOLDER_NAME +def BITBUCKET_URL +def CREDENTIALS_ID + +properties([ + parameters([ + string( name: 'QUICK_STARTERS_URL', + defaultValue: 'https://github.com/opendevstack/ods-quickstarters.git', + description: 'URL of the quickstarters repository'), + string( + name: 'configFileId', + defaultValue: 'quickstarter-test-config', + description: 'ID of the managed config file containing environment configuration' + ) + ]) +]) + +node { + dockerRegistry = env.DOCKER_REGISTRY + + stage('Load Configuration') { + echo "Loading configuration from managed file: quickstarter-test-config" + def configContent + configFileProvider([configFile(fileId: 'quickstarter-test-config', variable: 'CONFIG_FILE')]) { + // Source the .env file to load variables + configContent = readProperties(file: env.CONFIG_FILE) + echo "✓ Configuration loaded successfully" + } + odsTag = '4.x' + ODS_PROJECT = configContent.ODS_PROJECT + ODS_NAMESPACE = configContent.ODS_NAMESPACE ?: 'ods' + BITBUCKET_URL = configContent.BITBUCKET_URL + CREDENTIALS_ID = configContent.CREDENTIALS_ID_PATTERN + JENKINS_URL = configContent.JENKINS_URL + ODS_QUICKSTARTERS_TESTS_BRANCH = configContent.ODS_QUICKSTARTERS_TESTS_BRANCH ?: 'master' + 
QUICK_STARTERS_URL = params.QUICK_STARTERS_URL + + // Extract repository name from URL + def repoName = QUICK_STARTERS_URL.tokenize('/')[-1] + FOLDER_NAME = repoName.replaceAll('\\.git$', '') + + echo """\ + ✓ Configuration loaded successfully + Docker Registry: ${dockerRegistry} + ODS Project: ${ODS_PROJECT} + ODS Namespace: ${ODS_NAMESPACE} + Bitbucket URL: ${BITBUCKET_URL} + Jenkins URL: ${JENKINS_URL} + Quickstarters Repo URL: ${QUICK_STARTERS_URL} + ODS Core - Quickstarters Branch: ${ODS_QUICKSTARTERS_TESTS_BRANCH} + FOLDER_NAME: ${FOLDER_NAME} + """.stripIndent() + } +} + +def conts = containerTemplate( + name: 'jnlp', + image: "${dockerRegistry}/${ODS_NAMESPACE}/jenkins-agent-base:${odsTag}", + workingDir: '/tmp', +) + +def podLabel = "ods-qs-create-jobs-${UUID.randomUUID().toString()}" + +podTemplate( + cloud: 'openshift', + label: podLabel, + containers: [conts], + volumes: [], + serviceAccount: 'jenkins' +) { + node(podLabel) { + stage('Init'){ + + git url: "${BITBUCKET_URL}/scm/${ODS_PROJECT}/ods-core.git", + branch: "${ODS_QUICKSTARTERS_TESTS_BRANCH}", + credentialsId: "${CREDENTIALS_ID}" + + dir('tests/quickstarter/resources/scripts/repo'){ + git url: "$QUICK_STARTERS_URL", + credentialsId: "${CREDENTIALS_ID}" + + } + } + + stage('Select Branches') { + script { + def availableBranches = [] + dir('tests/quickstarter/resources/scripts/repo') { + availableBranches = sh( + script: 'git branch -r | grep -v HEAD | sed "s/.*origin\\///" | sort -u', + returnStdout: true + ).trim().split('\n').toList() + + echo "Available branches in ${QUICK_STARTERS_URL}:" + availableBranches.each { branch -> + echo " - ${branch}" + } + } + + // Prompt for interactive branch selection + // Create boolean parameters for each branch + def branchParams = availableBranches.collect { branch -> + booleanParam( + name: branch.replaceAll('[^a-zA-Z0-9_]', '_'), + description: "Process branch: ${branch}", + defaultValue: false + ) + } + + def userInput = input( + message: 'Select 
branches to process', + parameters: branchParams + ) + + // Collect selected branches + def selectedBranches = [] + availableBranches.each { branch -> + def paramName = branch.replaceAll('[^a-zA-Z0-9_]', '_') + if (userInput instanceof Boolean) { + // Single branch case + if (userInput) { + selectedBranches.add(branch) + } + } else { + // Multiple branches case + if (userInput[paramName]) { + selectedBranches.add(branch) + } + } + } + + env.BRANCHES_TO_PROCESS = selectedBranches.join(' ') + echo "Selected branches: ${env.BRANCHES_TO_PROCESS}" + } + } + + stage('Create jobs') { + sh(script: """ + cd tests/quickstarter/resources/scripts + ./create_jobs.sh \\ + --quickstarters-repository ${QUICK_STARTERS_URL} \\ + --jenkins-url ${JENKINS_URL} \\ + --project ${ODS_PROJECT} \\ + --ods-ref 4.x \\ + --branches "${BRANCHES_TO_PROCESS}" \\ + --folder-name "${FOLDER_NAME}" \\ + --bitbucket-url ${BITBUCKET_URL} \\ + --credentials-id ${CREDENTIALS_ID} \\ + --ods-quickstarters-test-branch ${ODS_QUICKSTARTERS_TESTS_BRANCH} \\ + --no-clone + """) + } + } +} diff --git a/tests/quickstarter/resources/Jenkinsfile-qs b/tests/quickstarter/resources/Jenkinsfile-qs new file mode 100644 index 000000000..0d307c039 --- /dev/null +++ b/tests/quickstarter/resources/Jenkinsfile-qs @@ -0,0 +1,189 @@ +// List of QS that we want to exclude from the execution of the job. 
+def excludedQS = params.excludedQuickstarters?:'' + +// Jenkins DeploymentConfig environment variables +def dockerRegistry +def credentialsId +def sonarQualityProfile +def sonarQualityGate + +// Load configuration from managed config file +node { + stage('Load Configuration') { + echo "Loading configuration from managed file: quickstarter-test-config" + def configContent + def configProps = [:] + configFileProvider([configFile(fileId: 'quickstarter-test-config', variable: 'CONFIG_FILE')]) { + // Source the .env file to load variables + configContent = readProperties(file: env.CONFIG_FILE) + echo "✓ Configuration loaded successfully" + } + + // Set variables from config and parameters + dockerRegistry = env.DOCKER_REGISTRY + project = params.project ?: env.ODS_PROJECT + projectId = env.PROJECT_ID + odsRef = params.quickstarterRef ?: env.ODS_GIT_REF + odsNamespace = env.ODS_NAMESPACE + odsGitRef = env.ODS_GIT_REF + odsImageTag = env.ODS_IMAGE_TAG ?: '4.x' + sharedLibraryRef = env.SHARED_LIBRARY_REF ?: odsImageTag + agentImageTag = env.AGENT_IMAGE_TAG ?: odsImageTag + odsMainBitbucketProject = env.ODS_BITBUCKET_PROJECT ?: 'ods' + + // Construct repository URLs from config + QUICK_STARTERS_URL = "${params.quickstartersRepositoryUrl}" + QUICK_STARTERS_BRANCH = params.quickstarterRef ?: env.ODS_GIT_REF + + ODS_CONFIGURATION_URL = "${configContent.BITBUCKET_URL}/scm/${project}/ods-configuration.git" + ODS_CONFIGURATION_BRANCH = configContent.ODS_CONFIGURATION_BRANCH + + ODS_CORE_URL = "${configContent.BITBUCKET_URL}/scm/${project}/ods-core.git" + ODS_QUICKSTARTERS_TESTS_BRANCH = configContent.ODS_QUICKSTARTERS_TESTS_BRANCH ?: 'master' + + // Credentials ID from pattern + credentialsId = configContent.CREDENTIALS_ID_PATTERN + + // List of QS that we want to exclude from the execution of the job + excludedQS = params.excludedQuickstarters ?: '' + + // Sonar config + sonarQualityProfile = configContent.SONAR_QUALITY_PROFILE + sonarQualityGate = 
configContent.SONAR_QUALITY_GATE + + echo """\ + ✓ Configuration loaded successfully + Project: ${project} + ODS Namespace: ${odsNamespace} + Bitbucket URL: ${configContent.BITBUCKET_URL} + Quickstarters Repo URL: ${QUICK_STARTERS_URL} + Quickstarters Branch: ${ODS_QUICKSTARTERS_TESTS_BRANCH} + ODS Core URL: ${ODS_CORE_URL} + Docker Registry: ${dockerRegistry} + Credentials ID: ${credentialsId} + Sonar Quality Profile: ${sonarQualityProfile} + Sonar Quality Gate: ${sonarQualityGate} + """.stripIndent() + } // End of stage +} // End of node + +def conts = containerTemplate( + name: 'jnlp', + image: "${dockerRegistry}/${odsNamespace}/jenkins-agent-golang:4.x", + workingDir: '/tmp', + alwaysPullImage: true, + args: '' +) + + +def podLabel = "qs-tests" +podTemplate( + label: podLabel, + cloud: 'openshift', + containers: [conts], + volumes: [], + serviceAccount: 'jenkins' +) { + node(podLabel) { + + stage('Init') { + currentBuild.description = "Testing QS" + echo "${WORKSPACE}" + sh "ls -Al ${WORKSPACE}" + sh "pwd" + sh "ls -Al" + + // Retrieve the quickstarter repository in the folder that corresponds to its name. + def quickstartersDirName = "${QUICK_STARTERS_URL}".substring(QUICK_STARTERS_URL.lastIndexOf('/') + 1).replace('.git', '') + dir(quickstartersDirName) { + echo "Checking out Quickstarters repository from ${QUICK_STARTERS_URL} with branch ${QUICK_STARTERS_BRANCH}" + git branch: "${QUICK_STARTERS_BRANCH}", + credentialsId: "${project}-cd-cd-user-with-password", + url: "${QUICK_STARTERS_URL}" + } + + // Retrive the configuration repository in the folder that corresponds to its name. 
+ dir('ods-configuration') { + echo "Checking out ODS Configuration repository from ${ODS_CONFIGURATION_URL} with branch ${ODS_CONFIGURATION_BRANCH}" + git branch: "${ODS_CONFIGURATION_BRANCH}", + credentialsId: "${project}-cd-cd-user-with-password", + url: "${ODS_CONFIGURATION_URL}" + } + + dir('ods-core') { + echo "Checking out ODS Core repository from ${ODS_CORE_URL} with branch ${ODS_QUICKSTARTERS_TESTS_BRANCH}" + git branch: "${ODS_QUICKSTARTERS_TESTS_BRANCH}", + credentialsId: "${project}-cd-cd-user-with-password", + url: "${ODS_CORE_URL}" + + echo "Writing quickstarters exclusion list to ./tests/quickStartersExclusionList.txt" + writeFile file: './tests/quickStartersExclusionList.txt', text: excludedQS + } + + echo "${WORKSPACE}" + sh "ls -Al ${WORKSPACE}" + sh "pwd" + sh "ls -Al" + + } + + stage('updatecred') { + withCredentials([ + usernamePassword( + credentialsId: "${project}-cd-cd-user-with-password", + passwordVariable: 'PASSWD', + usernameVariable: 'USER') + ]) { + sh (returnStdout: false, script: ''' + #!/bin/sh -e + set +x + pushd ods-configuration/ + user_id=$(echo -n "$USER" | base64) + user_pwd=$(echo -n "$PASSWD" | base64) + trigger_secret_base64=$(oc get secret webhook-proxy -o json | jq -r '.data."trigger-secret"') + trigger_secret=$(echo $(oc get secret webhook-proxy -o json | jq -r '.data."trigger-secret"') | base64 --decode) + sed -i~ "/^CD_USER_ID=/s/=.*/=$USER/" ods-core.env + sed -i~ "/^CD_USER_ID_B64=/s/=.*/=$user_id/" ods-core.env + sed -i~ "/^CD_USER_PWD_B64=/s/=.*/=$user_pwd/" ods-core.env + sed -i~ "/^PIPELINE_TRIGGER_SECRET_B64=/s/=.*/=$trigger_secret_base64/" ods-core.env + sed -i~ "/^PIPELINE_TRIGGER_SECRET=/s/=.*/=$trigger_secret/" ods-core.env + sed -i~ "/^ODS_BITBUCKET_PROJECT=/s/=.*/=$project/" ods-core.env + popd + ''') + } + } + + stage('Test') { + echo "${WORKSPACE}" + + // If we select 'all' no one parameter will be provided, so it will try to test all the Quickstarters + // that have a test defined in the testdata 
folder. + def quickstarter_to_test = "" + if(params.quickstarter && params.quickstarter != "all") { + quickstarter_to_test = "-q ${quickstarter}" + } + + + // In different environments SONAR_QUALITY_PROFILE can be different, with this env variable we provide + // the needed value, in BI this value is 'Sonar way' + withEnv([ + "CGO_ENABLED=0", + "GOCACHE=${WORKSPACE}/.cache", + "GOMODCACHE=${WORKSPACE}/.cache", + "SONAR_QUALITY_PROFILE=sonar BI way", + "TMPL_SonarQualityGate=ODS Default Quality Gate", + "ODS_GIT_REF=${odsRef}"]) { + sh """ + cd ods-core/tests + ./quickstarter-test.sh -p ${project} ${quickstarter_to_test} || true + """ + } + } + + stage('Get test results') { + archiveArtifacts artifacts: 'ods-core/tests/*.txt', followSymlinks: false + archiveArtifacts artifacts: 'ods-core/tests/test-quickstarter-report.xml', followSymlinks: false + junit(testResults:"ods-core/tests/*.xml", allowEmptyResults:true) + } + } // End of node +} diff --git a/tests/quickstarter/resources/README.md b/tests/quickstarter/resources/README.md new file mode 100644 index 000000000..79662b84b --- /dev/null +++ b/tests/quickstarter/resources/README.md @@ -0,0 +1,87 @@ +# ODS Quickstarters Tests + +This repository contains the Jenkins pipelines needed to execute tests for ODS quickstarters in a generic, environment-agnostic way. + + +## Architecture + +The solution use: + +1. **Jenkinsfile-create-jobs**: Pipeline to create Jenkins jobs from quickstarter repositories +2. **Jenkinsfile-qs**: Individual quickstarter test pipeline (called by created jobs) + +## Setup Instructions + +Follow this sequence to set up and run quickstarter tests: + +### Step 1: Create the Managed Configuration File + +1. Go to **Manage Jenkins** → **Managed files**. +2. Add a new **Custom file** with ID `quickstarter-test-config`. +3. Copy the contents of `quickstarter-test.env.template` and replace the values with your environment-specific values. + +### Step 2: Create and Run the Job Creation Pipeline + +1. 
Create a Jenkins pipeline job pointing to `Jenkinsfile-create-jobs`. +2. Execute the job and provide the URL of the repository to be tested when prompted. +3. If the job fails due to script permissions, fix the execution permissions in your Jenkins instance and re-run. +4. Select the branches you want to process in the interactive prompt. + +After completion, you will get a folder per repository and a subfolder per branch. Inside each branch folder you will find one job per quickstarter and a job to run them all. If you do not want a quickstarter to run, disable its job. + +## Configuration File + +The managed configuration file (`quickstarter-test-config`) contains environment-specific parameters in `.env` format: + +```bash +# Bitbucket/Git Configuration +BITBUCKET_URL=https://bitbucket-myproject-cd.apps.example.com + +# OpenShift/Kubernetes Configuration +OPENSHIFT_APPS_BASEDOMAIN=apps.example.com + +# Project/Namespace Configuration +ODS_PROJECT=myproject +ODS_NAMESPACE=ods + +# Credentials Configuration +CREDENTIALS_ID_PATTERN=myproject-cd-cd-user-with-password +CREDENTIALS_TOKEN_ID_PATTERN=myproject-cd-cd-user-token + +# Git Repository References +ODS_CORE_BRANCH=master +ODS_CONFIGURATION_BRANCH=master +ODS_GIT_REF=master + +# Docker Registry +DOCKER_REGISTRY=docker-registry.default.svc:5000 + +# Sonar Configuration +SONAR_QUALITY_PROFILE=Sonar way +SONAR_QUALITY_GATE=ODS Default Quality Gate +``` + +## Updating Configuration + +To update the configuration for a different environment, update the managed file `quickstarter-test-config` with the new values. + +## Template File + +A template file (`quickstarter-test.env.template`) is provided as a reference showing all available configuration parameters with examples and descriptions. 
+ +## Credentials + +The pipelines expect Jenkins credentials to be configured with IDs following the pattern: +- `${ODS_PROJECT}-cd-cd-user-with-password`: Username/password credentials +- `${ODS_PROJECT}-cd-cd-user-token`: Token-based credentials + +These credentials must be created in Jenkins before running the test pipelines. + +## Files + +- **Jenkinsfile-create-jobs**: Job creation pipeline +- **Jenkinsfile-qs**: Individual quickstarter test pipeline +- **quickstarter-test.env.template**: Configuration template reference +- **scripts/create_jobs.sh**: Script to create Jenkins jobs +- **scripts/job_template.xml**: XML template for Jenkins jobs +- **scripts/run_all.xml**: XML template to run all the Jenkins jobs in a folder diff --git a/tests/quickstarter/resources/quickstarter-test.env.template b/tests/quickstarter/resources/quickstarter-test.env.template new file mode 100644 index 000000000..1cce09a29 --- /dev/null +++ b/tests/quickstarter/resources/quickstarter-test.env.template @@ -0,0 +1,62 @@ +# Quickstarter Test Configuration + +# Bitbucket/Git Configuration +# Base URL of your Bitbucket instance (without /scm/) +# Example: https://bitbucket-myproject-cd.apps.example.com +BITBUCKET_URL=https://bitbucket-e2etsqs-cd.apps.us-test.ocp.aws.boehringer.com + +# OpenShift/Kubernetes Configuration +# Base domain for OpenShift applications +# Example: apps.example.com +OPENSHIFT_APPS_BASEDOMAIN=apps.us-test.ocp.aws.boehringer.com + +# Project/Namespace Configuration +# Main project identifier used for testing +# Example: quickstarterstests +ODS_PROJECT=quickstarterstests + +# ODS namespace where Jenkins agents and images are located +# Example: ods +ODS_NAMESPACE=ods + +# Credentials Configuration +# Jenkins credentials ID pattern for Git operations +# Example: quickstarterstests-cd-cd-user-with-password +CREDENTIALS_ID_PATTERN=quickstarterstests-cd-cd-user-with-password + +# Token-based credentials ID for certain operations +# Example: 
quickstarterstests-cd-cd-user-with-token +CREDENTIALS_TOKEN_ID_PATTERN=quickstarterstests-cd-cd-user-token + +# Git Repository References +# ODS Core repository branch/tag +ODS_CORE_BRANCH=4.x + +# ODS Configuration repository branch/tag +ODS_CONFIGURATION_BRANCH=master + +# Default ODS reference for shared library +ODS_GIT_REF=4.x + +# ODS Quickstarters tests branch/tag form ods-core +# Example: feature/improving-qs-testframework +ODS_QUICKSTARTERS_TESTS_BRANCH=feature/improving-qs-testframework + +# Docker Registry +# Internal OpenShift docker registry +# Example: docker-registry.default.svc:5000 +DOCKER_REGISTRY=docker-registry.default.svc:5000 + +# Sonar Configuration +# Sonar quality profile name (environment-specific) +# Example: My Company Sonar Way +SONAR_QUALITY_PROFILE=My Company Sonar Way + +# Default Sonar quality gate +# Example: My company Default Quality Gate +SONAR_QUALITY_GATE=My company Default Quality Gate + +# Jenkins URL +# The Jenkins instance where the tests runs +# Example: https://jenkins-quickstarterstests-cd.apps.my-cluster.com +JENKINS_URL=https://jenkins-quickstarterstests-cd.apps.my-cluster.com \ No newline at end of file diff --git a/tests/quickstarter/resources/scripts/create_jobs.sh b/tests/quickstarter/resources/scripts/create_jobs.sh new file mode 100755 index 000000000..a7a705922 --- /dev/null +++ b/tests/quickstarter/resources/scripts/create_jobs.sh @@ -0,0 +1,430 @@ +#!/bin/bash +##set -ue +#set -o pipefail + +me=${0##*/} + +QUICKSTARTERS_REPOSITORY_URL="" +OPENSHIFT_TOKEN="" +ODS_REF="master" +JENKINS_URL="" +PROJECT="" +BRANCHES="" +JENKINS_FOLDER_NAME="qs-automated-tests" +SCRIPT_PATH=$(pwd) +JOB_TEMPLATE="$SCRIPT_PATH/job_template.xml" +RUN_ALL_TEMPLATE="$SCRIPT_PATH/run_all.xml" +TMP_FOLDER=$SCRIPT_PATH/tmp +REPOSITORY_NAME="" +REPOSITORY_USERNAME="" +REPOSITORY_PASSWORD="" +NO_CLONE= + +echo_done(){ + echo -e "[DONE]: $@" +} + +echo_warn(){ + echo -e "[WARN]: $@" +} + +echo_error(){ + echo -e "[ERROR]: $@" +} + 
+echo_info(){ + echo -e "[INFO]: $@" +} + +function environment() { + echo_info "Environment:" + echo_info " SCRIPT_PATH: $SCRIPT_PATH" + echo_info " JOB_TEMPLATE: $JOB_TEMPLATE" + echo_info " RUN_ALL_TEMPLATE: $RUN_ALL_TEMPLATE" + echo_info " TMP_FOLDER: $TMP_FOLDER" + echo_info " PROJECT: $PROJECT" + echo_info " QUICKSTARTERS_REPOSITORY_URL: $QUICKSTARTERS_REPOSITORY_URL" + echo_info " BRANCHES: ${BRANCHES:-all}" + echo_info " JENKINS_FOLDER_NAME: $JENKINS_FOLDER_NAME" +} + +function prerrequisites() { + local error=0 + + echo_info "Verifying parameters" + + if [ -z "${OPENSHIFT_TOKEN}" ]; then + OPENSHIFT_TOKEN=$(oc whoami -t) + if [ -z "${OPENSHIFT_TOKEN}" ]; then + echo_warn "--openshift-token is mandatory or you must be logged in a cluster." + error=1 + fi + fi + + if [ -z "${QUICKSTARTERS_REPOSITORY_URL}" ]; then + echo_warn "--quickstarters-repository is mandatory" + error=1 + fi + + if [ -z "${JENKINS_URL}" ]; then + echo_warn "--jenkins-url is mandatory" + error=1 + fi + + if [ -z "${PROJECT}" ]; then + echo_warn "--project is mandatory" + error=1 + fi + + if [[ error -ne 0 ]]; then + usage + exit 1 + fi + + rm -Rf tmp + mkdir -p tmp + + # Extract the repository name + REPOSITORY_NAME=$(basename $QUICKSTARTERS_REPOSITORY_URL .git) +} + +function process_repo_qs_branch() { + # Create parent folder first + create_jenkins_folder "$JENKINS_FOLDER_NAME" + + if [ -z "${NO_CLONE}" ]; then + echo_info "Clone the Quickstarter Repository: $QUICKSTARTERS_REPOSITORY_URL" + rm -rf repo + git clone $QUICKSTARTERS_REPOSITORY_URL repo --quiet + [ $? -ne 0 ] && exit 1 + fi + + + ## Change to the repo directory + pushd repo + + # Get all branches + for branch in $(git branch -r | grep -v HEAD);do + local filtered_branch=$(echo $branch| sed 's/origin\///') + + # Skip branch if BRANCHES is specified and this branch is not in the list + if [ -n "$BRANCHES" ]; then + if ! 
echo "$BRANCHES" | grep -qw "$filtered_branch"; then + echo_info "Skipping branch: $filtered_branch (not in specified branches list)" + continue + fi + fi + + git checkout $filtered_branch --quiet + + create_jenkins_folder "$JENKINS_FOLDER_NAME/$filtered_branch" + + # Get directories in the root + for dir in $(ls -d */);do + # Check if directory contains a folder named 'testdata' + if [ -d "$dir/testdata" ]; then + create_job branch=$filtered_branch ods_ref=$ODS_REF template=$JOB_TEMPLATE qs=${dir//\//} + fi + done + + # Create the run_all orchestration job for this branch + echo_info "Creating run_all orchestration job for branch: ${filtered_branch}" + create_run_all_job branch=$filtered_branch + done + popd +} + +function replace_placeholders { + # Get the input and output files from the arguments + local INPUT_FILE=$1 + local OUTPUT_FILE=$2 + shift 2 + + echo_info "Creating file $OUTPUT_FILE from $INPUT_FILE" + # Create a copy of the input file to the output file + cp $INPUT_FILE $OUTPUT_FILE + + # Loop over the remaining arguments + for ARG in "$@"; do + # Use the IFS variable to split the argument into a placeholder and a value + IFS='=' read -r PLACEHOLDER REPLACEMENT_VALUE <<< "$ARG" + + # Use the sed command to replace the placeholder with the replacement value + sed -i "s|${PLACEHOLDER}|${REPLACEMENT_VALUE}|g" $OUTPUT_FILE + done +} + +# This bash function creates or updates a Jenkins job. +# It first checks if the job already exists, and if it does, it updates it. +# If it doesn't, it creates a new one. +# The job configuration is based on a template file, where placeholders are replaced with actual values. 
+# +# Arguments: +# $1 - The branch name +# $2 - The quickstarter name +# $3 - The OpenDevStack reference +# +# Globals: +# JOB_TEMPLATE - The path to the job configuration template file +# TMP_FOLDER - The path to the temporary folder where the job configuration file is stored +# PROJECT - The project name +# QUICKSTARTERS_REPOSITORY_URL - The URL of the quickstarters repository +# REPOSITORY_NAME - The name of the repository +# JENKINS_URL - The URL of the Jenkins server +# OPENSHIFT_TOKEN - The OpenShift token used for authentication +# +# Returns: +# None +function create_job() { + local branch="" + local qs="" + local ods_ref="" + local template="" + local job_name="" + local data_binary="" + local folder="" + + for arg in "$@" + do + IFS='=' read -r key value <<< "$arg" + case "$key" in + branch) branch="$value" ;; + qs) qs="$value" ;; + ods_ref) ods_ref="$value" ;; + template) template="$value" ;; + job_name) job_name="$value" ;; + esac + done + + echo_info "branch: ${branch}, ods_ref: ${ods_ref}, template: ${template}, qs: ${qs}, job_name: ${job_name}" + + local job_filename=${job_name:-"$branch-$qs-$ods_ref"}.xml + + if [ -z "$branch" ] || [ -z "$ods_ref" ] || [ -z "$template" ]; then + echo_error "(create_job): Missing mandatory parameters." 
+ return 1 + fi + + replace_placeholders "$template" "$TMP_FOLDER/$job_filename" \ + "{{PROJECT}}=$PROJECT" \ + "{{QUICKSTARTERS_REPOSITORY_URL}}=$QUICKSTARTERS_REPOSITORY_URL" \ + "{{QS}}=$REPOSITORY_NAME/$qs" \ + "{{BRANCH}}=$branch" \ + "{{ODSREF}}=$ods_ref" \ + "{{BITBUCKET_URL}}=${BITBUCKET_URL}" \ + "{{CREDENTIALS_ID}}=${CREDENTIALS_ID}" \ + "{{OPENSHIFT_APPS_BASEDOMAIN}}=${OPENSHIFT_APPS_BASEDOMAIN}"\ + "{{ODS_QUICKSTARTERS_TESTS_BRANCH}}=${ODS_QUICKSTARTERS_TESTS_BRANCH}" + + echo_info "branch: $branch, qs: $qs, job_name: $job_name, folder: /job/$JENKINS_FOLDER_NAME/job/$branch" + local exists=1 + if [ -n "$job_name" ]; then + # For ALL-branch jobs, create directly under the branch folder + folder="/job/$JENKINS_FOLDER_NAME/job/$branch" + echo_info "Checking existence for: /$JENKINS_FOLDER_NAME/$branch/$job_name" + check_jenkins_resource_exists "/$JENKINS_FOLDER_NAME/$branch/$job_name" + exists=$? + else + # For regular QS jobs, create under the branch folder + folder="/job/$JENKINS_FOLDER_NAME/job/$branch" + echo_info "Checking existence for: /$JENKINS_FOLDER_NAME/$branch/$qs" + check_jenkins_resource_exists "/$JENKINS_FOLDER_NAME/$branch/$qs" + exists=$? + fi + local url="$JENKINS_URL$folder" + echo_info "Job existence check exit code: $exists" + if [[ $exists -eq 0 ]]; then + if [ -n "$job_name" ]; then + echo_info "Job '$JENKINS_FOLDER_NAME/$branch/$job_name' - ods-ref($ods_ref) already exists. If you want to recreate it, delete all resources related with it." + else + echo_info "Job '$JENKINS_FOLDER_NAME/$branch/$qs' - ods-ref($ods_ref) already exists. If you want to recreate it, delete all resources related with it." 
+ fi + return 0 + fi + if [ -n "$job_name" ]; then + echo_info "Creating job '$JENKINS_FOLDER_NAME/$branch/$job_name' - ods-ref($ods_ref)" + else + echo_info "Creating job '$JENKINS_FOLDER_NAME/$branch/$qs' - ods-ref($ods_ref)" + fi + url+="/createItem?name=${job_name:-"$qs"}" + + curl -s $INSECURE -XPOST "$url" --data-binary @$TMP_FOLDER/$job_filename --header "Authorization: Bearer ${OPENSHIFT_TOKEN}" --header "Content-Type:text/xml" + [ $? -ne 0 ] && echo_warn "Error creating $job_name" +} + +# This function creates a run_all orchestration job for a branch. +# The job configuration is taken directly from run_all.xml without any placeholder substitution. +# +# Arguments: +# branch - The branch name +# +# Globals: +# RUN_ALL_TEMPLATE - The path to the run_all.xml template file +# TMP_FOLDER - The path to the temporary folder where the job configuration file is stored +# JENKINS_URL - The URL of the Jenkins server +# JENKINS_FOLDER_NAME - The Jenkins folder name +# OPENSHIFT_TOKEN - The OpenShift token used for authentication +# +# Returns: +# None +function create_run_all_job() { + local branch="" + + for arg in "$@" + do + IFS='=' read -r key value <<< "$arg" + case "$key" in + branch) branch="$value" ;; + esac + done + + if [ -z "$branch" ]; then + echo_error "(create_run_all_job): Missing mandatory branch parameter." + return 1 + fi + + local job_name="RUN-ALL" + local job_filename="${branch}-${job_name}.xml" + + echo_info "Creating run_all job for branch: $branch" + + # Copy the run_all.xml template without any substitution + cp "$RUN_ALL_TEMPLATE" "$TMP_FOLDER/$job_filename" + + # Check if the job already exists + local folder="/job/$JENKINS_FOLDER_NAME/job/$branch" + echo_info "Checking existence for: /$JENKINS_FOLDER_NAME/$branch/$job_name" + check_jenkins_resource_exists "/$JENKINS_FOLDER_NAME/$branch/$job_name" + local exists=$? + + if [[ $exists -eq 0 ]]; then + echo_info "Job '$JENKINS_FOLDER_NAME/$branch/$job_name' already exists. 
If you want to recreate it, delete all resources related with it." + return 0 + fi + + echo_info "Creating job '$JENKINS_FOLDER_NAME/$branch/$job_name'" + local url="$JENKINS_URL$folder/createItem?name=${job_name}" + + curl -s $INSECURE -XPOST "$url" --data-binary @$TMP_FOLDER/$job_filename --header "Authorization: Bearer ${OPENSHIFT_TOKEN}" --header "Content-Type:text/xml" + [ $? -ne 0 ] && echo_warn "Error creating $job_name" +} + +function check_jenkins_resource_exists() { + local resource_name=$1 + local jenkins_resource_name=$(echo ${resource_name} | sed 's/\//\/job\//g') + + echo_info "Checking if Job or Folder [${jenkins_resource_name}] exists..." + + local response=$(curl $INSECURE -s -o /dev/null -w "%{http_code}" -I -XGET "$JENKINS_URL${jenkins_resource_name}/config.xml" --header "Authorization: Bearer ${OPENSHIFT_TOKEN}") + if [[ "$response" -eq 200 ]]; then + echo_warn "Folder or Job [${resource_name}] exists" + return 0 + fi + echo_info "Folder or Job [${resource_name}] does not exist" + + return 1 +} + +function create_jenkins_folder() { + local folder_name=$1 + + check_jenkins_resource_exists /$folder_name + + if [[ $? 
-ne 0 ]]; then
+        echo_info "Creating folder: $folder_name"
+
+        # Extract parent folder and folder name for nested folders
+        local parent_path=""
+        local folder_basename="${folder_name}"
+
+        if [[ "$folder_name" == */* ]]; then
+            parent_path="${folder_name%/*}"
+            folder_basename="${folder_name##*/}"
+            # Convert parent path to Jenkins job URL format
+            parent_path="/job/${parent_path//\//\/job\/}"
+        fi
+
+        curl -s $INSECURE -XPOST "${JENKINS_URL}${parent_path}/createItem?name=${folder_basename}&mode=com.cloudbees.hudson.plugins.folder.Folder&from=&json=%7B%22name%22%3A%22${folder_basename}%22%2C%22mode%22%3A%22com.cloudbees.hudson.plugins.folder.Folder%22%2C%22from%22%3A%22%22%2C%22Submit%22%3A%22OK%22%7D&Submit=OK" --header "Authorization: Bearer ${OPENSHIFT_TOKEN}" --header "Content-Type:application/x-www-form-urlencoded"
+    fi
+}
+
+function usage {
+    printf "\n"
+    printf "This script creates Jenkins folders and Jobs for the existing Quickstarters in the provided repository.\n\n"
+    printf "Syntax ./${me} parameters.\n\n"
+
+    printf "MANDATORY\n"
+    printf "\t-q|--quickstarters-repository\tBitbucket URL, e.g. 'https://github.com/opendevstack/ods-quickstarters.git'.\n"
+    printf "\t-t|--openshift-token\t\tOpenshift token.\n"
+    printf "\t-p|--project\t\t\tProject Key.\n"
+
+    printf "OPTIONAL\n"
+    printf "\t-j|--jenkins-url\t\tJenkins url (only if you are not logged in the cluster or you don't have 'oc' installed).\n"
+    printf "\t-b|--branches\t\t\tSpace-separated list of branches to process (e.g., 'master 4.x 5.x'). If not specified, all branches will be processed.\n"
+    printf "\t-f|--folder-name\t\tJenkins folder name (defaults to 'qs-automated-tests').\n"
+    printf "\t-h|--help\t\t\tPrint usage.\n"
+    printf "\t-v|--verbose\t\t\tEnable verbose mode.\n"
+    printf "\t-i|--insecure\t\t\tAllow insecure server connections when using SSL.\n"
+    printf "\t--no-clone\t\t\tDo not clone if it is already cloned.\n"
+    printf "\n"
+    printf "\t-o|--ods-ref\t\t\tODS Reference, e.g. 
'master, 4.x' (defaults to $ODS_REF)\n"
+}
+
+while [[ "$#" -gt 0 ]]; do
+    case $1 in
+
+    -v|--verbose) set -x;;
+
+    -h|--help) usage; exit 0;;
+
+    -i|--insecure) INSECURE="--insecure";;
+
+    -p|--project) PROJECT="$2"; shift;;
+    -p=*|--project=*) PROJECT="${1#*=}";;
+
+    -b|--branches) BRANCHES="$2"; shift;;
+    -b=*|--branches=*) BRANCHES="${1#*=}";;
+
+    -f|--folder-name) JENKINS_FOLDER_NAME="$2"; shift;;
+    -f=*|--folder-name=*) JENKINS_FOLDER_NAME="${1#*=}";;
+
+    -q|--quickstarters-repository) QUICKSTARTERS_REPOSITORY_URL="$2"; shift;;
+    -q=*|--quickstarters-repository=*) QUICKSTARTERS_REPOSITORY_URL="${1#*=}";;
+
+    -o|--ods-ref) ODS_REF="$2"; shift;;
+    -o=*|--ods-ref=*) ODS_REF="${1#*=}";;
+
+    -j|--jenkins-url) JENKINS_URL="$2"; shift;;
+    -j=*|--jenkins-url=*) JENKINS_URL="${1#*=}";;
+
+    -t|--openshift-token) OPENSHIFT_TOKEN="$2"; shift;;
+    -t=*|--openshift-token=*) OPENSHIFT_TOKEN="${1#*=}";;
+
+    -u|--user-name) REPOSITORY_USERNAME="$2"; shift;;
+    -u=*|--user-name=*) REPOSITORY_USERNAME="${1#*=}";;
+
+    -pw|--user-password) REPOSITORY_PASSWORD="$2"; shift;;
+    -pw=*|--user-password=*) REPOSITORY_PASSWORD="${1#*=}";;
+
+    -bb|--bitbucket-url) BITBUCKET_URL="$2"; shift;;
+    -bb=*|--bitbucket-url=*) BITBUCKET_URL="${1#*=}";;
+
+    -credential-id|--credentials-id) CREDENTIALS_ID="$2"; shift;;
+    -credential-id=*|--credentials-id=*) CREDENTIALS_ID="${1#*=}";;
+
+    -oqtb|--ods-quickstarters-test-branch) ODS_QUICKSTARTERS_TESTS_BRANCH="$2"; shift;;
+    -oqtb=*|--ods-quickstarters-test-branch=*) ODS_QUICKSTARTERS_TESTS_BRANCH="${1#*=}";;
+
+
+    --no-clone) NO_CLONE=true;;
+
+
+
+
+    *) echo_error "Unknown parameter passed: $1"; usage; exit 1;;
+esac; shift; done
+
+environment
+prerrequisites
+process_repo_qs_branch
+[ $?
-eq 0 ] && echo_done "Jobs created" diff --git a/tests/quickstarter/resources/scripts/job_template.xml b/tests/quickstarter/resources/scripts/job_template.xml new file mode 100644 index 000000000..7551beacf --- /dev/null +++ b/tests/quickstarter/resources/scripts/job_template.xml @@ -0,0 +1,78 @@ + + + false + + + + + + + + + + + -1 + 10 + -1 + -1 + + + + false + + + + + quickstarter + {{QS}} + false + + + odsRef + {{ODSREF}} + false + + + quickstarterRef + {{BRANCH}} + false + + + project + {{PROJECT}} + false + + + quickstartersRepositoryUrl + {{QUICKSTARTERS_REPOSITORY_URL}} + false + + + + + + + + 2 + + + {{BITBUCKET_URL}}/scm/{{PROJECT}}/ods-core.git + {{CREDENTIALS_ID}} + + + + + */{{ODS_QUICKSTARTERS_TESTS_BRANCH}} + + + false + + + + tests/quickstarter/resources/Jenkinsfile-qs + true + + + false + \ No newline at end of file diff --git a/tests/quickstarter/resources/scripts/run_all.xml b/tests/quickstarter/resources/scripts/run_all.xml new file mode 100644 index 000000000..1245fced2 --- /dev/null +++ b/tests/quickstarter/resources/scripts/run_all.xml @@ -0,0 +1,233 @@ + + +true + + +false + \ No newline at end of file diff --git a/tests/quickstarter/sonarqube.go b/tests/quickstarter/sonarqube.go deleted file mode 100644 index 52a8d4e89..000000000 --- a/tests/quickstarter/sonarqube.go +++ /dev/null @@ -1,54 +0,0 @@ -package quickstarter - -import ( - "bytes" - b64 "encoding/base64" - "fmt" - "html/template" - - "github.com/google/go-cmp/cmp" - "github.com/opendevstack/ods-core/tests/utils" -) - -func retrieveSonarScan(projectKey string, config map[string]string) (string, error) { - - fmt.Printf("Getting sonar scan for: %s\n", projectKey) - - sonartoken, _ := b64.StdEncoding.DecodeString(config["SONAR_AUTH_TOKEN_B64"]) - - stdout, stderr, err := utils.RunScriptFromBaseDir("tests/scripts/print-sonar-scan-run.sh", []string{ - string(sonartoken), - config["SONARQUBE_URL"], - projectKey, - }, []string{}) - - if err != nil { - fmt.Printf( - "Execution of 
`tests/scripts/print-sonar-scan-run.sh` failed: \nStdOut: %s\nStdErr: %s\nErr: %s\n", - stdout, - stderr, - err) - return "", err - } - fmt.Printf("Sonar scan result: \n%s\n", stdout) - - return stdout, nil -} - -func verifySonarScan(componentID string, wantScanFile string, gotScan string, tmplData TemplateData) error { - var wantScan bytes.Buffer - tmpl, err := template.ParseFiles(wantScanFile) - if err != nil { - return fmt.Errorf("Failed to load golden file to verify Sonar scan: %w", err) - } - err = tmpl.Execute(&wantScan, tmplData) - if err != nil { - return fmt.Errorf("Failed to render file to verify Sonar scan: %w", err) - } - - if diff := cmp.Diff(wantScan.String(), gotScan); diff != "" { - return fmt.Errorf("Sonar scan mismatch for %s (-want +got):\n%s", componentID, diff) - } - - return nil -} diff --git a/tests/quickstarter/steps.go b/tests/quickstarter/steps.go index 55e118f92..03762b581 100644 --- a/tests/quickstarter/steps.go +++ b/tests/quickstarter/steps.go @@ -4,11 +4,11 @@ import ( "bytes" "encoding/json" "fmt" - "io/ioutil" + "os" "strings" "github.com/ghodss/yaml" - "github.com/opendevstack/ods-core/tests/utils" + "github.com/opendevstack/ods-core/tests/quickstarter/steps" ) // TestSteps defines the steps the test runner should execute. @@ -16,123 +16,14 @@ type TestSteps struct { // Name of the component to provision ComponentID string `json:"componentID"` // Steps to execute - Steps []TestStep `json:"steps"` -} - -// TestStep describes one step to execute. A step consists of a type (e.g. -// "build"), and the related params for it (e.g. "buildParams"). 
-type TestStep struct { - // Type of the step - one of "build", "provision", "upload" - Type string `json:"type"` - // Optional description to explain the step's purpose - Description string `json:"description"` - // ComponentID name for that step (overwrites global component name) - ComponentID string `json:"componentID"` - // Parameters for "provison" step type - ProvisionParams *TestStepProvisionParams `json:"provisionParams"` - // Parameters for "build" step type - BuildParams *TestStepBuildParams `json:"buildParams"` - // Parameters for "upload" step type - UploadParams *TestStepUploadParams `json:"uploadParams"` -} - -// TestStepUploadParams defines the parameters for the "provision" step type. -type TestStepUploadParams struct { - // File to add, commit and push to the repository (relative to "testdata" directory) - File string `json:"file"` - // Name of the uploaded file in the repository. Defaults to just the filename of +File+. - Filename string `json:"filename"` -} - -// TestStepProvisionParams defines the parameters for the "provision" step type. -type TestStepProvisionParams struct { - // Name of the quickstarter to provision. - Quickstarter string `json:"quickstarter"` - // Pipeline allows to customize the pipeline name. - // If empty, the pipeline name is generated. - Pipeline string `json:"pipeline"` - // Quickstarter branch for which to run the pipeline. - // For "provision" steps, it defaults to ODS_GIT_REF. - // For "build" steps, it defaults to "master". - Branch string `json:"branch"` - // Jenkins Agent image tag. - // Defaults to ODS_IMAGE_TAG. - AgentImageTag string `json:"agentImageTag"` - // Jenkins Shared library Git reference. - // Defaults to AgentImageTag. - SharedLibraryRef string `json:"sharedLibraryRef"` - // Additional environment variables - Env []utils.EnvPair `json:"env"` - // Verify parameters. - Verify *TestStepVerify `json:"verify"` -} - -// TestStepBuildParams defines the parameters for the "build" step type. 
-type TestStepBuildParams TestStepProvisionParams - -// TestStepVerify defines the items to verify. -type TestStepVerify struct { - // JSON file defining expected Jenkins stages (relative to "testdata" directory). - JenkinsStages string `json:"jenkinsStages"` - // JSON file defining expected Sonar scan result (relative to "testdata" directory). - SonarScan string `json:"sonarScan"` - // Names of expected attachments to the Jenkins run. - RunAttachments []string `json:"runAttachments"` - // Number of expected test results. - TestResults int `json:"testResults"` - // Expected OpenShift resources in the *-dev namespace. - OpenShiftResources *struct { - // Namespace in which to look for resources (defaults to *-dev). - Namespace string `json:"namespace"` - // Image tags - ImageTags []struct { - // Name of the image - Name string `json:"name"` - // Tag of the image - Tag string `json:"tag"` - } `json:"imageTags"` - // BuildConfig resources - BuildConfigs []string `json:"buildConfigs"` - // ImageStream resources - ImageStreams []string `json:"imageStreams"` - // DeploymentConfig resources - DeploymentConfigs []string `json:"deploymentConfigs"` - // Service resources. The check includes verifying that a running, ready pod is assigned. - Services []string `json:"services"` - } `json:"openShiftResources"` -} - -// TemplateData holds template parameters. Those will be applied to all -// values defined in the steps, as they are treated as Go templates. -// For example, Jenkins run attachments can be defined like this: -// -// runAttachments: -// - SCRR-{{.ProjectID}}-{{.ComponentID}}.docx, and then the -type TemplateData struct { - // Project ID (the prefix of the *-cd, *-dev and *-test namespaces). - ProjectID string - // Component ID (the value of the overall "componentID" or the specific - // step "componentID"). - ComponentID string - // ODS namespace read from the ods-core.env configuration (e.g. 
"ods") - OdsNamespace string - // ODS Git reference read from the ods-core.env configuration (e.g. "v3.0.0") - OdsGitRef string - // ODS image tag read from the ods-core.env configuration (e.g. "3.x") - OdsImageTag string - // ODS Bitbucket project name read from the ods-core.env configuration (e.g. "OPENDEVSTACK") - OdsBitbucketProject string - // ODS Git reference with underscores instead of slashes and dashes. - SanitizedOdsGitRef string - // Jenkins Build number - BuildNumber string + Steps []steps.TestStep `json:"steps"` } // readSteps reads "steps.yml" in given folder. // It does not allow extra fields to avoid typos, and checks if the given // step types are known. func readSteps(folder string) (*TestSteps, error) { - yamlContent, err := ioutil.ReadFile(folder + "/steps.yml") + yamlContent, err := os.ReadFile(folder + "/steps.yml") if err != nil { return nil, fmt.Errorf("Cannot read file: %w", err) } @@ -149,7 +40,18 @@ func readSteps(folder string) (*TestSteps, error) { } // A poor man's workaround for missing enums in Go. There are better ways // to do it, but nothing as simple as this. 
- allowedTypes := map[string]bool{"provision": true, "build": true, "upload": true} + allowedTypes := map[string]bool{ + steps.StepTypeProvision: true, + steps.StepTypeBuild: true, + steps.StepTypeUpload: true, + steps.StepTypeRun: true, + steps.StepTypeHTTP: true, + steps.StepTypeWait: true, + steps.StepTypeInspect: true, + steps.StepTypeExposeService: true, + steps.StepTypeBitbucket: true, + } + for i, step := range s.Steps { if _, ok := allowedTypes[step.Type]; !ok { allowed := []string{} diff --git a/tests/quickstarter/steps/bitbucket.go b/tests/quickstarter/steps/bitbucket.go new file mode 100644 index 000000000..540c3ec68 --- /dev/null +++ b/tests/quickstarter/steps/bitbucket.go @@ -0,0 +1,754 @@ +package steps + +import ( + "crypto/tls" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "testing" + "text/template" + "time" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" + "github.com/opendevstack/ods-core/tests/utils" +) + +// BitbucketPullRequest represents a Bitbucket pull request response +type BitbucketPullRequest struct { + ID int `json:"id"` + Version int `json:"version"` + Title string `json:"title"` + Description string `json:"description"` + State string `json:"state"` + Open bool `json:"open"` + Closed bool `json:"closed"` + FromRef struct { + ID string `json:"id"` + DisplayID string `json:"displayId"` + LatestCommit string `json:"latestCommit"` + } `json:"fromRef"` + ToRef struct { + ID string `json:"id"` + DisplayID string `json:"displayId"` + LatestCommit string `json:"latestCommit"` + } `json:"toRef"` + Author struct { + User struct { + Name string `json:"name"` + } `json:"user"` + } `json:"author"` + // Raw JSON for flexible querying + RawJSON map[string]interface{} `json:"-"` +} + +// BitbucketPRVerification represents a verification rule for PR content +type BitbucketPRVerification struct { + JSONPath string // JSON path like ".title", ".state", ".fromRef.displayId" + 
ExpectedValue interface{} // Expected value (string, bool, int, etc.) + Description string // Human-readable description of what's being checked +} + +// BitbucketRepository represents a Bitbucket repository +type BitbucketRepository struct { + Slug string `json:"slug"` +} + +// BitbucketRepositoriesResponse represents the list of repositories response +type BitbucketRepositoriesResponse struct { + Values []BitbucketRepository `json:"values"` +} + +// ExecuteBitbucket handles the bitbucket step type for Bitbucket interactions. +func ExecuteBitbucket(t *testing.T, step TestStep, tmplData TemplateData, config map[string]string, projectName string, testdataPath string) { + if step.BitbucketParams == nil { + t.Fatal("Missing bitbucket parameters") + } + + params := step.BitbucketParams + action := params.Action + logger.KeyValue("Action", action) + + switch action { + case "recreate-repo": + executeBitbucketRecreateRepo(t, step, params, config, projectName) + case "approve-pr": + executeBitbucketApprovePR(t, step, params, config, projectName) + case "get-pullrequest": + executeBitbucketGetPullRequest(t, step, params, config, projectName) + case "delete-files": + executeBitbucketDeleteFiles(t, step, params, config, projectName) + case "upload-file": + executeBitbucketUpload(t, step, params, tmplData, config, projectName, testdataPath) + default: + t.Fatalf("Unknown bitbucket action: %s (allowed: recreate-repo, approve-pr, get-pullrequest, delete-files, upload-file)", action) + } +} + +// executeBitbucketRecreateRepo handles repository recreation +func executeBitbucketRecreateRepo(t *testing.T, step TestStep, params *TestStepBitbucketParams, config map[string]string, projectName string) { + if params.Repository == "" { + t.Fatal("Missing repository parameter for recreate-repo action") + } + + project := params.Project + if project == "" { + project = projectName + } + project = renderTemplate(t, project, CreateTemplateData(config, step.ComponentID, "", projectName)) + + 
repository := renderTemplate(t, params.Repository, CreateTemplateData(config, step.ComponentID, "", projectName)) + + logger.Running(fmt.Sprintf("Recreating Bitbucket repository: %s/%s", project, repository)) + logger.KeyValue("Project", project) + logger.KeyValue("Repository", repository) + + if err := recreateBitbucketRepo(config, project, repository); err != nil { + logger.Failure(fmt.Sprintf("Recreate repository %s/%s", project, repository), err) + t.Fatalf("Failed to recreate repository: %v", err) + } + + logger.Success(fmt.Sprintf("Repository %s/%s recreated successfully", project, repository)) +} + +// executeBitbucketDeleteFiles handles deletion of files/folders from a repository +func executeBitbucketDeleteFiles(t *testing.T, step TestStep, params *TestStepBitbucketParams, config map[string]string, projectName string) { + if params.Repository == "" || len(params.Paths) == 0 { + t.Fatal("Missing repository or paths parameter for delete-files action") + } + + project := params.Project + if project == "" { + project = projectName + } + project = renderTemplate(t, project, CreateTemplateData(config, step.ComponentID, "", projectName)) + + repository := renderTemplate(t, params.Repository, CreateTemplateData(config, step.ComponentID, "", projectName)) + + // Render all paths through template engine + tmplData := CreateTemplateData(config, step.ComponentID, "", projectName) + var renderedPaths []string + for _, path := range params.Paths { + renderedPaths = append(renderedPaths, renderTemplate(t, path, tmplData)) + } + + commitMessage := params.CommitMessage + if commitMessage == "" { + commitMessage = "Remove files/folders" + } + commitMessage = renderTemplate(t, commitMessage, tmplData) + + logger.Running(fmt.Sprintf("Deleting files from Bitbucket repository: %s/%s", project, repository)) + logger.KeyValue("Project", project) + logger.KeyValue("Repository", repository) + logger.KeyValue("Paths to delete", strings.Join(renderedPaths, ", ")) + 
logger.KeyValue("Commit message", commitMessage) + + cdUserPassword, err := base64.StdEncoding.DecodeString(config["CD_USER_PWD_B64"]) + if err != nil { + logger.Failure("Decode CD user password", err) + t.Fatalf("Failed to decode CD user password: %v", err) + } + + // Build arguments for the shell script + scriptArgs := []string{ + fmt.Sprintf("--bitbucket=%s", config["BITBUCKET_URL"]), + fmt.Sprintf("--user=%s", config["CD_USER_ID"]), + fmt.Sprintf("--password=%s", cdUserPassword), + fmt.Sprintf("--project=%s", project), + fmt.Sprintf("--repository=%s", repository), + fmt.Sprintf("--message=%s", commitMessage), + } + + // Add each path to delete as a separate argument + for _, path := range renderedPaths { + scriptArgs = append(scriptArgs, fmt.Sprintf("--files=%s", path)) + } + + logger.Waiting("Executing Bitbucket delete-files script") + stdout, stderr, err := utils.RunScriptFromBaseDir("tests/scripts/delete-files-from-bitbucket-with-git.sh", scriptArgs, []string{}) + if err != nil { + logger.Error("Bitbucket delete-files script output:\n%s", stdout) + logger.Failure("Delete files from Bitbucket", err) + t.Fatalf( + "Execution of `delete-files-from-bitbucket-with-git.sh` failed: \nStdOut: %s\nStdErr: %s\nErr: %s\n", + stdout, + stderr, + err) + } else { + logger.Success(fmt.Sprintf("Deleted %d file(s)/folder(s) from %s/%s", len(renderedPaths), project, repository)) + } +} + +// executeBitbucketUpload handles uploading a file to a Bitbucket repository +func executeBitbucketUpload(t *testing.T, step TestStep, params *TestStepBitbucketParams, tmplData TemplateData, config map[string]string, projectName string, testdataPath string) { + if params.File == "" { + t.Fatal("Missing file parameter for upload-file action") + } + + project := params.Project + if project == "" { + project = projectName + } + project = renderTemplate(t, project, tmplData) + + defaultRepository := fmt.Sprintf("%s-%s", strings.ToLower(projectName), step.ComponentID) + uploadParams := 
&TestStepUploadParams{ + File: params.File, + Filename: params.Filename, + Render: params.Render, + Repository: params.Repository, + } + + uploadFileToBitbucket(t, uploadParams, tmplData, testdataPath, defaultRepository, project, config) +} + +// uploadFileToBitbucket uploads a file to a Bitbucket repository using the shared script. +func uploadFileToBitbucket(t *testing.T, uploadParams *TestStepUploadParams, tmplData TemplateData, testdataPath string, defaultRepository string, project string, config map[string]string) { + if uploadParams == nil || uploadParams.File == "" { + t.Fatalf("Missing upload parameters.") + } + + filename := uploadParams.Filename + if filename == "" { + filename = filepath.Base(uploadParams.File) + } + + fileToUpload := filepath.Clean(filepath.Join(testdataPath, uploadParams.File)) + + if _, err := os.Stat(fileToUpload); err != nil { + logger.Failure("Load file to upload", err) + t.Fatalf("Failed to load file to upload: \nErr: %s\n", err) + } + + if uploadParams.Render { + logger.Waiting("Rendering template to upload") + if err := renderUploadFile(fileToUpload, tmplData); err != nil { + logger.Failure("Render file", err) + t.Fatalf("Failed to render file: \nErr: %s\n", err) + } + } + + targetRepository := defaultRepository + if len(uploadParams.Repository) > 0 { + targetRepository = renderTemplate(t, uploadParams.Repository, tmplData) + } + + project = renderTemplate(t, project, tmplData) + + logger.Running(fmt.Sprintf("Uploading file %s", uploadParams.File)) + logger.KeyValue("Repository", targetRepository) + logger.KeyValue("Filename", filename) + logger.Waiting("Executing BitBucket upload script") + + cdUserPassword, err := base64.StdEncoding.DecodeString(config["CD_USER_PWD_B64"]) + if err != nil { + logger.Failure("Decode CD user password", err) + t.Fatalf("Execution of `upload-file-to-bitbucket-with-git.sh` failed: \nErr: %s\n", err) + } + + stdout, stderr, err := 
utils.RunScriptFromBaseDir("tests/scripts/upload-file-to-bitbucket-with-git.sh", []string{ + fmt.Sprintf("--bitbucket=%s", config["BITBUCKET_URL"]), + fmt.Sprintf("--user=%s", config["CD_USER_ID"]), + fmt.Sprintf("--password=%s", cdUserPassword), + fmt.Sprintf("--project=%s", project), + fmt.Sprintf("--repository=%s", targetRepository), + fmt.Sprintf("--file=%s", fileToUpload), + fmt.Sprintf("--filename=%s", filename), + }, []string{}) + if err != nil { + logger.Error("BitBucket upload script output:\n%s", stdout) + logger.Failure("Upload file to BitBucket", err) + t.Fatalf( + "Execution of `upload-file-to-bitbucket-with-git.sh` failed: \nStdOut: %s\nStdErr: %s\nErr: %s\n", + stdout, + stderr, + err) + } + + logger.Success(fmt.Sprintf("Uploaded file %s to %s/%s", filename, project, targetRepository)) +} + +// renderUploadFile renders the given file as a Go template using tmplData. +func renderUploadFile(filePath string, tmplData TemplateData) error { + tmpl, err := template.ParseFiles(filePath) + if err != nil { + return err + } + + outputFile, err := os.Create(filePath) + if err != nil { + return err + } + defer outputFile.Close() //nolint:errcheck + + logger.Waiting("Rendering file") + if err := tmpl.Execute(outputFile, tmplData); err != nil { + return err + } + + logger.Success("File rendered") + return nil +} + +func executeBitbucketGetPullRequest(t *testing.T, step TestStep, params *TestStepBitbucketParams, config map[string]string, projectName string) { + if params.Repository == "" || params.PullRequestID == "" { + t.Fatal("Missing repository or pullRequestID parameter for get-pullrequest action") + } + + project := params.Project + if project == "" { + project = projectName + } + project = renderTemplate(t, project, CreateTemplateData(config, step.ComponentID, "", projectName)) + + repository := renderTemplate(t, params.Repository, CreateTemplateData(config, step.ComponentID, "", projectName)) + prID := params.PullRequestID + + 
logger.Running(fmt.Sprintf("Fetching Bitbucket pull request: %s/%s#%s", project, repository, prID)) + logger.KeyValue("Project", project) + logger.KeyValue("Repository", repository) + logger.KeyValue("Pull Request ID", prID) + + pr, err := getBitbucketPR(config, project, repository, prID) + if err != nil { + logger.Failure(fmt.Sprintf("Get PR %s/%s#%s details", project, repository, prID), err) + t.Fatalf("Failed to get pull request details: %v", err) + } + if pr == nil { + logger.Failure(fmt.Sprintf("PR %s/%s#%s does not exist", project, repository, prID), nil) + t.Fatalf("Pull request %s/%s#%s does not exist", project, repository, prID) + } + + logger.Success(fmt.Sprintf("Pull request %s/%s#%s found - Title: '%s', State: %s, Branch: %s", + project, repository, prID, pr.Title, pr.State, pr.FromRef.DisplayID)) + logger.KeyValue("Latest commit", pr.FromRef.LatestCommit) + logger.KeyValue("Author", pr.Author.User.Name) + + // Verify PR content if verification rules are provided + if params.Verify != nil && params.Verify.PRChecks != nil { + logger.Running(fmt.Sprintf("Verifying pull request content: %s/%s#%s", project, repository, prID)) + verifications := buildVerificationsFromMap(params.Verify.PRChecks) + if err := verifyBitbucketPRContent(pr, verifications); err != nil { + logger.Failure(fmt.Sprintf("PR content verification failed for %s/%s#%s", project, repository, prID), err) + t.Fatalf("Pull request content verification failed: %v", err) + } + logger.Success(fmt.Sprintf("Pull request content verified successfully (%d checks passed)", len(verifications))) + } +} + +// executeBitbucketApprovePR handles pull request approval with validation, reviewer addition, and approval +func executeBitbucketApprovePR(t *testing.T, step TestStep, params *TestStepBitbucketParams, config map[string]string, projectName string) { + if params.Repository == "" || params.PullRequestID == "" { + t.Fatal("Missing repository or pullRequestID parameter for approve-pr action") + } + + 
project := params.Project + if project == "" { + project = projectName + } + project = renderTemplate(t, project, CreateTemplateData(config, step.ComponentID, "", projectName)) + + repository := renderTemplate(t, params.Repository, CreateTemplateData(config, step.ComponentID, "", projectName)) + prID := params.PullRequestID + reviewer := params.Reviewer + + logger.Running(fmt.Sprintf("Approving Bitbucket pull request: %s/%s#%s", project, repository, prID)) + logger.KeyValue("Project", project) + logger.KeyValue("Repository", repository) + logger.KeyValue("Pull Request ID", prID) + if reviewer != "" { + logger.KeyValue("Reviewer to add", reviewer) + } + + // Step 1: Get PR details (validates existence and retrieves commit info) + logger.Running(fmt.Sprintf("Fetching pull request details: %s/%s#%s", project, repository, prID)) + pr, err := getBitbucketPR(config, project, repository, prID) + if err != nil { + logger.Failure(fmt.Sprintf("Get PR %s/%s#%s details", project, repository, prID), err) + t.Fatalf("Failed to get pull request details: %v", err) + } + if pr == nil { + logger.Failure(fmt.Sprintf("PR %s/%s#%s does not exist", project, repository, prID), nil) + t.Fatalf("Pull request %s/%s#%s does not exist", project, repository, prID) + } + logger.Success(fmt.Sprintf("Pull request %s/%s#%s found - Title: '%s', State: %s, Branch: %s", + project, repository, prID, pr.Title, pr.State, pr.FromRef.DisplayID)) + logger.KeyValue("Latest commit", pr.FromRef.LatestCommit) + logger.KeyValue("Author", pr.Author.User.Name) + + // Step 1.5: Verify PR content if verification rules are provided + if params.Verify != nil && params.Verify.PRChecks != nil { + logger.Running(fmt.Sprintf("Verifying pull request content: %s/%s#%s", project, repository, prID)) + verifications := buildVerificationsFromMap(params.Verify.PRChecks) + if err := verifyBitbucketPRContent(pr, verifications); err != nil { + logger.Failure(fmt.Sprintf("PR content verification failed for %s/%s#%s", project, 
repository, prID), err) + t.Fatalf("Pull request content verification failed: %v", err) + } + logger.Success(fmt.Sprintf("Pull request content verified successfully (%d checks passed)", len(verifications))) + } + + // Step 2: Add reviewer (use CD_USER if not specified) + reviewerToAdd := reviewer + if reviewerToAdd == "" { + reviewerToAdd = config["CD_USER_ID"] + } + logger.Running(fmt.Sprintf("Adding reviewer %s to pull request: %s/%s#%s", reviewerToAdd, project, repository, prID)) + if err := addBitbucketPRReviewer(config, project, repository, prID, reviewerToAdd); err != nil { + logger.Failure(fmt.Sprintf("Add reviewer %s to PR %s/%s#%s", reviewerToAdd, project, repository, prID), err) + t.Fatalf("Failed to add reviewer: %v", err) + } + logger.Success(fmt.Sprintf("Reviewer %s added to pull request", reviewerToAdd)) + + // Step 3: Approve the pull request + logger.Running(fmt.Sprintf("Approving pull request: %s/%s#%s", project, repository, prID)) + if err := approveBitbucketPR(config, project, repository, prID); err != nil { + logger.Failure(fmt.Sprintf("Approve PR %s/%s#%s", project, repository, prID), err) + t.Fatalf("Failed to approve pull request: %v", err) + } + + logger.Success(fmt.Sprintf("Pull request %s/%s#%s approved successfully", project, repository, prID)) +} + +// recreateBitbucketRepo recreates a Bitbucket repository +func recreateBitbucketRepo(config map[string]string, project string, repo string) error { + password, err := base64.StdEncoding.DecodeString(config["CD_USER_PWD_B64"]) + if err != nil { + return fmt.Errorf("Error decoding cd_user password: %w", err) + } + + // Delete the repository + url := fmt.Sprintf("%s/rest/api/1.0/projects/%s/repos/%s", + config["BITBUCKET_URL"], project, repo) + + tlsConfig := &tls.Config{InsecureSkipVerify: true} + client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}} + + req, err := http.NewRequest("DELETE", url, nil) + if err != nil { + return fmt.Errorf("Failed to create delete 
request for repository %s/%s: %w", project, repo, err) + } + + req.SetBasicAuth(config["CD_USER_ID"], string(password)) + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("Failed to delete repository %s/%s: %w", project, repo, err) + } + defer resp.Body.Close() //nolint:errcheck + + // Accept 202 (Accepted - scheduled for deletion), 204 (No Content), or 200 (OK) + if resp.StatusCode != http.StatusAccepted && resp.StatusCode != http.StatusNoContent && resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) //nolint:errcheck + return fmt.Errorf("Failed to delete repository %s/%s: HTTP %d - %s", project, repo, resp.StatusCode, string(body)) + } + + // If deletion was scheduled (202), wait for it to complete + if resp.StatusCode == http.StatusAccepted { + logger.Info("Repository %s/%s scheduled for deletion, waiting...", project, repo) + // Wait for repository to be deleted + for i := 0; i < 30; i++ { + time.Sleep(2 * time.Second) + exists, err := checkBitbucketRepositoryExists(config, project, repo) + if err != nil { + return fmt.Errorf("Failed to check repository deletion status: %w", err) + } + if !exists { + logger.Success(fmt.Sprintf("Repository %s/%s deleted", project, repo)) + break + } + if i == 29 { + return fmt.Errorf("Timeout waiting for repository %s/%s to be deleted", project, repo) + } + logger.Info("Waiting for repository deletion (attempt %d/30)...", i+1) + } + } + + // Recreate the repository + createURL := fmt.Sprintf("%s/rest/api/1.0/projects/%s/repos", + config["BITBUCKET_URL"], project) + + payload := fmt.Sprintf(`{"name":"%s","scmId":"git"}`, repo) + + req, err = http.NewRequest("POST", createURL, strings.NewReader(payload)) + if err != nil { + return fmt.Errorf("Failed to create repository creation request for %s/%s: %w", project, repo, err) + } + + req.SetBasicAuth(config["CD_USER_ID"], string(password)) + req.Header.Set("Accept", "application/json;charset=UTF-8") + req.Header.Set("Content-Type", 
"application/json") + + resp, err = client.Do(req) + if err != nil { + return fmt.Errorf("Failed to recreate repository %s/%s: %w", project, repo, err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusCreated && resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) //nolint:errcheck + return fmt.Errorf("Failed to recreate repository %s/%s: HTTP %d - %s", project, repo, resp.StatusCode, string(body)) + } + + return nil +} + +// approveBitbucketPR approves a pull request in Bitbucket +func approveBitbucketPR(config map[string]string, project string, repo string, prID string) error { + password, err := base64.StdEncoding.DecodeString(config["CD_USER_PWD_B64"]) + if err != nil { + return fmt.Errorf("Error decoding cd_user password: %w", err) + } + + url := fmt.Sprintf("%s/rest/api/latest/projects/%s/repos/%s/pull-requests/%s/review", + config["BITBUCKET_URL"], project, repo, prID) + + payload := `{"participantStatus":"APPROVED"}` + + tlsConfig := &tls.Config{InsecureSkipVerify: true} + client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}} + + req, err := http.NewRequest("PUT", url, strings.NewReader(payload)) + if err != nil { + return fmt.Errorf("Failed to create request to approve PR %s/%s#%s: %w", project, repo, prID, err) + } + + req.SetBasicAuth(config["CD_USER_ID"], string(password)) + req.Header.Set("Accept", "application/json;charset=UTF-8") + req.Header.Set("Content-Type", "application/json") + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("Failed to approve PR %s/%s#%s: %w", project, repo, prID, err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) //nolint:errcheck + return fmt.Errorf("Failed to approve PR %s/%s#%s: HTTP %d - %s", project, repo, prID, resp.StatusCode, string(body)) + } + + return nil +} + +// getBitbucketPR retrieves the full pull request details +func 
getBitbucketPR(config map[string]string, project string, repo string, prID string) (*BitbucketPullRequest, error) { + password, err := base64.StdEncoding.DecodeString(config["CD_USER_PWD_B64"]) + if err != nil { + return nil, fmt.Errorf("Error decoding cd_user password: %w", err) + } + + url := fmt.Sprintf("%s/rest/api/1.0/projects/%s/repos/%s/pull-requests/%s", + config["BITBUCKET_URL"], project, repo, prID) + + tlsConfig := &tls.Config{InsecureSkipVerify: true} + client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}} + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, fmt.Errorf("Failed to create request for PR %s/%s#%s: %w", project, repo, prID, err) + } + + req.SetBasicAuth(config["CD_USER_ID"], string(password)) + + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("Failed to get pull request %s/%s#%s: %w", project, repo, prID, err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode == http.StatusNotFound { + return nil, nil // PR doesn't exist + } + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) //nolint:errcheck + return nil, fmt.Errorf("Failed to get pull request %s/%s#%s: HTTP %d - %s", project, repo, prID, resp.StatusCode, string(body)) + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("Failed to read response body for PR %s/%s#%s: %w", project, repo, prID, err) + } + + var pr BitbucketPullRequest + err = json.Unmarshal(body, &pr) + if err != nil { + return nil, fmt.Errorf("Failed to parse PR response for %s/%s#%s: %w", project, repo, prID, err) + } + + // Also unmarshal to raw map for flexible querying + var rawJSON map[string]interface{} + err = json.Unmarshal(body, &rawJSON) + if err == nil { + pr.RawJSON = rawJSON + } + + return &pr, nil +} + +// verifyBitbucketPRContent verifies pull request content using JSON path queries +func verifyBitbucketPRContent(pr *BitbucketPullRequest, verifications 
[]BitbucketPRVerification) error { + if pr == nil { + return fmt.Errorf("PR is nil") + } + + if pr.RawJSON == nil { + return fmt.Errorf("PR raw JSON data is not available") + } + + for _, verification := range verifications { + actualValue, err := extractJSONPathValue(pr.RawJSON, verification.JSONPath) + if err != nil { + return fmt.Errorf("Failed to extract value from path '%s': %w", verification.JSONPath, err) + } + + if !compareValues(actualValue, verification.ExpectedValue) { + return fmt.Errorf("%s: expected '%v', got '%v' (path: %s)", + verification.Description, verification.ExpectedValue, actualValue, verification.JSONPath) + } + logger.Debug("%s: expected '%v', got '%v' (path: %s)", + verification.Description, verification.ExpectedValue, actualValue, verification.JSONPath) + } + + return nil +} + +// extractJSONPathValue extracts a value from a JSON object using a simple path notation +// Supports paths like ".title", ".state", ".fromRef.displayId", ".author.user.name" +func extractJSONPathValue(data map[string]interface{}, path string) (interface{}, error) { + if path == "" { + return nil, fmt.Errorf("Empty path") + } + + // Remove leading dot if present + path = strings.TrimPrefix(path, ".") + + // Split path by dots + parts := strings.Split(path, ".") + + var current interface{} = data + + for _, part := range parts { + switch v := current.(type) { + case map[string]interface{}: + if val, ok := v[part]; ok { + current = val + } else { + return nil, fmt.Errorf("Key '%s' not found in path '%s'", part, path) + } + default: + return nil, fmt.Errorf("Cannot navigate through non-object at '%s' in path '%s'", part, path) + } + } + + return current, nil +} + +// compareValues compares two values for equality, handling different types +func compareValues(actual, expected interface{}) bool { + // Handle nil cases + if actual == nil && expected == nil { + return true + } + if actual == nil || expected == nil { + return false + } + + // Convert both to strings for 
comparison + actualStr := fmt.Sprintf("%v", actual) + expectedStr := fmt.Sprintf("%v", expected) + + // Check for "contains:" prefix for substring matching + if strings.HasPrefix(expectedStr, "contains:") { + substring := strings.TrimPrefix(expectedStr, "contains:") + return strings.Contains(actualStr, substring) + } + + // Exact match + return actualStr == expectedStr +} + +// buildVerificationsFromMap converts a map of JSON paths to expected values into BitbucketPRVerification structs +func buildVerificationsFromMap(checks map[string]interface{}) []BitbucketPRVerification { + var verifications []BitbucketPRVerification + for path, expectedValue := range checks { + verifications = append(verifications, BitbucketPRVerification{ + JSONPath: path, + ExpectedValue: expectedValue, + Description: fmt.Sprintf("Check '%s'", path), + }) + } + return verifications +} + +// addBitbucketPRReviewer adds a user as a reviewer to a pull request +func addBitbucketPRReviewer(config map[string]string, project string, repo string, prID string, username string) error { + password, err := base64.StdEncoding.DecodeString(config["CD_USER_PWD_B64"]) + if err != nil { + return fmt.Errorf("Error decoding cd_user password: %w", err) + } + + url := fmt.Sprintf("%s/rest/api/1.0/projects/%s/repos/%s/pull-requests/%s/participants", + config["BITBUCKET_URL"], project, repo, prID) + + payload := fmt.Sprintf(`{"role":"REVIEWER","user":{"name":"%s"}}`, username) + + // Create HTTP client with insecure TLS + tlsConfig := &tls.Config{InsecureSkipVerify: true} + client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}} + + req, err := http.NewRequest("POST", url, strings.NewReader(payload)) + if err != nil { + return fmt.Errorf("Failed to create request to add reviewer to PR %s/%s#%s: %w", project, repo, prID, err) + } + + req.SetBasicAuth(config["CD_USER_ID"], string(password)) + req.Header.Set("Accept", "application/json;charset=UTF-8") + req.Header.Set("Content-Type", 
"application/json") + + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("Failed to add reviewer to PR %s/%s#%s: %w", project, repo, prID, err) + } + defer resp.Body.Close() //nolint:errcheck + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + body, _ := io.ReadAll(resp.Body) //nolint:errcheck + return fmt.Errorf("Failed to add reviewer to PR %s/%s#%s: HTTP %d - %s", project, repo, prID, resp.StatusCode, string(body)) + } + + return nil +} + +// checkBitbucketRepositoryExists checks if a repository exists in a Bitbucket project +func checkBitbucketRepositoryExists(config map[string]string, project string, repo string) (bool, error) { + password, err := base64.StdEncoding.DecodeString(config["CD_USER_PWD_B64"]) + if err != nil { + return false, fmt.Errorf("Error decoding cd_user password: %w", err) + } + + url := fmt.Sprintf("%s/rest/api/1.0/projects/%s/repos/%s", config["BITBUCKET_URL"], project, repo) + + tlsConfig := &tls.Config{InsecureSkipVerify: true} + client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsConfig}} + + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return false, fmt.Errorf("Failed to create request to check repository %s/%s: %w", project, repo, err) + } + + req.SetBasicAuth(config["CD_USER_ID"], string(password)) + + resp, err := client.Do(req) + if err != nil { + return false, fmt.Errorf("Failed to check repository %s/%s: %w", project, repo, err) + } + defer resp.Body.Close() //nolint:errcheck + + return resp.StatusCode == http.StatusOK, nil +} diff --git a/tests/quickstarter/steps/build.go b/tests/quickstarter/steps/build.go new file mode 100644 index 000000000..776010c78 --- /dev/null +++ b/tests/quickstarter/steps/build.go @@ -0,0 +1,56 @@ +package steps + +import ( + "fmt" + "testing" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" + "github.com/opendevstack/ods-core/tests/utils" +) + +// ExecuteBuild handles the build step type. 
+func ExecuteBuild(t *testing.T, step TestStep, testdataPath string, tmplData TemplateData, repoName string, config map[string]string, projectName string) { + logger.Running(fmt.Sprintf("Build for repository %s", repoName)) + + branch := DefaultBranch + if len(step.BuildParams.Branch) > 0 { + branch = renderTemplate(t, step.BuildParams.Branch, tmplData) + } + logger.KeyValue("Branch", branch) + + var repository string = repoName + if len(step.BuildParams.Repository) > 0 { + repository = renderTemplate(t, step.BuildParams.Repository, tmplData) + } + logger.KeyValue("Repository", repository) + + // Render environment variable values through template engine + renderedEnv := make([]utils.EnvPair, len(step.BuildParams.Env)) + for i, envPair := range step.BuildParams.Env { + renderedEnv[i] = utils.EnvPair{ + Name: envPair.Name, + Value: renderTemplate(t, envPair.Value, tmplData), + } + } + + request := utils.RequestBuild{ + Repository: repository, + Branch: branch, + Project: projectName, + Env: renderedEnv, + } + pipelineName := renderTemplate(t, step.BuildParams.Pipeline, tmplData) + logger.KeyValue("Pipeline", pipelineName) + + verify := step.BuildParams.Verify + + logger.Waiting("Jenkins pipeline execution") + buildName, err := utils.RunJenkinsPipeline(DefaultJenkinsfile, request, pipelineName) + if err != nil { + logger.Failure("Jenkins pipeline execution", err) + t.Fatal(err) + } + logger.Success(fmt.Sprintf("Build triggered with name %s", buildName)) + + verifyPipelineRun(t, step, verify, testdataPath, repoName, buildName, config, projectName) +} diff --git a/tests/quickstarter/steps/executor.go b/tests/quickstarter/steps/executor.go new file mode 100644 index 000000000..647a135a8 --- /dev/null +++ b/tests/quickstarter/steps/executor.go @@ -0,0 +1,167 @@ +package steps + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "testing" + "time" + + "github.com/opendevstack/ods-core/tests/quickstarter/diagnostics" + 
"github.com/opendevstack/ods-core/tests/quickstarter/logger" +) + +// StepExecutor handles step execution with lifecycle hooks and retry logic. +type StepExecutor struct { + testdataPath string + tmplData TemplateData + diagnostics *diagnostics.DiagnosticsCollector +} + +// NewStepExecutor creates a new step executor. +func NewStepExecutor(testdataPath string, tmplData TemplateData) *StepExecutor { + return &StepExecutor{ + testdataPath: testdataPath, + tmplData: tmplData, + diagnostics: diagnostics.NewDiagnosticsCollector(context.TODO()), + } +} + +// ExecuteWithHooks executes a step with before/after hooks. +func (se *StepExecutor) ExecuteWithHooks( + t *testing.T, + step *TestStep, + handler func() error, +) error { + log := logger.GetLogger() + + // Execute beforeStep hook if specified + if step.BeforeStep != "" { + log.Infof("Executing beforeStep hook: %s", step.BeforeStep) + if err := se.executeHook(t, step.BeforeStep); err != nil { + return fmt.Errorf("beforeStep hook failed: %w", err) + } + } + + // Execute the main step with retry logic + err := se.executeWithRetry(t, step, handler) + + // Execute afterStep hook regardless of success/failure + if step.AfterStep != "" { + log.Infof("Executing afterStep hook: %s", step.AfterStep) + if hookErr := se.executeHook(t, step.AfterStep); hookErr != nil { + log.Warnf("afterStep hook failed: %v", hookErr) + // Don't override main error, but log the hook failure + if err == nil { + err = hookErr + } + } + } + + return err +} + +// executeWithRetry executes a step with retry logic. 
+func (se *StepExecutor) executeWithRetry( + t *testing.T, + step *TestStep, + handler func() error, +) error { + log := logger.GetLogger() + + // Determine retry configuration + retryConfig := step.Retry + if retryConfig == nil { + retryConfig = &StepRetryConfig{Attempts: 0} + } + + maxAttempts := retryConfig.Attempts + 1 // +1 for the initial attempt + if maxAttempts < 1 { + maxAttempts = 1 + } + + var lastErr error + for attempt := 1; attempt <= maxAttempts; attempt++ { + if attempt > 1 { + log.Infof("Retry attempt %d/%d", attempt, maxAttempts) + } + + lastErr = handler() + + // If successful, return + if lastErr == nil { + return nil + } + + // Check if we should retry + if attempt < maxAttempts { + // Check if error is transient (if configured) + if retryConfig.OnlyTransient && !diagnostics.IsTransientError(lastErr) { + log.Warnf("Error is not transient, skipping retries: %v", lastErr) + return lastErr + } + + // Calculate delay + delay := 2 * time.Second // default delay + if retryConfig.Delay != "" { + parsedDelay, err := time.ParseDuration(retryConfig.Delay) + if err == nil { + delay = parsedDelay + } + } + + log.Infof("Waiting %v before next attempt", delay) + time.Sleep(delay) + } + } + + return lastErr +} + +// executeHook executes a hook script. 
+func (se *StepExecutor) executeHook(t *testing.T, hookFile string) error { + hookPath := filepath.Join(se.testdataPath, hookFile) + + // Check if hook file exists + if _, err := os.Stat(hookPath); os.IsNotExist(err) { + return fmt.Errorf("hook file not found: %s", hookPath) + } + + // Execute the hook script + cmd := exec.Command("bash", hookPath) + + // Set up environment with template data + env := os.Environ() + for key, value := range se.tmplData { + if strVal, ok := value.(string); ok { + env = append(env, fmt.Sprintf("%s=%s", key, strVal)) + } + } + cmd.Env = env + + if err := cmd.Run(); err != nil { + return fmt.Errorf("hook execution failed: %w", err) + } + + return nil +} + +// ShouldSkipStep determines if a step should be skipped. +func ShouldSkipStep(t *testing.T, step *TestStep, tmplData TemplateData) bool { + if step.Skip { + return true + } + + // Evaluate skipIf condition + if step.SkipIf != "" { + rendered := renderTemplate(t, step.SkipIf, tmplData) + // Simple evaluation: treat non-empty string as true + if rendered != "" && rendered != "false" && rendered != "0" { + return true + } + } + + return false +} diff --git a/tests/quickstarter/steps/expose_service.go b/tests/quickstarter/steps/expose_service.go new file mode 100644 index 000000000..f24b62eaf --- /dev/null +++ b/tests/quickstarter/steps/expose_service.go @@ -0,0 +1,62 @@ +package steps + +import ( + "fmt" + "testing" + "time" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" +) + +// ExecuteExposeService handles the expose-service step type. +// This step explicitly exposes services by setting up routes or port-forwards +// based on the execution environment. 
+func ExecuteExposeService(t *testing.T, step TestStep, tmplData TemplateData, projectName string) { + if step.ExposeServiceParams == nil || len(step.ExposeServiceParams.Services) == 0 { + t.Fatalf("Missing expose-service parameters: no services defined") + } + + logger.SubSection(fmt.Sprintf("Exposing %d service(s)", len(step.ExposeServiceParams.Services))) + + for i, svcConfig := range step.ExposeServiceParams.Services { + logger.Step(i+1, len(step.ExposeServiceParams.Services), "expose-service", fmt.Sprintf("Expose service: %s", svcConfig.ServiceName)) + + // Render namespace template (defaults to projectName-dev) + namespace := svcConfig.Namespace + if namespace == "" { + namespace = fmt.Sprintf("%s-dev", projectName) + } else { + namespace = renderTemplate(t, namespace, tmplData) + } + + // Default port + port := svcConfig.Port + if port == "" { + port = "8080" + } + + // Render service name template + serviceName := renderTemplate(t, svcConfig.ServiceName, tmplData) + + // Wait for service to exist before attempting to expose + logger.Waiting(fmt.Sprintf("Service %s/%s to be ready", namespace, serviceName)) + err := WaitForServiceReady(serviceName, namespace, 120*time.Second) + if err != nil { + logger.Failure(fmt.Sprintf("Service ready check: %s/%s", namespace, serviceName), err) + t.Fatalf("Service not ready: %v", err) + } + + // Construct and resolve the service URL + serviceURL := ConstructServiceURL(serviceName, namespace, port, "") + resolvedURL := ResolveServiceURL(t, serviceURL, tmplData) + + logger.Success(fmt.Sprintf("Service exposed and accessible at: %s", resolvedURL)) + + // Store the resolved URL in template data for use by subsequent steps + // This allows scripts in "run" steps to access the exposed service + serviceKey := fmt.Sprintf("ExposedService_%s", serviceName) + tmplData[serviceKey] = resolvedURL + } + + logger.Success("All services exposed successfully") +} diff --git a/tests/quickstarter/steps/golden.go 
b/tests/quickstarter/steps/golden.go new file mode 100644 index 000000000..23c196306 --- /dev/null +++ b/tests/quickstarter/steps/golden.go @@ -0,0 +1,45 @@ +package steps + +import ( + "bytes" + "encoding/json" + "fmt" + "html/template" + + "github.com/google/go-cmp/cmp" +) + +// verifyJSONGoldenFile compares actual JSON output against a golden file template +func verifyJSONGoldenFile(componentID string, wantFile string, gotFile string, tmplData TemplateData) error { + + var want bytes.Buffer + tmpl, err := template.ParseFiles(wantFile) + if err != nil { + return fmt.Errorf("failed to load golden file to verify state: %w", err) + } + err = tmpl.Execute(&want, tmplData) + if err != nil { + return fmt.Errorf("failed to render file to verify state: %w", err) + } + + // Unmarshal both JSONs into objects + var wantObj, gotObj interface{} + if err := json.Unmarshal(want.Bytes(), &wantObj); err != nil { + return fmt.Errorf("failed to unmarshal want json: %w", err) + } + if err := json.Unmarshal([]byte(gotFile), &gotObj); err != nil { + return fmt.Errorf("failed to unmarshal got json: %w", err) + } + + // Compare the actual objects, not the strings + if diff := cmp.Diff(wantObj, gotObj); diff != "" { + // Pretty print both for easier comparison + wantJSON, _ := json.MarshalIndent(wantObj, "", " ") //nolint:errcheck + gotJSON, _ := json.MarshalIndent(gotObj, "", " ") //nolint:errcheck + + return fmt.Errorf("state mismatch for %s\n\n=== EXPECTED ===\n%s\n\n=== ACTUAL ===\n%s\n\n=== DIFF (-want +got) ===\n%s", + componentID, string(wantJSON), string(gotJSON), diff) + } + + return nil +} diff --git a/tests/quickstarter/steps/http.go b/tests/quickstarter/steps/http.go new file mode 100644 index 000000000..d4b941973 --- /dev/null +++ b/tests/quickstarter/steps/http.go @@ -0,0 +1,279 @@ +package steps + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "regexp" + "strings" + "testing" + "text/template" + "time" + + 
"github.com/opendevstack/ods-core/tests/quickstarter/logger" + "github.com/tidwall/gjson" +) + +// ExecuteHTTP handles the http step type for testing HTTP endpoints. +func ExecuteHTTP(t *testing.T, step TestStep, testdataPath string, tmplData TemplateData) { + if step.HTTPParams == nil { + t.Fatal("Missing HTTP parameters") + } + + params := step.HTTPParams + + // Resolve URL using smart resolution (route -> in-cluster -> port-forward) + url := ResolveServiceURL(t, params.URL, tmplData) + logger.KeyValue("URL", url) + + // Default method to GET + method := params.Method + if method == "" { + method = "GET" + } + logger.KeyValue("Method", method) + + // Default timeout to 30 seconds + timeout := params.Timeout + if timeout == 0 { + timeout = 30 + } + logger.KeyValue("Timeout", fmt.Sprintf("%ds", timeout)) + + // Default retry attempts + retryAttempts := 1 + retryDelay := 0 * time.Second + if params.Retry != nil { + retryAttempts = params.Retry.Attempts + if retryAttempts == 0 { + retryAttempts = 1 + } + if params.Retry.Delay != "" { + var err error + retryDelay, err = time.ParseDuration(params.Retry.Delay) + if err != nil { + t.Fatalf("Invalid retry delay duration: %s", params.Retry.Delay) + } + } + } + if retryAttempts > 1 { + logger.KeyValue("Retries", fmt.Sprintf("%d (delay: %v)", retryAttempts, retryDelay)) + } + + logger.Running(fmt.Sprintf("Testing HTTP endpoint: %s %s", method, url)) + + var lastErr error + var resp *http.Response + var body []byte + + // Retry logic + for attempt := 1; attempt <= retryAttempts; attempt++ { + if attempt > 1 { + logger.Waiting(fmt.Sprintf("Retry attempt %d/%d after %v", attempt, retryAttempts, retryDelay)) + time.Sleep(retryDelay) + } + + var err error + resp, body, err = executeHTTPRequest(method, url, params, tmplData, timeout) + if err != nil { + lastErr = err + continue + } + + // Check status code + if params.ExpectedStatus > 0 && resp.StatusCode != params.ExpectedStatus { + lastErr = fmt.Errorf("expected status %d, got 
%d", params.ExpectedStatus, resp.StatusCode) + continue + } + + // All checks passed + lastErr = nil + break + } + + if lastErr != nil { + logger.Failure(fmt.Sprintf("HTTP request after %d attempts", retryAttempts), lastErr) + t.Fatalf("HTTP request failed after %d attempts: %v", retryAttempts, lastErr) + } + + logger.Success(fmt.Sprintf("HTTP request returned %d %s", resp.StatusCode, http.StatusText(resp.StatusCode))) + + // Verify expected body if provided + if params.ExpectedBody != "" { + logger.Waiting("Verifying response body against golden file") + goldenFile := fmt.Sprintf("%s/%s", testdataPath, params.ExpectedBody) + if err := verifyJSONGoldenFile(step.ComponentID, goldenFile, string(body), tmplData); err != nil { + logger.Failure("Response body verification", err) + t.Fatalf("Response body mismatch: %v", err) + } + logger.Success("Response body matches golden file") + } + + // Run assertions + if len(params.Assertions) > 0 { + logger.Waiting(fmt.Sprintf("Running %d assertion(s)", len(params.Assertions))) + if err := verifyHTTPAssertions(params.Assertions, body, tmplData, t); err != nil { + logger.Failure("HTTP assertions", err) + t.Fatalf("Assertion failed: %v", err) + } + logger.Success(fmt.Sprintf("All %d assertions passed", len(params.Assertions))) + } +} + +// executeHTTPRequest performs the actual HTTP request +func executeHTTPRequest(method, url string, params *TestStepHTTPParams, tmplData TemplateData, timeout int) (*http.Response, []byte, error) { + // Create request body if provided + var bodyReader io.Reader + if params.Body != "" { + renderedBody := renderTemplateHTTP(nil, params.Body, tmplData) + bodyReader = strings.NewReader(renderedBody) + } + + // Create request + req, err := http.NewRequest(method, url, bodyReader) + if err != nil { + return nil, nil, fmt.Errorf("failed to create request: %w", err) + } + + // Add headers + for key, value := range params.Headers { + renderedValue := renderTemplateHTTP(nil, value, tmplData) + 
req.Header.Set(key, renderedValue) + } + + // Create client with timeout + client := &http.Client{ + Timeout: time.Duration(timeout) * time.Second, + } + + // Execute request + resp, err := client.Do(req) + if err != nil { + return nil, nil, fmt.Errorf("request failed: %w", err) + } + defer resp.Body.Close() //nolint:errcheck + + // Read response body + body, err := io.ReadAll(resp.Body) + if err != nil { + return resp, nil, fmt.Errorf("failed to read response body: %w", err) + } + + return resp, body, nil +} + +// verifyHTTPAssertions verifies JSONPath-based assertions +func verifyHTTPAssertions(assertions []HTTPAssertion, body []byte, tmplData TemplateData, t *testing.T) error { + bodyStr := string(body) + + // Try to parse as JSON for JSON assertions + var isJSON bool + var jsonData interface{} + if err := json.Unmarshal(body, &jsonData); err == nil { + isJSON = true + } + + for i, assertion := range assertions { + // Handle Equals assertion + if assertion.Equals != nil { + if !isJSON { + return fmt.Errorf("assertion %d: cannot use JSONPath on non-JSON response", i) + } + result := gjson.Get(bodyStr, assertion.Path) + if !result.Exists() { + return fmt.Errorf("assertion %d: path %s does not exist", i, assertion.Path) + } + + expectedStr := fmt.Sprintf("%v", assertion.Equals) + actualStr := result.String() + if expectedStr != actualStr { + return fmt.Errorf("assertion %d: path %s expected %v, got %v", i, assertion.Path, assertion.Equals, result.Value()) + } + } + + // Handle Exists assertion + if assertion.Exists != nil { + if !isJSON { + return fmt.Errorf("assertion %d: cannot use JSONPath on non-JSON response", i) + } + result := gjson.Get(bodyStr, assertion.Path) + exists := result.Exists() + if *assertion.Exists != exists { + return fmt.Errorf("assertion %d: path %s existence check failed (expected %v, got %v)", i, assertion.Path, *assertion.Exists, exists) + } + } + + // Handle Contains assertion + if assertion.Contains != "" { + if isJSON && assertion.Path 
!= "" { + result := gjson.Get(bodyStr, assertion.Path) + if !result.Exists() { + return fmt.Errorf("assertion %d: path %s does not exist", i, assertion.Path) + } + if !strings.Contains(result.String(), assertion.Contains) { + return fmt.Errorf("assertion %d: path %s does not contain %q", i, assertion.Path, assertion.Contains) + } + } else { + if !strings.Contains(bodyStr, assertion.Contains) { + return fmt.Errorf("assertion %d: response does not contain %q", i, assertion.Contains) + } + } + } + + // Handle Matches (regex) assertion + if assertion.Matches != "" { + var target string + if isJSON && assertion.Path != "" { + result := gjson.Get(bodyStr, assertion.Path) + if !result.Exists() { + return fmt.Errorf("assertion %d: path %s does not exist", i, assertion.Path) + } + target = result.String() + } else { + target = bodyStr + } + + matched, err := regexp.MatchString(assertion.Matches, target) + if err != nil { + return fmt.Errorf("assertion %d: invalid regex %q: %w", i, assertion.Matches, err) + } + if !matched { + return fmt.Errorf("assertion %d: value does not match regex %q", i, assertion.Matches) + } + } + } + + return nil +} + +// renderTemplateHTTP is a helper that allows nil *testing.T for non-test contexts +func renderTemplateHTTP(t *testing.T, tpl string, tmplData TemplateData) string { + // If template is empty, return as-is + if tpl == "" { + return tpl + } + + // If no template markers, return as-is + if !strings.Contains(tpl, "{{") { + return tpl + } + + var buffer bytes.Buffer + tmpl, err := template.New("inline").Parse(tpl) + if err != nil { + if t != nil { + t.Fatalf("Error parsing template: %s", err) + } + panic(fmt.Sprintf("Error parsing template: %s", err)) + } + tmplErr := tmpl.Execute(&buffer, tmplData) + if tmplErr != nil { + if t != nil { + t.Fatalf("Error rendering template: %s", tmplErr) + } + panic(fmt.Sprintf("Error rendering template: %s", tmplErr)) + } + return buffer.String() +} diff --git a/tests/quickstarter/steps/inspect.go 
b/tests/quickstarter/steps/inspect.go new file mode 100644 index 000000000..10f0d83e0 --- /dev/null +++ b/tests/quickstarter/steps/inspect.go @@ -0,0 +1,270 @@ +package steps + +import ( + "bytes" + "fmt" + "os/exec" + "regexp" + "strings" + "testing" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" +) + +// ExecuteInspect handles the inspect step type for inspecting container runtime behavior. +func ExecuteInspect(t *testing.T, step TestStep, testdataPath string, tmplData TemplateData, projectName string) { + if step.InspectParams == nil { + t.Fatal("Missing inspect parameters") + } + + params := step.InspectParams + + // Default namespace to {project}-dev + namespace := params.Namespace + if namespace == "" { + namespace = fmt.Sprintf("%s-dev", projectName) + } + namespace = renderTemplate(t, namespace, tmplData) + + // Render resource with template data + resource := renderTemplate(t, params.Resource, tmplData) + + logger.Running(fmt.Sprintf("Inspecting resource: %s in namespace %s", resource, namespace)) + + if params.Checks == nil { + logger.Info("No checks specified, skipping inspection") + return + } + + // Run log checks if specified + if params.Checks.Logs != nil { + if err := checkLogs(t, resource, namespace, params.Checks.Logs); err != nil { + t.Fatalf("Log check failed: %v", err) + } + } + + // Run environment variable checks if specified + if len(params.Checks.Env) > 0 { + if err := checkEnvironmentVariables(t, resource, namespace, params.Checks.Env, tmplData); err != nil { + t.Fatalf("Environment variable check failed: %v", err) + } + } + + // Run resource checks if specified + if params.Checks.Resources != nil { + if err := checkResources(t, resource, namespace, params.Checks.Resources); err != nil { + t.Fatalf("Resource check failed: %v", err) + } + } + + logger.Success(fmt.Sprintf("All inspection checks passed for %s", resource)) +} + +// checkLogs verifies log content +func checkLogs(t *testing.T, resource string, namespace string, 
logChecks *LogChecks) error { + logger.Waiting(fmt.Sprintf("Checking logs for %s", resource)) + + // Get logs from the resource + cmd := exec.Command("oc", "logs", resource, "-n", namespace, "--tail=500") + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return fmt.Errorf("failed to get logs: %w\nstderr: %s", err, stderr.String()) + } + + logs := stdout.String() + + // Check for required strings + for _, required := range logChecks.Contains { + if !strings.Contains(logs, required) { + return fmt.Errorf("logs do not contain required string: %q", required) + } + logger.KeyValue("Found required string", required) + } + + // Check for forbidden strings + for _, forbidden := range logChecks.NotContains { + if strings.Contains(logs, forbidden) { + return fmt.Errorf("logs contain forbidden string: %q", forbidden) + } + logger.KeyValue("Does not contain", forbidden) + } + + // Check regex patterns + for _, pattern := range logChecks.Matches { + matched, err := regexp.MatchString(pattern, logs) + if err != nil { + return fmt.Errorf("invalid regex pattern %q: %w", pattern, err) + } + if !matched { + return fmt.Errorf("logs do not match required pattern: %q", pattern) + } + logger.KeyValue("Matches required pattern", pattern) + } + + logger.Success("Log checks passed") + return nil +} + +// checkEnvironmentVariables verifies environment variables in the container +func checkEnvironmentVariables(t *testing.T, resource string, namespace string, expectedEnv map[string]string, tmplData TemplateData) error { + logger.Waiting(fmt.Sprintf("Checking environment variables for %s", resource)) + + // Extract resource type and name + parts := strings.Split(resource, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid resource format, expected type/name, got: %s", resource) + } + resourceType := parts[0] + _ = parts[1] // resourceName not used currently but kept for future use + + // Get environment variables from 
the resource + var jsonPath string + switch resourceType { + case "deployment", "deploy": + jsonPath = "{.spec.template.spec.containers[0].env}" + case "deploymentconfig", "dc": + jsonPath = "{.spec.template.spec.containers[0].env}" + case "pod", "po": + jsonPath = "{.spec.containers[0].env}" + default: + return fmt.Errorf("unsupported resource type for env check: %s", resourceType) + } + + cmd := exec.Command("oc", "get", resource, "-n", namespace, "-o", "jsonpath="+jsonPath) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return fmt.Errorf("failed to get environment variables: %w\nstderr: %s", err, stderr.String()) + } + + envOutput := stdout.String() + + // Check each expected environment variable + for key, expectedValue := range expectedEnv { + // Render expected value with template data + renderedValue := renderTemplate(t, expectedValue, tmplData) + + // Look for the env var in the output (format: name:key value:val) + searchPattern := fmt.Sprintf(`name:%s.*?value:%s`, key, regexp.QuoteMeta(renderedValue)) + matched, err := regexp.MatchString(searchPattern, envOutput) + if err != nil { + return fmt.Errorf("regex error checking env var %s: %w", key, err) + } + if !matched { + // Try alternative format (just checking if key exists with any value) + keyPattern := fmt.Sprintf(`name:%s`, key) + keyMatched, err := regexp.MatchString(keyPattern, envOutput) + if err != nil { + return fmt.Errorf("regex error checking env var key %s: %w", key, err) + } + if !keyMatched { + return fmt.Errorf("environment variable %s not found", key) + } + // Key exists but value might be different - let's try to extract and compare + return fmt.Errorf("environment variable %s exists but value does not match %q", key, renderedValue) + } + logger.KeyValue(fmt.Sprintf("Environment variable %s", key), renderedValue) + } + + logger.Success("Environment variable checks passed") + return nil +} + +// checkResources 
verifies resource limits and requests +func checkResources(t *testing.T, resource string, namespace string, resourceChecks *ResourceChecks) error { + logger.Waiting(fmt.Sprintf("Checking resource limits/requests for %s", resource)) + + // Extract resource type and name + parts := strings.Split(resource, "/") + if len(parts) != 2 { + return fmt.Errorf("invalid resource format, expected type/name, got: %s", resource) + } + resourceType := parts[0] + + // Get resource limits and requests from the resource + var jsonPath string + switch resourceType { + case "deployment", "deploy": + jsonPath = "{.spec.template.spec.containers[0].resources}" + case "deploymentconfig", "dc": + jsonPath = "{.spec.template.spec.containers[0].resources}" + case "pod", "po": + jsonPath = "{.spec.containers[0].resources}" + default: + return fmt.Errorf("unsupported resource type for resource check: %s", resourceType) + } + + cmd := exec.Command("oc", "get", resource, "-n", namespace, "-o", "jsonpath="+jsonPath) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err != nil { + return fmt.Errorf("failed to get resource limits/requests: %w\nstderr: %s", err, stderr.String()) + } + + resourceOutput := stdout.String() + + // Check limits + if resourceChecks.Limits != nil { + if resourceChecks.Limits.CPU != "" { + searchPattern := fmt.Sprintf(`limits:.*?cpu:%s`, regexp.QuoteMeta(resourceChecks.Limits.CPU)) + matched, err := regexp.MatchString(searchPattern, resourceOutput) + if err != nil { + return fmt.Errorf("regex error checking CPU limit: %w", err) + } + if !matched { + return fmt.Errorf("CPU limit does not match expected value: %s", resourceChecks.Limits.CPU) + } + logger.KeyValue("CPU limit", resourceChecks.Limits.CPU) + } + if resourceChecks.Limits.Memory != "" { + searchPattern := fmt.Sprintf(`limits:.*?memory:%s`, regexp.QuoteMeta(resourceChecks.Limits.Memory)) + matched, err := regexp.MatchString(searchPattern, resourceOutput) + if 
err != nil { + return fmt.Errorf("regex error checking memory limit: %w", err) + } + if !matched { + return fmt.Errorf("Memory limit does not match expected value: %s", resourceChecks.Limits.Memory) + } + logger.KeyValue("Memory limit", resourceChecks.Limits.Memory) + } + } + + // Check requests + if resourceChecks.Requests != nil { + if resourceChecks.Requests.CPU != "" { + searchPattern := fmt.Sprintf(`requests:.*?cpu:%s`, regexp.QuoteMeta(resourceChecks.Requests.CPU)) + matched, err := regexp.MatchString(searchPattern, resourceOutput) + if err != nil { + return fmt.Errorf("regex error checking CPU request: %w", err) + } + if !matched { + return fmt.Errorf("CPU request does not match expected value: %s", resourceChecks.Requests.CPU) + } + logger.KeyValue("CPU request", resourceChecks.Requests.CPU) + } + if resourceChecks.Requests.Memory != "" { + searchPattern := fmt.Sprintf(`requests:.*?memory:%s`, regexp.QuoteMeta(resourceChecks.Requests.Memory)) + matched, err := regexp.MatchString(searchPattern, resourceOutput) + if err != nil { + return fmt.Errorf("regex error checking memory request: %w", err) + } + if !matched { + return fmt.Errorf("Memory request does not match expected value: %s", resourceChecks.Requests.Memory) + } + logger.KeyValue("Memory request", resourceChecks.Requests.Memory) + } + } + + logger.Success("Resource checks passed") + return nil +} diff --git a/tests/quickstarter/steps/openshift.go b/tests/quickstarter/steps/openshift.go new file mode 100644 index 000000000..a2c121255 --- /dev/null +++ b/tests/quickstarter/steps/openshift.go @@ -0,0 +1,141 @@ +package steps + +import ( + "bytes" + "fmt" + "os" + "os/exec" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" +) + +// deleteOpenShiftResources deletes all OpenShift resources with the app label +func deleteOpenShiftResources(projectID string, componentID string, namespace string) error { + // Check if resources should be kept + if os.Getenv("KEEP_RESOURCES") == "true" { + 
logger.Warn("KEEP_RESOURCES=true: Skipping cleanup for component: %s in namespace: %s", componentID, namespace) + return nil + } + logger.Running(fmt.Sprintf("Cleanup for component: %s in namespace: %s", componentID, namespace)) + label := fmt.Sprintf("app=%s-%s", projectID, componentID) + logger.Debug("Delete resources labelled with: %s", label) + stdout, stderr, err := runOcCmd([]string{ + "-n", namespace, + "delete", "all", "-l", label, + }) + if err != nil { + return fmt.Errorf( + "Could not delete all resources labelled with %s: \nStdOut: %s\nStdErr: %s\n\nErr: %w", + label, + stdout, + stderr, + err, + ) + } + + logger.Success(fmt.Sprintf("Cleaned up resources with label: %s", label)) + return nil +} + +// deleteOpenShiftResourceByName deletes a specific OpenShift resource by name +func deleteOpenShiftResourceByName(resourceType string, resourceName string, namespace string) error { + // Check if resources should be kept + if os.Getenv("KEEP_RESOURCES") == "true" { + logger.Warn("KEEP_RESOURCES=true: Skipping cleanup for resource: %s/%s in namespace: %s", resourceType, resourceName, namespace) + return nil + } + logger.Running(fmt.Sprintf("Cleanup for resource: %s/%s in %s", resourceType, resourceName, namespace)) + resource := fmt.Sprintf("%s/%s", resourceType, resourceName) + + stdout, stderr, err := runOcCmd([]string{ + "-n", namespace, + "delete", resource, + }) + + if err != nil { + return fmt.Errorf( + "Could not delete resource %s: \nStdOut: %s\nStdErr: %s\n\nErr: %w", + resource, + stdout, + stderr, + err, + ) + } + + logger.Success(fmt.Sprintf("Cleaned up resource: %s", resource)) + return nil +} + +// deleteHelmRelease deletes a Helm release +func deleteHelmRelease(releaseName string, namespace string) error { + // Check if resources should be kept + if os.Getenv("KEEP_RESOURCES") == "true" { + logger.Warn("KEEP_RESOURCES=true: Skipping cleanup for Helm release: %s in namespace: %s", releaseName, namespace) + return nil + } + 
logger.Waiting(fmt.Sprintf("Checking for Helm release: %s in %s", releaseName, namespace)) + + // Check if the release exists + stdout, stderr, err := runHelmCmd([]string{ + "list", + "-n", namespace, + "-q", // quiet output, just release names + "-f", releaseName, // filter by release name + }) + + if err != nil { + return fmt.Errorf( + "Could not list Helm releases in %s: \nStdOut: %s\nStdErr: %s\n\nErr: %w", + namespace, + stdout, + stderr, + err, + ) + } + + // If the release doesn't exist, skip cleanup + if stdout == "" || len(bytes.TrimSpace([]byte(stdout))) == 0 { + logger.Info("Helm release %s not found, skipping cleanup", releaseName) + return nil + } + + logger.Running(fmt.Sprintf("Cleanup for Helm release: %s", releaseName)) + + stdout, stderr, err = runHelmCmd([]string{ + "uninstall", releaseName, + "-n", namespace, + }) + + if err != nil { + return fmt.Errorf( + "Could not delete Helm release %s: \nStdOut: %s\nStdErr: %s\n\nErr: %w", + releaseName, + stdout, + stderr, + err, + ) + } + + logger.Success(fmt.Sprintf("Cleaned up Helm release: %s", releaseName)) + return nil +} + +// runOcCmd executes an oc command +func runOcCmd(args []string) (string, string, error) { + cmd := exec.Command("oc", args...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + return stdout.String(), stderr.String(), err +} + +// runHelmCmd executes a helm command +func runHelmCmd(args []string) (string, string, error) { + cmd := exec.Command("helm", args...) 
+ var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + err := cmd.Run() + return stdout.String(), stderr.String(), err +} diff --git a/tests/quickstarter/steps/portforward.go b/tests/quickstarter/steps/portforward.go new file mode 100644 index 000000000..20a5b95c7 --- /dev/null +++ b/tests/quickstarter/steps/portforward.go @@ -0,0 +1,291 @@ +package steps + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "strings" + "sync" + "syscall" + "time" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" +) + +// PortForwardManager manages the lifecycle of port-forwards for local development +type PortForwardManager struct { + forwards map[string]*PortForward + mu sync.Mutex + nextLocalPort int +} + +// PortForward represents a single port-forward session +type PortForward struct { + ServiceName string + Namespace string + RemotePort string + LocalPort int + Cmd *exec.Cmd + Started time.Time +} + +var ( + globalPortForwardManager = &PortForwardManager{ + forwards: make(map[string]*PortForward), + nextLocalPort: 8000, + } +) + +// EnsurePortForward ensures a port-forward exists for the given service +// Returns the local port number that can be used to access the service +func EnsurePortForward(serviceName, namespace, remotePort string) (int, error) { + return globalPortForwardManager.ensurePortForward(serviceName, namespace, remotePort) +} + +// CleanupAllPortForwards terminates all active port-forwards +func CleanupAllPortForwards() { + globalPortForwardManager.cleanupAll() +} + +// ensurePortForward is the internal implementation +func (m *PortForwardManager) ensurePortForward(serviceName, namespace, remotePort string) (int, error) { + m.mu.Lock() + defer m.mu.Unlock() + + key := fmt.Sprintf("%s/%s:%s", namespace, serviceName, remotePort) + + // Check if port-forward already exists and is healthy + if pf, exists := m.forwards[key]; exists { + if m.isHealthy(pf) { + logger.Success(fmt.Sprintf("Reusing existing port-forward: 
localhost:%d -> %s/%s:%s", + pf.LocalPort, namespace, serviceName, remotePort)) + return pf.LocalPort, nil + } + // Port-forward exists but is unhealthy, clean it up + logger.Warn("Existing port-forward is unhealthy, recreating...") + m.cleanup(pf) + delete(m.forwards, key) + } + + // Start new port-forward + localPort := m.nextLocalPort + m.nextLocalPort++ + + pf, err := m.startPortForward(serviceName, namespace, remotePort, localPort) + if err != nil { + return 0, fmt.Errorf("failed to start port-forward: %w", err) + } + + m.forwards[key] = pf + logger.Success(fmt.Sprintf("Port-forward established: localhost:%d -> %s/%s:%s", + localPort, namespace, serviceName, remotePort)) + + return localPort, nil +} + +// startPortForward starts a new port-forward process +func (m *PortForwardManager) startPortForward(serviceName, namespace, remotePort string, startPort int) (*PortForward, error) { + // Try up to 10 different ports in case of conflicts + var lastErr error + currentPort := startPort + + for attempt := 1; attempt <= 10; attempt++ { + if attempt > 1 { + // Try next port + currentPort = startPort + attempt - 1 + logger.Warn("Port %d in use, trying port %d...", currentPort-1, currentPort) + } + + pf, err := m.startPortForwardAttempt(serviceName, namespace, remotePort, currentPort) + if err == nil { + // Success! 
Update nextLocalPort to avoid this port in future + if currentPort >= m.nextLocalPort { + m.nextLocalPort = currentPort + 1 + } + logger.Success(fmt.Sprintf("Port-forward established: localhost:%d -> %s/%s:%s", + currentPort, namespace, serviceName, remotePort)) + return pf, nil + } + lastErr = err + + // If error is not about port already in use, fail immediately + // Don't retry with same port as this causes duplicate port-forwards + if !isPortInUseError(err) { + return nil, fmt.Errorf("failed to start port-forward: %w", err) + } + + // Port is in use, loop will try next port + } + + return nil, fmt.Errorf("failed to find available port after trying %d-%d: %w", startPort, startPort+9, lastErr) +} + +// startPortForwardAttempt performs a single attempt to start port-forward +func (m *PortForwardManager) startPortForwardAttempt(serviceName, namespace, remotePort string, localPort int) (*PortForward, error) { + portMapping := fmt.Sprintf("%d:%s", localPort, remotePort) + serviceRef := fmt.Sprintf("svc/%s", serviceName) + + cmd := exec.Command("oc", "port-forward", + serviceRef, + portMapping, + "-n", namespace) + + // Capture both stdout and stderr to detect errors + // oc port-forward writes "Forwarding from..." 
to stdout + // and errors to stderr + var stdoutBuf, stderrBuf bytes.Buffer + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + + // Start the process + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to start oc port-forward: %w", err) + } + + pf := &PortForward{ + ServiceName: serviceName, + Namespace: namespace, + RemotePort: remotePort, + LocalPort: localPort, + Cmd: cmd, + Started: time.Now(), + } + + // Wait a bit for port-forward to establish + time.Sleep(2 * time.Second) + + stderrOutput := stderrBuf.String() + stdoutOutput := stdoutBuf.String() + + // Check if process is still running + if m.isHealthy(pf) { + if stdoutOutput != "" { + fmt.Print(stdoutOutput) + } + return pf, nil + } + + // Process not healthy — check if stderr shows an error + if stderrOutput != "" { + fmt.Fprintf(os.Stderr, "%s", stderrOutput) + if pf.Cmd.Process != nil { + // Process already failed, ignore kill error + pf.Cmd.Process.Kill() //nolint:errcheck + } + return nil, fmt.Errorf("port-forward failed: %s", stderrOutput) + } + + // Sometimes oc port-forward continues running in background even if we can't track it. + // If stdout indicates it started, treat as success. 
+ if strings.Contains(stdoutOutput, "Forwarding from") { + fmt.Print(stdoutOutput) + return pf, nil + } + + return nil, fmt.Errorf("port-forward process died immediately after start") +} + +// isHealthy checks if a port-forward is still running +func (m *PortForwardManager) isHealthy(pf *PortForward) bool { + if pf.Cmd == nil || pf.Cmd.Process == nil { + return false + } + + // Check if process is still running + // On Unix, sending signal 0 checks if process exists without actually sending a signal + if err := pf.Cmd.Process.Signal(syscall.Signal(0)); err != nil { + return false + } + + return true +} + +// cleanup terminates a single port-forward +func (m *PortForwardManager) cleanup(pf *PortForward) { + if pf.Cmd != nil && pf.Cmd.Process != nil { + // Try graceful termination first + pf.Cmd.Process.Signal(os.Interrupt) //nolint:errcheck + + // Wait briefly for graceful shutdown + done := make(chan bool, 1) + go func() { + pf.Cmd.Wait() //nolint:errcheck + done <- true + }() + + select { + case <-done: + // Process terminated gracefully + case <-time.After(2 * time.Second): + // Force kill if not terminated + pf.Cmd.Process.Kill() //nolint:errcheck + } + } +} + +// cleanupAll terminates all active port-forwards +func (m *PortForwardManager) cleanupAll() { + m.mu.Lock() + defer m.mu.Unlock() + + if len(m.forwards) == 0 { + return + } + + logger.SubSection("Cleaning up port-forwards") + + for key, pf := range m.forwards { + logger.Running(fmt.Sprintf("Terminating port-forward: localhost:%d -> %s/%s:%s", + pf.LocalPort, pf.Namespace, pf.ServiceName, pf.RemotePort)) + m.cleanup(pf) + delete(m.forwards, key) + } + + logger.Success("All port-forwards cleaned up") +} + +// GetActivePortForwards returns information about active port-forwards (for debugging) +func GetActivePortForwards() []string { + globalPortForwardManager.mu.Lock() + defer globalPortForwardManager.mu.Unlock() + + var result []string + for _, pf := range globalPortForwardManager.forwards { + status := 
// isPortInUseError checks whether err indicates the local port was already bound.
func isPortInUseError(err error) bool {
	if err == nil {
		return false
	}
	// strings is already imported by this file; no need for hand-rolled search.
	msg := err.Error()
	return strings.Contains(msg, "address already in use") ||
		strings.Contains(msg, "bind: address already in use") ||
		strings.Contains(msg, "unable to listen on port")
}

// contains reports whether substr is within s.
//
// Deprecated: call strings.Contains directly; this wrapper is kept only for
// existing callers.
func contains(s, substr string) bool {
	return strings.Contains(s, substr)
}

// findSubstring reports whether substr occurs in s.
//
// Deprecated: call strings.Contains directly; this wrapper is kept only for
// existing callers.
func findSubstring(s, substr string) bool {
	return strings.Contains(s, substr)
}
+func ExecuteProvision(t *testing.T, step TestStep, testdataPath string, tmplData TemplateData, repoName string, quickstarterRepo string, quickstarterName string, config map[string]string, projectName string) { + logger.Running(fmt.Sprintf("Provision for %s-%s", projectName, repoName)) + + // cleanup and create bb resources for this test + err := recreateBitbucketRepo(config, projectName, repoName) + if err != nil { + logger.Failure("Provision - BitBucket repo recreation", err) + t.Fatal(err) + } + logger.Success("BitBucket repo created/updated") + + projectNameDev := fmt.Sprintf("%s-dev", projectName) + projectNameTest := fmt.Sprintf("%s-test", projectName) + projectNameCD := fmt.Sprintf("%s-cd", projectName) + + err = deleteOpenShiftResources(projectName, step.ComponentID, projectNameCD) + if err != nil { + logger.Failure(fmt.Sprintf("Delete OpenShift resources in %s-cd", projectName), err) + t.Fatal(err) + } + err = deleteOpenShiftResources(projectName, step.ComponentID, projectNameDev) + if err != nil { + logger.Failure(fmt.Sprintf("Delete OpenShift resources in %s-dev", projectName), err) + t.Fatal(err) + } + err = deleteOpenShiftResources(projectName, step.ComponentID, projectNameTest) + if err != nil { + logger.Failure(fmt.Sprintf("Delete OpenShift resources in %s-test", projectName), err) + t.Fatal(err) + } + logger.Success("OpenShift resources cleaned up") + + if len(step.ProvisionParams.TestResourcesCleanUp) > 0 { + logger.Running("Cleaning up test resources") + for _, it := range step.ProvisionParams.TestResourcesCleanUp { + tmpNamespace := it.Namespace + if tmpNamespace == "" { + tmpNamespace = DefaultNamespace + } + namespace := fmt.Sprintf("%s-%s", projectName, tmpNamespace) + if err := deleteOpenShiftResourceByName(it.ResourceType, it.ResourceName, namespace); err != nil { + logger.Warn("Failed to cleanup resource %s/%s: %v", it.ResourceType, it.ResourceName, err) + } else { + logger.KeyValue(fmt.Sprintf("Cleaned %s", it.ResourceType), 
it.ResourceName) + } + } + } + + branch := config["ODS_GIT_REF"] + if len(step.ProvisionParams.Branch) > 0 { + branch = renderTemplate(t, step.ProvisionParams.Branch, tmplData) + } + agentImageTag := config["ODS_IMAGE_TAG"] + if len(step.ProvisionParams.AgentImageTag) > 0 { + agentImageTag = renderTemplate(t, step.ProvisionParams.AgentImageTag, tmplData) + } + sharedLibraryRef := agentImageTag + if len(step.ProvisionParams.SharedLibraryRef) > 0 { + sharedLibraryRef = renderTemplate(t, step.ProvisionParams.SharedLibraryRef, tmplData) + } + env := []utils.EnvPair{ + { + Name: "ODS_NAMESPACE", + Value: config["ODS_NAMESPACE"], + }, + { + Name: "ODS_GIT_REF", + Value: config["ODS_GIT_REF"], + }, + { + Name: "ODS_IMAGE_TAG", + Value: config["ODS_IMAGE_TAG"], + }, + { + Name: "ODS_BITBUCKET_PROJECT", + Value: config["ODS_BITBUCKET_PROJECT"], + }, + { + Name: "AGENT_IMAGE_TAG", + Value: agentImageTag, + }, + { + Name: "SHARED_LIBRARY_REF", + Value: sharedLibraryRef, + }, + { + Name: "PROJECT_ID", + Value: projectName, + }, + { + Name: "COMPONENT_ID", + Value: step.ComponentID, + }, + { + Name: "GIT_URL_HTTP", + Value: fmt.Sprintf("%s/%s/%s.git", config["REPO_BASE"], projectName, repoName), + }, + } + + t.Cleanup(func() { + // Check if resources should be kept + if os.Getenv("KEEP_RESOURCES") == "true" { + logger.Warn("KEEP_RESOURCES=true: Skipping resource cleanup for component %s", step.ComponentID) + return + } + logger.Running(fmt.Sprintf("Cleaning up resources for component %s", step.ComponentID)) + if err := deleteOpenShiftResources(projectName, step.ComponentID, projectNameCD); err != nil { + logger.Warn("Failed to cleanup CD resources: %v", err) + } + if err := deleteOpenShiftResources(projectName, step.ComponentID, projectNameDev); err != nil { + logger.Warn("Failed to cleanup DEV resources: %v", err) + } + if err := deleteOpenShiftResources(projectName, step.ComponentID, projectNameTest); err != nil { + logger.Warn("Failed to cleanup TEST resources: %v", err) + } 
+ if err := deleteHelmRelease(step.ComponentID, projectNameCD); err != nil { + logger.Warn("Failed to cleanup Helm release in CD namespace: %v", err) + } + if err := deleteHelmRelease(step.ComponentID, projectNameDev); err != nil { + logger.Warn("Failed to cleanup Helm release in DEV namespace: %v", err) + } + if err := deleteHelmRelease(step.ComponentID, projectNameTest); err != nil { + logger.Warn("Failed to cleanup Helm release in TEST namespace: %v", err) + } + logger.Success("Resource cleanup completed") + }) + + // Checks if it was overridden including a repository name in the same project like 'repo/quickstarter'. + var repository string = quickstarterRepo + var repositoryIndex int = strings.Index(step.ProvisionParams.Quickstarter, "/") + if len(step.ProvisionParams.Quickstarter) > 0 && repositoryIndex != -1 { + repository = step.ProvisionParams.Quickstarter[:repositoryIndex] + } + // If quickstarter is overridden, use that value. Otherwise + // we use the quickstarter under test. 
+ var jenkinsfile string = fmt.Sprintf("%s/%s", quickstarterName, DefaultJenkinsfile) + if len(step.ProvisionParams.Quickstarter) > 0 { + jenkinsfile = fmt.Sprintf("%s/%s", step.ProvisionParams.Quickstarter, DefaultJenkinsfile) + } + if len(step.ProvisionParams.Quickstarter) > 0 && repositoryIndex != -1 { + jenkinsfile = fmt.Sprintf("%s/%s", step.ProvisionParams.Quickstarter[repositoryIndex+1:], DefaultJenkinsfile) + } + + pipelineName := step.ProvisionParams.Pipeline + verify := step.ProvisionParams.Verify + + // Render environment variable values through template engine + renderedEnv := make([]utils.EnvPair, len(step.ProvisionParams.Env)) + for i, envPair := range step.ProvisionParams.Env { + renderedEnv[i] = utils.EnvPair{ + Name: envPair.Name, + Value: renderTemplate(t, envPair.Value, tmplData), + } + } + + request := utils.RequestBuild{ + Repository: repository, + Branch: branch, + Project: config["ODS_BITBUCKET_PROJECT"], + Env: append(env, renderedEnv...), + } + + buildName, err := utils.RunJenkinsPipeline(jenkinsfile, request, pipelineName) + if err != nil { + t.Fatal(err) + } + verifyPipelineRun(t, step, verify, testdataPath, repoName, buildName, config, projectName) +} diff --git a/tests/quickstarter/steps/registry.go b/tests/quickstarter/steps/registry.go new file mode 100644 index 000000000..79286fd6c --- /dev/null +++ b/tests/quickstarter/steps/registry.go @@ -0,0 +1,219 @@ +package steps + +import ( + "fmt" + "sync" + "testing" +) + +// Registry Pattern for Step Execution +// +// The test framework uses a registry pattern to manage step handlers, making it easy to +// add new step types without modifying the core test execution logic. +// +// To add a new step type: +// +// 1. Define a step type constant in types.go: +// const StepTypeMyCustom = "my-custom" +// +// 2. Add parameters struct to types.go (if needed): +// type MyCustomParams struct { +// Target string `json:"target"` +// } +// +// 3. 
Add params field to TestStep in types.go: +// MyCustomParams *MyCustomParams `json:"myCustomParams,omitempty"` +// +// 4. Implement execution logic in my_custom.go: +// func ExecuteMyCustom(t *testing.T, step TestStep, ...) { +// // Your implementation here +// } +// +// 5. Create handler adapter below: +// type MyCustomHandler struct{} +// func (h *MyCustomHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { +// ExecuteMyCustom(t, *step, params.TestdataPath, params.TmplData, params.ProjectName) +// return nil +// } +// +// 6. Register in registerDefaultHandlers(): +// defaultRegistry.Register(StepTypeMyCustom, &MyCustomHandler{}) +// +// See QUICKSTARTERS_TESTS.md section "Developing Custom Step Types" for complete guide. + +// ExecutionParams consolidates all parameters needed for step execution. +// This struct provides a unified interface for passing context to step handlers, +// making it easier to add new parameters without changing handler signatures. +// +// When adding new commonly-needed parameters, extend this struct rather than +// changing individual step function signatures. +type ExecutionParams struct { + TestdataPath string // Path to the testdata directory + TmplData TemplateData // Template variables for rendering + RepoName string // Repository name (format: project-component) + QuickstarterRepo string // Quickstarter repository name + QuickstarterName string // Quickstarter name + Config map[string]string // Configuration key-value pairs + ProjectName string // OpenShift project name +} + +// StepHandler defines the interface that all step handlers must implement. +// This allows for a clean registry pattern where steps can be registered +// and executed polymorphically. +// +// Implementations should delegate to specific Execute* functions (e.g., ExecuteUpload, +// ExecuteRun) which contain the actual step logic. +type StepHandler interface { + // Execute runs the step logic with the given parameters. 
+ // Returns an error if the step execution fails. + Execute(t *testing.T, step *TestStep, params *ExecutionParams) error +} + +// StepRegistry manages the mapping of step types to their handlers. +// It provides thread-safe registration and retrieval of step handlers, +// enabling a plugin-like architecture for step types. +type StepRegistry struct { + handlers map[string]StepHandler + mu sync.RWMutex +} + +// NewStepRegistry creates a new empty step registry. +func NewStepRegistry() *StepRegistry { + return &StepRegistry{ + handlers: make(map[string]StepHandler), + } +} + +// Register adds a handler for a specific step type. +// If a handler already exists for the given type, it will be overwritten. +func (r *StepRegistry) Register(stepType string, handler StepHandler) { + r.mu.Lock() + defer r.mu.Unlock() + r.handlers[stepType] = handler +} + +// Get retrieves the handler for a specific step type. +// Returns an error if no handler is registered for the given type. +func (r *StepRegistry) Get(stepType string) (StepHandler, error) { + r.mu.RLock() + defer r.mu.RUnlock() + + handler, ok := r.handlers[stepType] + if !ok { + return nil, fmt.Errorf("unknown step type: %s", stepType) + } + return handler, nil +} + +// defaultRegistry is the global registry instance used throughout the application. +var ( + defaultRegistry *StepRegistry + once sync.Once +) + +// DefaultRegistry returns the singleton global step registry. +func DefaultRegistry() *StepRegistry { + once.Do(func() { + defaultRegistry = NewStepRegistry() + registerDefaultHandlers() + }) + return defaultRegistry +} + +// registerDefaultHandlers registers all built-in step handlers. +// +// To add a new step type, add a registration line here: +// +// defaultRegistry.Register(StepTypeMyCustom, &MyCustomHandler{}) +// +// Make sure your handler implements the StepHandler interface. 
+func registerDefaultHandlers() { + defaultRegistry.Register(StepTypeUpload, &UploadHandler{}) + defaultRegistry.Register(StepTypeRun, &RunHandler{}) + defaultRegistry.Register(StepTypeProvision, &ProvisionHandler{}) + defaultRegistry.Register(StepTypeBuild, &BuildHandler{}) + defaultRegistry.Register(StepTypeHTTP, &HTTPHandler{}) + defaultRegistry.Register(StepTypeWait, &WaitHandler{}) + defaultRegistry.Register(StepTypeInspect, &InspectHandler{}) + defaultRegistry.Register(StepTypeExposeService, &ExposeServiceHandler{}) + defaultRegistry.Register(StepTypeBitbucket, &BitbucketHandler{}) +} + +// Handler Implementations +// +// Each handler below implements the StepHandler interface by delegating to +// its corresponding Execute* function. The handler adapter pattern allows us +// to maintain the existing function signatures while integrating with the +// registry pattern. + +// UploadHandler implements the handler for upload steps. +type UploadHandler struct{} + +func (h *UploadHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteUpload(t, *step, params.TestdataPath, params.TmplData, params.RepoName, params.Config, params.ProjectName) + return nil +} + +// RunHandler implements the handler for run steps. +type RunHandler struct{} + +func (h *RunHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteRun(t, *step, params.TestdataPath, params.TmplData, params.ProjectName) + return nil +} + +// ProvisionHandler implements the handler for provision steps. +type ProvisionHandler struct{} + +func (h *ProvisionHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteProvision(t, *step, params.TestdataPath, params.TmplData, params.RepoName, params.QuickstarterRepo, params.QuickstarterName, params.Config, params.ProjectName) + return nil +} + +// BuildHandler implements the handler for build steps. 
+type BuildHandler struct{} + +func (h *BuildHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteBuild(t, *step, params.TestdataPath, params.TmplData, params.RepoName, params.Config, params.ProjectName) + return nil +} + +// HTTPHandler implements the handler for HTTP steps. +type HTTPHandler struct{} + +func (h *HTTPHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteHTTP(t, *step, params.TestdataPath, params.TmplData) + return nil +} + +// WaitHandler implements the handler for wait steps. +type WaitHandler struct{} + +func (h *WaitHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteWait(t, *step, params.TestdataPath, params.TmplData, params.ProjectName) + return nil +} + +// InspectHandler implements the handler for inspect steps. +type InspectHandler struct{} + +func (h *InspectHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteInspect(t, *step, params.TestdataPath, params.TmplData, params.ProjectName) + return nil +} + +// ExposeServiceHandler implements the handler for expose-service steps. +type ExposeServiceHandler struct{} + +func (h *ExposeServiceHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteExposeService(t, *step, params.TmplData, params.ProjectName) + return nil +} + +// BitbucketHandler implements the handler for bitbucket steps. 
+type BitbucketHandler struct{} + +func (h *BitbucketHandler) Execute(t *testing.T, step *TestStep, params *ExecutionParams) error { + ExecuteBitbucket(t, *step, params.TmplData, params.Config, params.ProjectName, params.TestdataPath) + return nil +} diff --git a/tests/quickstarter/steps/run.go b/tests/quickstarter/steps/run.go new file mode 100644 index 000000000..a7807750b --- /dev/null +++ b/tests/quickstarter/steps/run.go @@ -0,0 +1,83 @@ +package steps + +import ( + "fmt" + "strings" + "testing" + + "github.com/opendevstack/ods-core/tests/utils" +) + +// ExecuteRun handles the run step type. +func ExecuteRun(t *testing.T, step TestStep, testdataPath string, tmplData TemplateData, projectName string) { + if step.RunParams == nil || step.RunParams.File == "" { + t.Fatalf("Missing run parameters, not defined script file.") + } + + fmt.Printf("Executing script: %s\n", step.RunParams.File) + scriptPath := fmt.Sprintf("%s/%s", testdataPath, step.RunParams.File) + + // Build environment variables to pass to script + envVars := buildScriptEnvironment(t, step, tmplData, projectName) + + stdout, stderr, err := utils.RunCommand(scriptPath, []string{}, envVars) + fmt.Printf("%s", stdout) + if err != nil { + t.Fatalf( + "Execution of script:%s failed: \nStdOut: %s\nStdErr: %s\nErr: %s\n", + scriptPath, + stdout, + stderr, + err) + } else { + fmt.Printf("Executed script: %s\n", scriptPath) + } +} + +// buildScriptEnvironment creates environment variables for shell scripts +// This allows scripts to access test context and configuration +func buildScriptEnvironment(t *testing.T, step TestStep, tmplData TemplateData, projectName string) []string { + envVars := []string{ + fmt.Sprintf("COMPONENT_ID=%s", step.ComponentID), + fmt.Sprintf("PROJECT_ID=%s", projectName), + fmt.Sprintf("NAMESPACE=%s-dev", projectName), + } + + // If services are defined in runParams, export each as a named environment variable + // Example: {"api": "api-service", "backend": "backend-service"} 
becomes + // API_SERVICE_URL and BACKEND_SERVICE_URL + if step.RunParams != nil && len(step.RunParams.Services) > 0 { + for serviceName, actualServiceName := range step.RunParams.Services { + // Render service name template if needed + renderedServiceName := renderTemplate(t, actualServiceName, tmplData) + serviceKey := fmt.Sprintf("ExposedService_%s", renderedServiceName) + if exposedURL, ok := tmplData[serviceKey].(string); ok && exposedURL != "" { + envVarName := fmt.Sprintf("%s_SERVICE_URL", strings.ToUpper(serviceName)) + envVars = append(envVars, fmt.Sprintf("%s=%s", envVarName, exposedURL)) + fmt.Printf(" Setting %s=%s for script (from exposed service)\n", envVarName, exposedURL) + } + } + } else if step.ComponentID != "" { + // Fallback: If no services map defined, export the ComponentID service as SERVICE_URL + // This maintains backward compatibility + renderedComponentID := renderTemplate(t, step.ComponentID, tmplData) + serviceKey := fmt.Sprintf("ExposedService_%s", renderedComponentID) + if exposedURL, ok := tmplData[serviceKey].(string); ok && exposedURL != "" { + envVars = append(envVars, fmt.Sprintf("SERVICE_URL=%s", exposedURL)) + fmt.Printf(" Setting SERVICE_URL=%s for script (from exposed service)\n", exposedURL) + } + } + + // Pass through template data as environment variables + if val, ok := tmplData["OdsNamespace"].(string); ok && val != "" { + envVars = append(envVars, fmt.Sprintf("ODS_NAMESPACE=%s", val)) + } + if val, ok := tmplData["OdsGitRef"].(string); ok && val != "" { + envVars = append(envVars, fmt.Sprintf("ODS_GIT_REF=%s", val)) + } + if val, ok := tmplData["OdsImageTag"].(string); ok && val != "" { + envVars = append(envVars, fmt.Sprintf("ODS_IMAGE_TAG=%s", val)) + } + + return envVars +} diff --git a/tests/quickstarter/steps/template.go b/tests/quickstarter/steps/template.go new file mode 100644 index 000000000..4f0057f72 --- /dev/null +++ b/tests/quickstarter/steps/template.go @@ -0,0 +1,89 @@ +package steps + +import ( + "os" + 
"strconv" + "strings" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" +) + +// CreateTemplateData creates the template data map for rendering templates +func CreateTemplateData(config map[string]string, componentID string, buildName string, projectName string) TemplateData { + sanitizedOdsGitRef := strings.ReplaceAll(config["ODS_GIT_REF"], "/", "_") + sanitizedOdsGitRef = strings.ReplaceAll(sanitizedOdsGitRef, "-", "_") + var buildNumber string + if len(buildName) > 0 { + buildParts := strings.Split(buildName, "-") + buildNumber = buildParts[len(buildParts)-1] + } + aquaEnabled, _ := strconv.ParseBool(config["AQUA_ENABLED"]) //nolint:errcheck + + // Initialize template data map with standard fields + data := TemplateData{ + "ProjectID": projectName, + "ComponentID": componentID, + "OdsNamespace": config["ODS_NAMESPACE"], + "OdsGitRef": config["ODS_GIT_REF"], + "OdsImageTag": config["ODS_IMAGE_TAG"], + "OdsBitbucketProject": config["ODS_BITBUCKET_PROJECT"], + "SanitizedOdsGitRef": sanitizedOdsGitRef, + "BuildNumber": buildNumber, + "SonarQualityProfile": getEnv("SONAR_QUALITY_PROFILE", "Sonar way"), + "AquaEnabled": aquaEnabled, + } + + // Add all config map entries whose keys don't contain PASSWORD, PASS, or TOKEN + for key, value := range config { + keyUpper := strings.ToUpper(key) + if !strings.Contains(keyUpper, "PASSWORD") && !strings.Contains(keyUpper, "PASS") && !strings.Contains(keyUpper, "TOKEN") { + // Only add if not already present to avoid overwriting standard fields + if _, exists := data[key]; !exists { + data[key] = value + } + } + } + + // Automatically load all environment variables with TMPL_ prefix + // Example: TMPL_MyVariable becomes accessible as {{.MyVariable}} + // We check known TMPL_ variables and also scan all environment variables + tmplVars := []string{ + "TMPL_SonarQualityGate", + "TMPL_SonarQualityProfile", + } + + // First, add any explicitly known TMPL_ variables + for _, tmplVar := range tmplVars { + if value, ok 
:= os.LookupEnv(tmplVar); ok { + key := strings.TrimPrefix(tmplVar, "TMPL_") + data[key] = value + logger.Debug("Loading environment variable: %s -> %s = '%s'", tmplVar, key, value) + } + } + + // Also scan all environment variables for any other TMPL_ prefixed ones + for _, env := range os.Environ() { + if strings.HasPrefix(env, "TMPL_") { + pair := strings.SplitN(env, "=", 2) + if len(pair) == 2 { + key := strings.TrimPrefix(pair[0], "TMPL_") + // Only add if not already added above + if _, exists := data[key]; !exists { + data[key] = pair[1] + logger.Debug("Loading environment variable: %s -> %s = '%s'", pair[0], key, pair[1]) + } + } + } + } + + return data +} + +// getEnv gets an environment variable with a default value +func getEnv(key, defaultValue string) string { + value := os.Getenv(key) + if value == "" { + return defaultValue + } + return value +} diff --git a/tests/quickstarter/steps/types.go b/tests/quickstarter/steps/types.go new file mode 100644 index 000000000..d885ecd4e --- /dev/null +++ b/tests/quickstarter/steps/types.go @@ -0,0 +1,394 @@ +package steps + +import ( + "bytes" + "testing" + "text/template" + + "github.com/opendevstack/ods-core/tests/utils" +) + +// Step type constants +const ( + StepTypeUpload = "upload" + StepTypeRun = "run" + StepTypeProvision = "provision" + StepTypeBuild = "build" + StepTypeHTTP = "http" + StepTypeWait = "wait" + StepTypeInspect = "inspect" + StepTypeExposeService = "expose-service" + StepTypeBitbucket = "bitbucket" +) + +// Default values +const ( + DefaultBranch = "master" + DefaultJenkinsfile = "Jenkinsfile" + DefaultNamespace = "dev" +) + +// Verification strategies +const ( + VerifyStrategyAggregate = "aggregate" + VerifyStrategyFailFast = "fail-fast" +) + +// TestStep describes one step to execute. A step consists of a type (e.g. +// "build"), and the related params for it (e.g. "buildParams"). 
+type TestStep struct {
+	// Type of the step - one of "build", "provision", "upload", "run", "http", "wait", "inspect", "expose-service", "bitbucket"
+	Type string `json:"type"`
+	// Optional description to explain the step's purpose
+	Description string `json:"description"`
+	// ComponentID name for that step (overwrites global component name)
+	ComponentID string `json:"componentID"`
+	// Skip this step if true (supports templating)
+	Skip bool `json:"skip"`
+	// Skip condition - skip this step if this expression evaluates to true (supports templating)
+	SkipIf string `json:"skipIf"`
+	// Timeout for this step in seconds (step-specific override)
+	Timeout int `json:"timeout"`
+	// Retry configuration for this step
+	Retry *StepRetryConfig `json:"retry"`
+	// Hook to execute before this step (file path relative to testdata)
+	BeforeStep string `json:"beforeStep"`
+	// Hook to execute after this step (file path relative to testdata)
+	AfterStep string `json:"afterStep"`
+	// Parameters for "provision" step type
+	ProvisionParams *TestStepProvisionParams `json:"provisionParams"`
+	// Parameters for "build" step type
+	BuildParams *TestStepBuildParams `json:"buildParams"`
+	// Parameters for "upload" step type
+	UploadParams *TestStepUploadParams `json:"uploadParams"`
+	// Parameters for "run" step type
+	RunParams *TestStepRunParams `json:"runParams"`
+	// Parameters for "http" step type
+	HTTPParams *TestStepHTTPParams `json:"httpParams"`
+	// Parameters for "wait" step type
+	WaitParams *TestStepWaitParams `json:"waitParams"`
+	// Parameters for "inspect" step type
+	InspectParams *TestStepInspectParams `json:"inspectParams"`
+	// Parameters for "expose-service" step type
+	ExposeServiceParams *TestStepExposeServiceParams `json:"exposeServiceParams"`
+	// Parameters for "bitbucket" step type
+	BitbucketParams *TestStepBitbucketParams `json:"bitbucketParams"`
+}
+
+// StepRetryConfig defines retry behavior for a step.
+type StepRetryConfig struct { + // Number of retry attempts (0 = no retries) + Attempts int `json:"attempts"` + // Delay between retries (e.g., "2s", "500ms") + Delay string `json:"delay"` + // Only retry on transient errors (timeout, connection issues) + OnlyTransient bool `json:"onlyTransient"` +} + +// TestStepRunParams defines the parameters for the "run" step type. +type TestStepRunParams struct { + // File to execute relative to "testdata" directory + File string `json:"file"` + // Services is an optional map of service name aliases to actual service names. + // This allows scripts to access multiple exposed services via named environment variables. + // Example: {"backend": "my-backend-svc", "frontend": "{{.ComponentID}}-frontend"} + // Will create: BACKEND_SERVICE_URL and FRONTEND_SERVICE_URL environment variables. + Services map[string]string `json:"services"` +} + +// TestStepUploadParams defines the parameters for the "upload" step type. +type TestStepUploadParams struct { + // File to add, commit and push to the repository (relative to "testdata" directory) + File string `json:"file"` + // Name of the uploaded file in the repository. Defaults to just the filename of +File+. + Filename string `json:"filename"` + // In case this is a template file that we want to render. + Render bool `json:"render"` + // In case we want to override the repository, it is relative to the project where we run it. + Repository string `json:"repository"` +} + +// TestStepProvisionParams defines the parameters for the "provision" step type. +type TestStepProvisionParams struct { + // Name of the quickstarter to provision. + Quickstarter string `json:"quickstarter"` + // Pipeline allows to customize the pipeline name. + // If empty, the pipeline name is generated. + Pipeline string `json:"pipeline"` + // Quickstarter branch for which to run the pipeline. + // For "provision" steps, it defaults to ODS_GIT_REF. + // For "build" steps, it defaults to "master". 
+ Branch string `json:"branch"` + // Jenkins Agent image tag. + // Defaults to ODS_IMAGE_TAG. + AgentImageTag string `json:"agentImageTag"` + // Jenkins Shared library Git reference. + // Defaults to AgentImageTag. + SharedLibraryRef string `json:"sharedLibraryRef"` + // Additional environment variables + Env []utils.EnvPair `json:"env"` + // Verify parameters. + Verify *TestStepVerify `json:"verify"` + // In case we want to override the repository, it is relative to the project where we run it. + Repository string `json:"repository"` + // Extra resources to remove + TestResourcesCleanUp []struct { + // Type of the resource + ResourceType string `json:"resourceType"` + // Name of the resource + ResourceName string `json:"resourceName"` + // Namespace + Namespace string `json:"namespace"` + } `json:"testResourcesCleanUp"` +} + +// TestStepBuildParams defines the parameters for the "build" step type. +type TestStepBuildParams TestStepProvisionParams + +// TestStepHTTPParams defines the parameters for the "http" step type. +type TestStepHTTPParams struct { + // URL to test (supports templating) + URL string `json:"url"` + // HTTP method (GET, POST, PUT, DELETE, etc.) 
+ Method string `json:"method"` + // Request headers + Headers map[string]string `json:"headers"` + // Request body + Body string `json:"body"` + // Expected HTTP status code + ExpectedStatus int `json:"expectedStatus"` + // Path to golden file with expected response body (relative to "testdata" directory) + ExpectedBody string `json:"expectedBody"` + // JSONPath assertions + Assertions []HTTPAssertion `json:"assertions"` + // Timeout in seconds + Timeout int `json:"timeout"` + // Retry configuration + Retry *HTTPRetry `json:"retry"` +} + +// HTTPAssertion defines a JSONPath-based assertion +type HTTPAssertion struct { + // JSONPath expression + Path string `json:"path"` + // Expected value (exact match) + Equals interface{} `json:"equals"` + // Check if path exists + Exists *bool `json:"exists"` + // Check if value contains string + Contains string `json:"contains"` + // Check if value matches regex + Matches string `json:"matches"` +} + +// HTTPRetry defines retry configuration for HTTP requests +type HTTPRetry struct { + // Number of retry attempts + Attempts int `json:"attempts"` + // Delay between retries (e.g., "2s", "500ms") + Delay string `json:"delay"` +} + +// TestStepWaitParams defines the parameters for the "wait" step type. +type TestStepWaitParams struct { + // Type of wait condition + Condition string `json:"condition"` + // Resource to wait for (for OpenShift resources) + Resource string `json:"resource"` + // URL to wait for (for http-accessible condition) + URL string `json:"url"` + // Log message to wait for (for log-contains condition) + Message string `json:"message"` + // Namespace (defaults to {{.ProjectID}}-dev) + Namespace string `json:"namespace"` + // Timeout duration (e.g., "300s", "5m") + Timeout string `json:"timeout"` + // Polling interval (e.g., "5s") + Interval string `json:"interval"` +} + +// TestStepInspectParams defines the parameters for the "inspect" step type. 
+type TestStepInspectParams struct { + // Resource to inspect (e.g., "deployment/my-app", "pod/my-pod") + Resource string `json:"resource"` + // Namespace (defaults to {{.ProjectID}}-dev) + Namespace string `json:"namespace"` + // Checks to perform + Checks *InspectChecks `json:"checks"` +} + +// InspectChecks defines what to check in the container +type InspectChecks struct { + // Log content checks + Logs *LogChecks `json:"logs"` + // Environment variable checks + Env map[string]string `json:"env"` + // Resource limit checks + Resources *ResourceChecks `json:"resources"` +} + +// LogChecks defines log content assertions +type LogChecks struct { + // Log should contain these strings + Contains []string `json:"contains"` + // Log should NOT contain these strings + NotContains []string `json:"notContains"` + // Log should match these regex patterns + Matches []string `json:"matches"` +} + +// ResourceChecks defines resource limit/request checks +type ResourceChecks struct { + // Resource limits + Limits *ResourceValues `json:"limits"` + // Resource requests + Requests *ResourceValues `json:"requests"` +} + +// ResourceValues defines CPU and memory values +type ResourceValues struct { + CPU string `json:"cpu"` + Memory string `json:"memory"` +} + +// TestStepExposeServiceParams defines the parameters for the "expose-service" step type. +type TestStepExposeServiceParams struct { + // Services to expose (list of service configurations) + Services []ExposeServiceConfig `json:"services"` +} + +// ExposeServiceConfig defines a single service to expose +type ExposeServiceConfig struct { + // Name of the service to expose + ServiceName string `json:"serviceName"` + // Namespace where the service is located (supports templating, defaults to {{.ProjectID}}-dev) + Namespace string `json:"namespace"` + // Port to expose (defaults to 8080) + Port string `json:"port"` +} + +// TestStepVerify defines the items to verify. 
+type TestStepVerify struct { + // Verification strategy: "aggregate" (default) collects all failures; "fail-fast" stops on first. + Strategy string `json:"strategy"` + // JSON file defining expected Jenkins stages (relative to "testdata" directory). + JenkinsStages string `json:"jenkinsStages"` + // JSON file defining expected Sonar scan result (relative to "testdata" directory). + SonarScan string `json:"sonarScan"` + // Names of expected attachments to the Jenkins run. + RunAttachments []string `json:"runAttachments"` + // Number of expected test results. + TestResults int `json:"testResults"` + // Expected OpenShift resources in the *-dev namespace. + OpenShiftResources *struct { + // Namespace in which to look for resources (defaults to *-dev). + Namespace string `json:"namespace"` + // Image tags + ImageTags []struct { + // Name of the image + Name string `json:"name"` + // Tag of the image + Tag string `json:"tag"` + } `json:"imageTags"` + // BuildConfig resources (OpenShift) + BuildConfigs []string `json:"buildConfigs"` + // ImageStream resources (OpenShift) + ImageStreams []string `json:"imageStreams"` + // DeploymentConfig resources (OpenShift) + DeploymentConfigs []string `json:"deploymentConfigs"` + // Deployment resources (Kubernetes) + Deployments []string `json:"deployments"` + // StatefulSet resources (Kubernetes) + StatefulSets []string `json:"statefulSets"` + // DaemonSet resources (Kubernetes) + DaemonSets []string `json:"daemonSets"` + // ReplicaSet resources (Kubernetes) + ReplicaSets []string `json:"replicaSets"` + // Service resources. The check includes verifying that a running, ready pod is assigned. 
+ Services []string `json:"services"` + // Route resources (OpenShift) + Routes []string `json:"routes"` + // Ingress resources (Kubernetes) + Ingresses []string `json:"ingresses"` + // ConfigMap resources + ConfigMaps []string `json:"configMaps"` + // Secret resources + Secrets []string `json:"secrets"` + // PersistentVolumeClaim resources + PersistentVolumeClaims []string `json:"persistentVolumeClaims"` + // ServiceAccount resources + ServiceAccounts []string `json:"serviceAccounts"` + // Role resources + Roles []string `json:"roles"` + // RoleBinding resources + RoleBindings []string `json:"roleBindings"` + // NetworkPolicy resources + NetworkPolicies []string `json:"networkPolicies"` + // Job resources + Jobs []string `json:"jobs"` + // CronJob resources + CronJobs []string `json:"cronJobs"` + // Pod resources + Pods []string `json:"pods"` + // HorizontalPodAutoscaler resources + HorizontalPodAutoscalers []string `json:"horizontalPodAutoscalers"` + } `json:"openShiftResources"` +} + +type TestStepBitbucketParams struct { + // Action to perform: "recreate-repo", "approve-pr", "get-pullrequest", "delete-files", or "upload-file" + Action string `json:"action"` + // Repository slug + Repository string `json:"repository"` + // Project key (required for recreate-repo, optional for approve-pr, delete-files, and upload-file) + Project string `json:"project"` + // Pull request ID (required for approve-pr and get-pullrequest actions) + PullRequestID string `json:"pullRequestID"` + // Reviewer username to add as reviewer (optional for approve-pr action) + Reviewer string `json:"reviewer"` + // Paths to delete (required for delete-files action, supports templating) + Paths []string `json:"paths"` + // Commit message for delete-files action (defaults to "Remove files/folders") + CommitMessage string `json:"commitMessage"` + // File to upload (relative to "testdata" directory) for upload-file action + File string `json:"file"` + // Name of the uploaded file in the repository 
(defaults to the basename of File) + Filename string `json:"filename"` + // Render the file as a Go template before uploading (upload-file action) + Render bool `json:"render"` + // Verify contains verification rules for PR content + Verify *TestStepBitbucketVerify `json:"verify"` +} + +// TestStepBitbucketVerify defines verification rules for Bitbucket operations +type TestStepBitbucketVerify struct { + // PRChecks contains JSON path-based checks for PR content + PRChecks map[string]interface{} `json:"prChecks"` +} + +// TemplateData holds template parameters. Those will be applied to all +// values defined in the steps, as they are treated as Go templates. +type TemplateData map[string]interface{} + +// renderTemplate applies template data to a template string +func renderTemplate(t *testing.T, tpl string, tmplData TemplateData) string { + var buffer bytes.Buffer + tmpl, err := template.New("template").Parse(tpl) + if err != nil { + t.Fatalf("Error parsing template: %s", err) + } + tmplErr := tmpl.Execute(&buffer, tmplData) + if tmplErr != nil { + t.Fatalf("Error rendering template: %s", tmplErr) + } + return buffer.String() +} + +// renderTemplates applies template data to multiple template strings +func renderTemplates(t *testing.T, tpls []string, tmplData TemplateData) []string { + rendered := []string{} + for _, tpl := range tpls { + rendered = append(rendered, renderTemplate(t, tpl, tmplData)) + } + return rendered +} diff --git a/tests/quickstarter/steps/upload.go b/tests/quickstarter/steps/upload.go new file mode 100644 index 000000000..d417d72cd --- /dev/null +++ b/tests/quickstarter/steps/upload.go @@ -0,0 +1,14 @@ +package steps + +import ( + "testing" +) + +// ExecuteUpload handles the upload step type. 
+func ExecuteUpload(t *testing.T, step TestStep, testdataPath string, tmplData TemplateData, repoName string, config map[string]string, projectName string) { + if step.UploadParams == nil { + t.Fatalf("Missing upload parameters.") + } + + uploadFileToBitbucket(t, step.UploadParams, tmplData, testdataPath, repoName, projectName, config) +} diff --git a/tests/quickstarter/steps/url_resolver.go b/tests/quickstarter/steps/url_resolver.go new file mode 100644 index 000000000..ca9c80c7d --- /dev/null +++ b/tests/quickstarter/steps/url_resolver.go @@ -0,0 +1,258 @@ +package steps + +import ( + "fmt" + "os" + "os/exec" + "regexp" + "strings" + "sync" + "testing" + "time" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" +) + +// RouteCache caches route lookups to avoid repeated queries +type RouteCache struct { + routes map[string]string // key: namespace/service -> value: route URL + mu sync.RWMutex +} + +var routeCache = &RouteCache{ + routes: make(map[string]string), +} + +// ServiceURL represents a parsed internal service URL +type ServiceURL struct { + ServiceName string + Namespace string + Port string + Path string + Scheme string +} + +// ResolveServiceURL intelligently resolves a service URL based on execution environment +// Strategy: +// 1. If URL is not an internal service URL (.svc.cluster.local), return as-is +// 2. Check if a route exists for the service -> use route URL +// 3. If running in cluster (Jenkins) -> use internal service DNS +// 4. 
If running locally -> setup port-forward and use localhost +func ResolveServiceURL(t *testing.T, rawURL string, tmplData TemplateData) string { + // First, render any template variables + rendered := renderTemplate(t, rawURL, tmplData) + + // Check if this is an internal service URL + if !isInternalServiceURL(rendered) { + // Already an external URL (http://, https://, or route) + return rendered + } + + // Parse the internal service URL + serviceURL, err := parseInternalServiceURL(rendered) + if err != nil { + // Can't parse, return as-is and let it fail naturally + logger.Warn("Could not parse service URL: %s (error: %v)", rendered, err) + return rendered + } + + logger.Running(fmt.Sprintf("Resolving service URL: %s/%s:%s", serviceURL.Namespace, serviceURL.ServiceName, serviceURL.Port)) + + // Strategy 1: Try to get route (works everywhere) + if routeURL := getRouteURL(serviceURL.ServiceName, serviceURL.Namespace); routeURL != "" { + finalURL := routeURL + serviceURL.Path + logger.Success(fmt.Sprintf("Using route: %s", finalURL)) + return finalURL + } + + // Strategy 2: If in cluster, use service DNS + if isRunningInCluster() { + logger.Success(fmt.Sprintf("Running in cluster, using service DNS: %s", rendered)) + return rendered + } + + // Strategy 3: Local development - setup port-forward + logger.Running("Running locally, setting up port-forward...") + localPort, err := EnsurePortForward(serviceURL.ServiceName, serviceURL.Namespace, serviceURL.Port) + if err != nil { + if t != nil { + t.Fatalf("Failed to setup port-forward for %s/%s:%s: %v", + serviceURL.Namespace, serviceURL.ServiceName, serviceURL.Port, err) + } + panic(fmt.Sprintf("Failed to setup port-forward: %v", err)) + } + + localURL := fmt.Sprintf("http://localhost:%d%s", localPort, serviceURL.Path) + logger.Success(fmt.Sprintf("Using port-forward: %s", localURL)) + return localURL +} + +// isInternalServiceURL checks if a URL is an internal Kubernetes service URL +func isInternalServiceURL(url string) 
bool { + return strings.Contains(url, ".svc.cluster.local") || strings.Contains(url, ".svc:") +} + +// parseInternalServiceURL parses an internal service URL into its components +// Expected format: http://service.namespace.svc.cluster.local:port/path +// Also supports: http://service.namespace.svc:port/path +func parseInternalServiceURL(url string) (*ServiceURL, error) { + // Pattern to match service URLs + // Group 1: scheme (http/https) + // Group 2: service name + // Group 3: namespace + // Group 4: port + // Group 5: path (optional) + pattern := `^(https?)://([^.]+)\.([^.]+)\.svc(?:\.cluster\.local)?:(\d+)(.*)$` + re := regexp.MustCompile(pattern) + + matches := re.FindStringSubmatch(url) + if matches == nil { + return nil, fmt.Errorf("URL does not match expected service URL format") + } + + return &ServiceURL{ + Scheme: matches[1], + ServiceName: matches[2], + Namespace: matches[3], + Port: matches[4], + Path: matches[5], + }, nil +} + +// isRunningInCluster detects if we're running inside a Kubernetes/OpenShift cluster +func isRunningInCluster() bool { + // Check for Kubernetes service environment variable + if os.Getenv("KUBERNETES_SERVICE_HOST") != "" { + return true + } + + // Check for service account token (mounted in pods) + if _, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token"); err == nil { + return true + } + + return false +} + +// getRouteURL queries OpenShift for a route and returns the URL if it exists +func getRouteURL(serviceName, namespace string) string { + cacheKey := fmt.Sprintf("%s/%s", namespace, serviceName) + + // Check cache first + routeCache.mu.RLock() + if cached, exists := routeCache.routes[cacheKey]; exists { + routeCache.mu.RUnlock() + return cached + } + routeCache.mu.RUnlock() + + // Query OpenShift for route + result := queryRoute(serviceName, namespace) + + // Cache the result (even if empty, to avoid repeated failed queries) + routeCache.mu.Lock() + routeCache.routes[cacheKey] = result + 
routeCache.mu.Unlock() + + return result +} + +// queryRoute performs the actual OpenShift route query +func queryRoute(serviceName, namespace string) string { + // Get route host + cmd := exec.Command("oc", "get", "route", serviceName, + "-n", namespace, + "-o", "jsonpath={.spec.host}", + "--ignore-not-found") + + output, err := cmd.CombinedOutput() + if err != nil { + // Route doesn't exist or error querying + return "" + } + + host := strings.TrimSpace(string(output)) + if host == "" { + return "" + } + + // Check if route uses TLS + tlsCmd := exec.Command("oc", "get", "route", serviceName, + "-n", namespace, + "-o", "jsonpath={.spec.tls.termination}", + "--ignore-not-found") + + tlsOutput, _ := tlsCmd.CombinedOutput() //nolint:errcheck + tlsTermination := strings.TrimSpace(string(tlsOutput)) + + // Determine scheme + scheme := "http" + if tlsTermination != "" { + scheme = "https" + } + + return fmt.Sprintf("%s://%s", scheme, host) +} + +// ClearRouteCache clears the route cache (useful for testing) +func ClearRouteCache() { + routeCache.mu.Lock() + defer routeCache.mu.Unlock() + routeCache.routes = make(map[string]string) +} + +// WaitForServiceReady waits for a service to become accessible +// This is useful after a build step completes, before running HTTP tests +func WaitForServiceReady(serviceName, namespace string, timeout time.Duration) error { + logger.Waiting(fmt.Sprintf("Waiting for service %s/%s to be ready...", namespace, serviceName)) + + deadline := time.Now().Add(timeout) + attempt := 0 + + for time.Now().Before(deadline) { + attempt++ + + // Check if service exists + cmd := exec.Command("oc", "get", "service", serviceName, + "-n", namespace, + "--ignore-not-found", + "-o", "jsonpath={.metadata.name}") + + output, err := cmd.CombinedOutput() + if err == nil && strings.TrimSpace(string(output)) == serviceName { + logger.Success(fmt.Sprintf("Service is ready after %d check(s)", attempt)) + return nil + } + + time.Sleep(2 * time.Second) + } + + 
return fmt.Errorf("service %s/%s did not become ready within %v", namespace, serviceName, timeout) +} + +// GetServicePort retrieves the primary port for a service +func GetServicePort(serviceName, namespace string) (string, error) { + cmd := exec.Command("oc", "get", "service", serviceName, + "-n", namespace, + "-o", "jsonpath={.spec.ports[0].port}") + + output, err := cmd.CombinedOutput() + if err != nil { + return "", fmt.Errorf("failed to get service port: %w (output: %s)", err, string(output)) + } + + port := strings.TrimSpace(string(output)) + if port == "" { + return "", fmt.Errorf("service has no ports defined") + } + + return port, nil +} + +// ConstructServiceURL builds a standard internal service URL +func ConstructServiceURL(serviceName, namespace, port, path string) string { + if !strings.HasPrefix(path, "/") && path != "" { + path = "/" + path + } + return fmt.Sprintf("http://%s.%s.svc.cluster.local:%s%s", serviceName, namespace, port, path) +} diff --git a/tests/quickstarter/steps/verification.go b/tests/quickstarter/steps/verification.go new file mode 100644 index 000000000..da80a5f62 --- /dev/null +++ b/tests/quickstarter/steps/verification.go @@ -0,0 +1,250 @@ +package steps + +import ( + b64 "encoding/base64" + "fmt" + "regexp" + "strconv" + "strings" + "testing" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" + "github.com/opendevstack/ods-core/tests/utils" +) + +// VerificationContext holds context for verifications +type VerificationContext struct { + TestdataPath string + RepoName string + BuildName string + Config map[string]string + TmplData TemplateData + ProjectName string +} + +// verifyPipelineRun checks that all expected values from the TestStepVerify +// definition are present. 
+func verifyPipelineRun(t *testing.T, step TestStep, verify *TestStepVerify, testdataPath string, repoName string, buildName string, config map[string]string, projectName string) { + if verify == nil { + logger.Info("No verification defined for build: %s", buildName) + return + } + + logger.StepVerification(fmt.Sprintf("pipeline run: %s", buildName)) + + projectNameCD := fmt.Sprintf("%s-cd", projectName) + + ctx := VerificationContext{ + TestdataPath: testdataPath, + RepoName: repoName, + BuildName: buildName, + Config: config, + TmplData: CreateTemplateData(config, step.ComponentID, buildName, projectName), + ProjectName: projectName, + } + + strategy := strings.ToLower(strings.TrimSpace(verify.Strategy)) + if strategy == "" { + strategy = VerifyStrategyAggregate + } + if strategy != VerifyStrategyAggregate && strategy != VerifyStrategyFailFast { + logger.Warn("Unknown verify strategy %q, defaulting to aggregate", strategy) + strategy = VerifyStrategyAggregate + } + + logger.KeyValue("Verification Strategy", strategy) + + var errs []string + runCheck := func(name string, fn func() error) { + if fn == nil { + return + } + if strategy == VerifyStrategyFailFast { + if err := fn(); err != nil { + logger.Failure(fmt.Sprintf("Verification: %s", name), err) + t.Fatalf("Verification %s failed: %v", name, err) + } + return + } + if err := fn(); err != nil { + errs = append(errs, fmt.Sprintf("%s: %v", name, err)) + } + } + + runCheck("jenkins stages", func() error { + if len(verify.JenkinsStages) == 0 { + return nil + } + return verifyJenkinsStages(t, step, verify, ctx, projectNameCD) + }) + runCheck("sonar scan", func() error { + if len(verify.SonarScan) == 0 { + return nil + } + return verifySonarScan(t, step, verify, ctx) + }) + runCheck("run attachments", func() error { + if len(verify.RunAttachments) == 0 { + return nil + } + return verifyRunAttachments(t, verify, ctx, projectNameCD) + }) + runCheck("test results", func() error { + if verify.TestResults == 0 { + return 
nil + } + return verifyTestResults(t, verify, ctx, projectNameCD) + }) + runCheck("openshift resources", func() error { + if verify.OpenShiftResources == nil { + return nil + } + return verifyOpenShiftResources(t, step, verify, ctx, projectName) + }) + + if strategy == VerifyStrategyAggregate && len(errs) > 0 { + msg := fmt.Sprintf("Verification failed with %d issue(s):\n- %s", len(errs), strings.Join(errs, "\n- ")) + t.Fatal(msg) + } +} + +// verifyJenkinsStages verifies Jenkins stages +func verifyJenkinsStages(t *testing.T, step TestStep, verify *TestStepVerify, ctx VerificationContext, projectNameCD string) error { + logger.Waiting(fmt.Sprintf("Jenkins stages of %s", ctx.BuildName)) + stages, err := utils.RetrieveJenkinsBuildStagesForBuild(projectNameCD, ctx.BuildName) + if err != nil { + logger.Failure(fmt.Sprintf("Retrieve Jenkins stages for %s", ctx.BuildName), err) + return err + } + return verifyJSONGoldenFile( + step.ComponentID, + fmt.Sprintf("%s/%s", ctx.TestdataPath, verify.JenkinsStages), + stages, + ctx.TmplData, + ) +} + +// verifySonarScan verifies the Sonar scan +func verifySonarScan(t *testing.T, step TestStep, verify *TestStepVerify, ctx VerificationContext) error { + logger.Waiting(fmt.Sprintf("Sonar scan of %s", ctx.BuildName)) + sonarscan, err := retrieveSonarScan(ctx.RepoName, ctx.Config) + if err != nil { + return err + } + return verifyJSONGoldenFile( + step.ComponentID, + fmt.Sprintf("%s/%s", ctx.TestdataPath, verify.SonarScan), + sonarscan, + ctx.TmplData, + ) +} + +// verifyRunAttachments verifies run attachments +func verifyRunAttachments(t *testing.T, verify *TestStepVerify, ctx VerificationContext, projectNameCD string) error { + logger.Waiting(fmt.Sprintf("Jenkins run attachments of %s", ctx.BuildName)) + artifactsToVerify := []string{} + for _, a := range verify.RunAttachments { + artifact := renderTemplate(t, a, ctx.TmplData) + artifactsToVerify = append(artifactsToVerify, artifact) + } + return 
utils.VerifyJenkinsRunAttachments(projectNameCD, ctx.BuildName, artifactsToVerify) +} + +// verifyTestResults verifies test results +func verifyTestResults(t *testing.T, verify *TestStepVerify, ctx VerificationContext, projectNameCD string) error { + logger.Waiting(fmt.Sprintf("Unit tests of %s", ctx.BuildName)) + stdout, stderr, err := utils.RunScriptFromBaseDir("tests/scripts/print-jenkins-unittest-results.sh", []string{ + projectNameCD, + ctx.BuildName, + }, []string{}) + if err != nil { + return fmt.Errorf("could not find unit tests for build:%s\nstdout: %s\nstderr:%s\nerr: %s", ctx.BuildName, stdout, stderr, err) + } + r := regexp.MustCompile("([0-9]+) tests") + match := r.FindStringSubmatch(stdout) + if match == nil { + return fmt.Errorf("could not find any unit tests for build:%s\nstdout: %s\nstderr:%s\nerr: %s", ctx.BuildName, stdout, stderr, err) + } + foundTests, err := strconv.Atoi(match[1]) + if err != nil { + return fmt.Errorf("could not convert number of unit tests to int: %w", err) + } + if foundTests < verify.TestResults { + return fmt.Errorf("expected %d unit tests, but found only %d for build:%s", verify.TestResults, foundTests, ctx.BuildName) + } + return nil +} + +// verifyOpenShiftResources verifies OpenShift resources +func verifyOpenShiftResources(t *testing.T, step TestStep, verify *TestStepVerify, ctx VerificationContext, projectName string) error { + projectNameDev := fmt.Sprintf("%s-dev", projectName) + ocNamespace := projectNameDev + + // Use custom namespace if specified, otherwise default to dev + if verify.OpenShiftResources.Namespace != "" { + customNamespace := renderTemplate(t, verify.OpenShiftResources.Namespace, ctx.TmplData) + // If it doesn't contain the project prefix, add it + if !strings.HasPrefix(customNamespace, projectName) { + ocNamespace = fmt.Sprintf("%s-%s", projectName, customNamespace) + } else { + ocNamespace = customNamespace + } + } + + logger.Waiting(fmt.Sprintf("OpenShift resources of %s in %s", 
step.ComponentID, ocNamespace)) + + imageTags := []utils.ImageTag{} + + resources := utils.Resources{ + Namespace: ocNamespace, + ImageTags: imageTags, + BuildConfigs: renderTemplates(t, verify.OpenShiftResources.BuildConfigs, ctx.TmplData), + DeploymentConfigs: renderTemplates(t, verify.OpenShiftResources.DeploymentConfigs, ctx.TmplData), + Deployments: renderTemplates(t, verify.OpenShiftResources.Deployments, ctx.TmplData), + StatefulSets: renderTemplates(t, verify.OpenShiftResources.StatefulSets, ctx.TmplData), + DaemonSets: renderTemplates(t, verify.OpenShiftResources.DaemonSets, ctx.TmplData), + ReplicaSets: renderTemplates(t, verify.OpenShiftResources.ReplicaSets, ctx.TmplData), + Services: renderTemplates(t, verify.OpenShiftResources.Services, ctx.TmplData), + ImageStreams: renderTemplates(t, verify.OpenShiftResources.ImageStreams, ctx.TmplData), + Routes: renderTemplates(t, verify.OpenShiftResources.Routes, ctx.TmplData), + Ingresses: renderTemplates(t, verify.OpenShiftResources.Ingresses, ctx.TmplData), + ConfigMaps: renderTemplates(t, verify.OpenShiftResources.ConfigMaps, ctx.TmplData), + Secrets: renderTemplates(t, verify.OpenShiftResources.Secrets, ctx.TmplData), + PersistentVolumeClaims: renderTemplates(t, verify.OpenShiftResources.PersistentVolumeClaims, ctx.TmplData), + ServiceAccounts: renderTemplates(t, verify.OpenShiftResources.ServiceAccounts, ctx.TmplData), + Roles: renderTemplates(t, verify.OpenShiftResources.Roles, ctx.TmplData), + RoleBindings: renderTemplates(t, verify.OpenShiftResources.RoleBindings, ctx.TmplData), + NetworkPolicies: renderTemplates(t, verify.OpenShiftResources.NetworkPolicies, ctx.TmplData), + Jobs: renderTemplates(t, verify.OpenShiftResources.Jobs, ctx.TmplData), + CronJobs: renderTemplates(t, verify.OpenShiftResources.CronJobs, ctx.TmplData), + Pods: renderTemplates(t, verify.OpenShiftResources.Pods, ctx.TmplData), + HorizontalPodAutoscalers: renderTemplates(t, verify.OpenShiftResources.HorizontalPodAutoscalers, 
ctx.TmplData), + } + + utils.CheckResources(resources, t) + return nil +} + +// retrieveSonarScan retrieves a Sonar scan result +func retrieveSonarScan(projectKey string, config map[string]string) (string, error) { + logger.Running(fmt.Sprintf("Retrieving Sonar scan for: %s", projectKey)) + + sonartoken, err := b64.StdEncoding.DecodeString(config["SONAR_AUTH_TOKEN_B64"]) + if err != nil { + return "", fmt.Errorf("failed to decode Sonar auth token: %w", err) + } + + stdout, stderr, err := utils.RunScriptFromBaseDir("tests/scripts/print-sonar-scan-run.sh", []string{ + string(sonartoken), + config["SONARQUBE_URL"], + projectKey, + }, []string{}) + + if err != nil { + logger.Error("Execution of print-sonar-scan-run.sh failed: stdout=%s, stderr=%s", stdout, stderr) + return "", err + } + logger.Success("Sonar scan result retrieved") + + return stdout, nil +} diff --git a/tests/quickstarter/steps/wait.go b/tests/quickstarter/steps/wait.go new file mode 100644 index 000000000..84709a657 --- /dev/null +++ b/tests/quickstarter/steps/wait.go @@ -0,0 +1,295 @@ +package steps + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + "testing" + "time" + + "github.com/opendevstack/ods-core/tests/quickstarter/logger" +) + +// Wait condition constants +const ( + WaitConditionPodReady = "pod-ready" + WaitConditionDeploymentComplete = "deployment-complete" + WaitConditionJobComplete = "job-complete" + WaitConditionRouteAccessible = "route-accessible" + WaitConditionHTTPAccessible = "http-accessible" + WaitConditionLogContains = "log-contains" +) + +// ExecuteWait handles the wait step type for waiting on asynchronous operations. 
+func ExecuteWait(t *testing.T, step TestStep, testdataPath string, tmplData TemplateData, projectName string) { + if step.WaitParams == nil { + t.Fatal("Missing wait parameters") + } + + params := step.WaitParams + + // Default namespace to {project}-dev + namespace := params.Namespace + if namespace == "" { + namespace = fmt.Sprintf("%s-dev", projectName) + } + namespace = renderTemplate(t, namespace, tmplData) + logger.KeyValue("Namespace", namespace) + + // Default timeout to 300s + timeout := params.Timeout + if timeout == "" { + timeout = "300s" + } + timeoutDuration, err := time.ParseDuration(timeout) + if err != nil { + t.Fatalf("Invalid timeout duration: %s", timeout) + } + logger.KeyValue("Timeout", timeout) + + // Default interval to 5s + interval := params.Interval + if interval == "" { + interval = "5s" + } + intervalDuration, err := time.ParseDuration(interval) + if err != nil { + t.Fatalf("Invalid interval duration: %s", interval) + } + logger.KeyValue("Interval", interval) + + logger.SubSection(fmt.Sprintf("Waiting for condition: %s", params.Condition)) + + // Execute the wait based on condition type + switch params.Condition { + case WaitConditionPodReady: + if err := waitForPodReady(t, params, namespace, tmplData, timeoutDuration, intervalDuration); err != nil { + logger.Failure(fmt.Sprintf("Pod ready condition for %s", params.Resource), err) + t.Fatal(err) + } + case WaitConditionDeploymentComplete: + if err := waitForDeploymentComplete(t, params, namespace, tmplData, timeoutDuration, intervalDuration); err != nil { + logger.Failure(fmt.Sprintf("Deployment completion for %s", params.Resource), err) + t.Fatal(err) + } + case WaitConditionJobComplete: + if err := waitForJobComplete(t, params, namespace, tmplData, timeoutDuration, intervalDuration); err != nil { + logger.Failure(fmt.Sprintf("Job completion for %s", params.Resource), err) + t.Fatal(err) + } + case WaitConditionRouteAccessible: + if err := waitForRouteAccessible(t, params, namespace, 
tmplData, timeoutDuration, intervalDuration); err != nil { + logger.Failure(fmt.Sprintf("Route accessibility for %s", params.Resource), err) + t.Fatal(err) + } + case WaitConditionHTTPAccessible: + if err := waitForHTTPAccessible(t, params, tmplData, timeoutDuration, intervalDuration); err != nil { + logger.Failure(fmt.Sprintf("HTTP accessibility for %s", params.URL), err) + t.Fatal(err) + } + case WaitConditionLogContains: + if err := waitForLogContains(t, params, namespace, tmplData, timeoutDuration, intervalDuration); err != nil { + logger.Failure(fmt.Sprintf("Log contains message: %s", params.Message), err) + t.Fatal(err) + } + default: + t.Fatalf("Unknown wait condition: %s", params.Condition) + } + + logger.Success(fmt.Sprintf("Condition met: %s", params.Condition)) +} + +// waitForPodReady waits for pods to be ready +func waitForPodReady(t *testing.T, params *TestStepWaitParams, namespace string, tmplData TemplateData, timeout, interval time.Duration) error { + resource := renderTemplate(t, params.Resource, tmplData) + + logger.Waiting(fmt.Sprintf("Pod to be ready: %s in namespace %s", resource, namespace)) + + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + // Use oc wait command + logger.Debug("Executing command: oc wait --for=condition=Ready %s -n %s --timeout=%s", resource, namespace, interval.String()) + cmd := exec.Command("oc", "wait", "--for=condition=Ready", "pod", resource, + "-n", namespace, + "--timeout="+interval.String()) + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err == nil { + return nil + } + + // Check if it's a timeout (continue waiting) or a real error + if !strings.Contains(stderr.String(), "timed out") { + logger.Warn("oc wait command error: %s (retrying...)", stderr.String()) + } + + time.Sleep(interval) + } + + return fmt.Errorf("timeout waiting for pod to be ready: %s", resource) +} + +// waitForDeploymentComplete waits for a deployment to 
complete +func waitForDeploymentComplete(t *testing.T, params *TestStepWaitParams, namespace string, tmplData TemplateData, timeout, interval time.Duration) error { + resource := renderTemplate(t, params.Resource, tmplData) + logger.Waiting(fmt.Sprintf("Deployment to complete: %s in namespace %s", resource, namespace)) + + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + // Check deployment status + cmd := exec.Command("oc", "rollout", "status", resource, + "-n", namespace, + "--timeout="+interval.String()) + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err == nil { + return nil + } + + time.Sleep(interval) + } + + return fmt.Errorf("timeout waiting for deployment to complete: %s", resource) +} + +// waitForJobComplete waits for a job to complete +func waitForJobComplete(t *testing.T, params *TestStepWaitParams, namespace string, tmplData TemplateData, timeout, interval time.Duration) error { + resource := renderTemplate(t, params.Resource, tmplData) + logger.Waiting(fmt.Sprintf("Job to complete: %s in namespace %s", resource, namespace)) + + deadline := time.Now().Add(timeout) + for time.Now().Before(deadline) { + cmd := exec.Command("oc", "wait", "--for=condition=complete", resource, + "-n", namespace, + "--timeout="+interval.String()) + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err == nil { + return nil + } + + time.Sleep(interval) + } + + return fmt.Errorf("timeout waiting for job to complete: %s", resource) +} + +// waitForRouteAccessible waits for a route to be accessible +func waitForRouteAccessible(t *testing.T, params *TestStepWaitParams, namespace string, tmplData TemplateData, timeout, interval time.Duration) error { + resource := renderTemplate(t, params.Resource, tmplData) + + // Extract route name from resource (e.g., "route/myapp" -> "myapp") + routeName := resource + if strings.Contains(resource, "/") 
{ + parts := strings.Split(resource, "/") + routeName = parts[len(parts)-1] + } + + logger.Waiting(fmt.Sprintf("Route to be accessible: %s in namespace %s", routeName, namespace)) + + // First, wait for the route to exist + deadline := time.Now().Add(timeout) + var routeURL string + + for time.Now().Before(deadline) { + cmd := exec.Command("oc", "get", "route", routeName, + "-n", namespace, + "-o", "jsonpath={.spec.host}") + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err == nil && stdout.String() != "" { + routeURL = "http://" + stdout.String() + break + } + + time.Sleep(interval) + } + + if routeURL == "" { + return fmt.Errorf("timeout waiting for route to exist: %s", routeName) + } + + // Now wait for the route to be HTTP accessible + return waitForHTTPURL(routeURL, timeout, interval) +} + +// waitForHTTPAccessible waits for an HTTP endpoint to be accessible +func waitForHTTPAccessible(t *testing.T, params *TestStepWaitParams, tmplData TemplateData, timeout, interval time.Duration) error { + url := renderTemplate(t, params.URL, tmplData) + + logger.Waiting(fmt.Sprintf("HTTP endpoint to be accessible: %s", url)) + + return waitForHTTPURL(url, timeout, interval) +} + +// waitForHTTPURL is a helper that polls an HTTP URL until it responds +func waitForHTTPURL(url string, timeout, interval time.Duration) error { + deadline := time.Now().Add(timeout) + + for time.Now().Before(deadline) { + // Use curl to test the endpoint + cmd := exec.Command("curl", "-f", "-s", "-o", "/dev/null", "-w", "%{http_code}", url) + + var stdout bytes.Buffer + cmd.Stdout = &stdout + + err := cmd.Run() + if err == nil { + statusCode := stdout.String() + // Accept any 2xx or 3xx status code + if strings.HasPrefix(statusCode, "2") || strings.HasPrefix(statusCode, "3") { + logger.Success(fmt.Sprintf("HTTP endpoint is accessible: %s (status: %s)", url, statusCode)) + return nil + } + } + + time.Sleep(interval) + } + + return 
fmt.Errorf("timeout waiting for HTTP endpoint to be accessible: %s", url) +} + +// waitForLogContains waits for a specific log message to appear +func waitForLogContains(t *testing.T, params *TestStepWaitParams, namespace string, tmplData TemplateData, timeout, interval time.Duration) error { + resource := renderTemplate(t, params.Resource, tmplData) + message := renderTemplate(t, params.Message, tmplData) + + logger.Waiting(fmt.Sprintf("Log message in %s: %q", resource, message)) + + deadline := time.Now().Add(timeout) + + for time.Now().Before(deadline) { + cmd := exec.Command("oc", "logs", resource, "-n", namespace, "--tail=100") + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + if err == nil && strings.Contains(stdout.String(), message) { + logger.Success(fmt.Sprintf("Log message found: %q", message)) + return nil + } + + time.Sleep(interval) + } + + return fmt.Errorf("timeout waiting for log message: %q in %s", message, resource) +} diff --git a/tests/scripts/delete-files-from-bitbucket-with-git.sh b/tests/scripts/delete-files-from-bitbucket-with-git.sh new file mode 100755 index 000000000..c30db14f5 --- /dev/null +++ b/tests/scripts/delete-files-from-bitbucket-with-git.sh @@ -0,0 +1,154 @@ +#!/usr/bin/env bash +set -eu +set -o pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +echo_done(){ + echo -e "\033[92mDONE\033[39m: $1" +} + +echo_warn(){ + echo -e "\033[93mWARN\033[39m: $1" +} + +echo_error(){ + echo -e "\033[31mERROR\033[39m: $1" +} + +echo_info(){ + echo -e "\033[94mINFO\033[39m: $1" +} + +BITBUCKET_URL="" +BITBUCKET_USER="" +BITBUCKET_PWD="" +BITBUCKET_PROJECT="unitt" +REPOSITORY= +BRANCH=master +PATHS=() +COMMIT_MESSAGE="Remove files/folders" + + +function usage { + printf "Delete files/folders from bitbucket.\n\n" + printf "This script will ask interactively for parameters by default.\n" + printf "However, you can also pass them directly. 
Usage:\n\n" + printf "\t-h|--help\t\tPrint usage\n" + printf "\t-v|--verbose\t\tEnable verbose mode\n" + printf "\t-i|--insecure\t\tAllow insecure server connections when using SSL\n" + printf "\n" + printf "\t-b|--bitbucket\t\tBitbucket URL, e.g. 'https://bitbucket.example.com'\n" + printf "\t-u|--user\t\tBitbucket user\n" + printf "\t-p|--password\t\tBitbucket password\n" + printf "\t-t|--project\tName of the Bitbucket project (defaults to '%s')\n" "${BITBUCKET_PROJECT}" + printf "\t-r|--repository\tName of the repository\n" + printf "\t-f|--files\tFiles/folders to delete (can be specified multiple times)\n" + printf "\t-m|--message\tCommit message (defaults to 'Remove files/folders')\n" +} + + +function create_url() { + url=$1 + user=$2 + password=$3 + + # URL encode the @ symbol in the username + user=$(echo $user | sed 's/@/%40/g') + password=$(echo $password | sed 's/@/%40/g') + + protocol=$(echo $url | grep :// | sed -e's,^\(.*://\).*,\1,g') + url=$(echo $url | sed -e s,$protocol,,g) + + echo "${protocol}${user}:${password}@${url}" +} + +function configure_user() { + git config --global user.email "x2odsedpcomm@boehringer-ingelheim.com" + git config --global user.name "EDPCommunity Automated Test" +} + +while [[ "$#" -gt 0 ]]; do + case $1 in + + -v|--verbose) set -x;; + + -h|--help) usage; exit 0;; + + -b|--bitbucket) BITBUCKET_URL="$2"; shift;; + -b=*|--bitbucket=*) BITBUCKET_URL="${1#*=}";; + + -u|--user) BITBUCKET_USER="$2"; shift;; + -u=*|--user=*) BITBUCKET_USER="${1#*=}";; + + -p|--password) BITBUCKET_PWD="$2"; shift;; + -p=*|--password=*) BITBUCKET_PWD="${1#*=}";; + + -t|--project) BITBUCKET_PROJECT="$2"; shift;; + -t=*|--project=*) BITBUCKET_PROJECT="${1#*=}";; + + -r|--repository) REPOSITORY="$2"; shift;; + -r=*|--repository=*) REPOSITORY="${1#*=}";; + + -f|--files) PATHS+=("$2"); shift;; + -f=*|--files=*) PATHS+=("${1#*=}");; + + -m|--message) COMMIT_MESSAGE="$2"; shift;; + -m=*|--message=*) COMMIT_MESSAGE="${1#*=}";; + + *) echo_error "Unknown 
parameter passed: $1"; usage; exit 1;; +esac; shift; done + +configure_user + +url=$(create_url "$BITBUCKET_URL" "$BITBUCKET_USER" "$BITBUCKET_PWD") + +# Create a temporary directory and store its name in a variable +TEMP_DIR=$(mktemp -d) + +# Clone the repository into the temporary directory +git clone "${url}/scm/${BITBUCKET_PROJECT}/${REPOSITORY}.git" "${TEMP_DIR}" + +# Change into the temporary directory +cd "${TEMP_DIR}" + +# Switch to the desired branch +git checkout "${BRANCH}" + +# Delete the files/folders +deleted_count=0 +for path in "${PATHS[@]}"; do + if [ -e "$path" ]; then + git rm -rf "$path" + deleted_count=$((deleted_count + 1)) + echo_info "Deleted: $path" + else + echo_warn "Path not found, skipping: $path" + fi +done + +if [ $deleted_count -eq 0 ]; then + echo_info "No files/folders found to delete" + cd - + rm -rf "${TEMP_DIR}" + exit 0 +fi + +# Check if there are changes to commit +if git diff --cached --quiet; then + echo_info "No changes to commit, all files already deleted or not found" +else + # Commit the changes + git commit -m "${COMMIT_MESSAGE}" + + # Push the changes + git push origin "${BRANCH}" +fi + +# Change back to the original directory +cd - + +# Remove the temporary directory +rm -rf "${TEMP_DIR}" + +echo_done "Deleted $deleted_count file(s)/folder(s) from ${BITBUCKET_PROJECT}/${REPOSITORY}" diff --git a/tests/scripts/free-unused-resources.sh b/tests/scripts/free-unused-resources.sh index 83890c00c..4fdd24ac9 100755 --- a/tests/scripts/free-unused-resources.sh +++ b/tests/scripts/free-unused-resources.sh @@ -2,8 +2,6 @@ echo " " -ME=$(basename $0) - function clean_containers { echo "Removing docker containers no more used... " if docker ps -a | grep -q 'Exited .* ago' ; then @@ -29,7 +27,6 @@ function clean_tests { } function clean_odsverify { - echo "Cleaning projects ODS__VERIFY... " if [ "true" == "$CLEAN_ODS_VERIFY" ]; then echo "Removing ODS VERIFY projects..." 
oc projects | grep '^\s*odsverify.*' | while read -r line; do @@ -44,14 +41,14 @@ function clean_odsverify { } function clean_images { - echo "Cleaning OC images" echo "oc adm prune images --keep-tag-revisions=1 --keep-younger-than=30m --confirm" oc adm prune images --keep-tag-revisions=1 --keep-younger-than=30m --confirm || true } function usage { + ME=$(basename $0) echo " " - echo "usage: ${ME} [--odsVerify] [--omitTests] [--omitTestsProject tes22]" + echo "usage: ${ME} [--odsVerify] [--omitTestsProject tes22]" echo " " } @@ -62,7 +59,6 @@ function echo_error() { OMIT_TESTS_PROJECT=none CLEAN_ODS_VERIFY="false" -CLEAN_TESTS="false" while [[ "$#" -gt 0 ]]; do case $1 in @@ -75,19 +71,11 @@ while [[ "$#" -gt 0 ]]; do --omitTestsProject) OMIT_TESTS_PROJECT="$2"; echo "Tests to omit: $OMIT_TESTS_PROJECT"; shift;; - --cleanTests) CLEAN_TESTS="true";; - *) echo_error "Unknown parameter passed: $1"; exit 1;; esac; shift; done clean_containers -if [ "true" == "${CLEAN_TESTS}" ]; then - clean_tests -else - echo " " - echo "${ME}: INFO: Not cleaning tests" - echo " " -fi +clean_tests clean_odsverify clean_images diff --git a/tests/scripts/get-artifact-from-jenkins-run.sh b/tests/scripts/get-artifact-from-jenkins-run.sh index a06539211..e9c63166e 100755 --- a/tests/scripts/get-artifact-from-jenkins-run.sh +++ b/tests/scripts/get-artifact-from-jenkins-run.sh @@ -6,10 +6,19 @@ PROJECT=$2 BUILD_NAME=$1 BUILD_URL=$(oc get -n ${PROJECT} build ${BUILD_NAME} -o jsonpath='{.metadata.annotations.openshift\.io/jenkins-build-uri}') echo $BUILD_URL -ARTIFACT_URL=$BUILD_URL/artifact/artifacts/$3 -echo "grabbing artifact from $ARTIFACT_URL - and storing in /tmp" -TOKEN=$(oc -n ${PROJECT} get sa/jenkins --template='{{range .secrets}}{{ .name }} {{end}}' | xargs -n 1 oc -n ${PROJECT} get secret --template='{{ if .data.token }}{{ .data.token }}{{end}}' | head -n 1 | base64 -d -) -httpCode=$(curl --insecure -sS ${ARTIFACT_URL} --header "Authorization: Bearer ${TOKEN}" -o /tmp/$3 -w 
"%{http_code}") +if [[ "$3" == *"/"* ]]; then + ARTIFACT_URL="${BUILD_URL}/artifact/${3}" +else + ARTIFACT_URL="${BUILD_URL}/artifact/artifacts/${3}" +fi + +# Extract just the filename from the artifact path for local storage +ARTIFACT_FILENAME=$(basename "$3") +OUTPUT_PATH="/tmp/${ARTIFACT_FILENAME}" + +echo "grabbing artifact from $ARTIFACT_URL - and storing in ${OUTPUT_PATH}" +TOKEN=$(oc whoami --show-token) +httpCode=$(curl --insecure -sS ${ARTIFACT_URL} --header "Authorization: Bearer ${TOKEN}" -o "${OUTPUT_PATH}" -w "%{http_code}") echo "response: $httpCode" if [ ! "${httpCode}" == "200" ]; then echo "Could not find artifact $3 - url: $ARTIFACT_URL" diff --git a/tests/scripts/print-jenkins-log.sh b/tests/scripts/print-jenkins-log.sh index 3da6d75bc..17f5e2ff8 100755 --- a/tests/scripts/print-jenkins-log.sh +++ b/tests/scripts/print-jenkins-log.sh @@ -26,7 +26,7 @@ if [ "OC_ERROR" == "${LOG_URL}" ]; then OC_ERROR="true" TOKEN="OC_ERROR" else - TOKEN=$(oc -n ${PROJECT} get sa/jenkins --template='{{range .secrets}}{{ .name }} {{end}}' | xargs -n 1 oc -n ${PROJECT} get secret --template='{{ if .data.token }}{{ .data.token }}{{end}}' | head -n 1 | base64 -d -) + TOKEN=$(oc whoami --show-token) fi if [ -f ${JENKINS_LOG_FILE} ]; then diff --git a/tests/scripts/print-jenkins-unittest-results.sh b/tests/scripts/print-jenkins-unittest-results.sh index fdb2370a2..f4a1d890c 100755 --- a/tests/scripts/print-jenkins-unittest-results.sh +++ b/tests/scripts/print-jenkins-unittest-results.sh @@ -7,6 +7,6 @@ BUILD_NAME=$2 BUILD_URL=$(oc get -n "${PROJECT}" build "${BUILD_NAME}" -o jsonpath='{.metadata.annotations.openshift\.io/jenkins-build-uri}') echo "Using $BUILD_URL/testReport calculated from ${BUILD_NAME}" -TOKEN=$(oc -n "${PROJECT}" get sa/jenkins --template='{{range .secrets}}{{ .name }} {{end}}' | xargs -n 1 oc -n "${PROJECT}" get secret --template='{{ if .data.token }}{{ .data.token }}{{end}}' | head -n 1 | base64 -d -) +TOKEN=$(oc whoami --show-token) curl --insecure 
-sS "${BUILD_URL}/testReport" --location --header "Authorization: Bearer ${TOKEN}" diff --git a/tests/scripts/print-sonar-scan-run.sh b/tests/scripts/print-sonar-scan-run.sh index da64b53ca..a76b46373 100755 --- a/tests/scripts/print-sonar-scan-run.sh +++ b/tests/scripts/print-sonar-scan-run.sh @@ -2,4 +2,12 @@ set -eu set -o pipefail -curl -sS --insecure -u $1: $2/api/navigation/component?componentKey=$3 | jq 'del(.analysisDate)' | jq 'del(.version)' | jq 'del(.id)' | jq 'del(.qualityProfiles[].key)' | jq 'del(.qualityGate.key)' | jq 'del (.extensions[] | select(.))' +curl -sS --insecure -u $1: $2/api/navigation/component?component=$3 | \ +jq 'del(.analysisDate)' | \ +jq 'del(.version)' | \ +jq 'del(.id)' | \ +jq 'del(.qualityProfiles[].key)' | \ +jq 'del(.qualityGate.key)' | \ +jq 'del(.extensions[] | select(.))' | \ +jq 'del(.qualityProfiles[] | select(.language == "xml"))' | \ +jq 'del(.organization)' diff --git a/tests/scripts/upload-file-to-bitbucket-with-git.sh b/tests/scripts/upload-file-to-bitbucket-with-git.sh new file mode 100755 index 000000000..210c8c26b --- /dev/null +++ b/tests/scripts/upload-file-to-bitbucket-with-git.sh @@ -0,0 +1,143 @@ +#!/usr/bin/env bash +set -eu +set -o pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +echo_done(){ + echo -e "\033[92mDONE\033[39m: $1" +} + +echo_warn(){ + echo -e "\033[93mWARN\033[39m: $1" +} + +echo_error(){ + echo -e "\033[31mERROR\033[39m: $1" +} + +echo_info(){ + echo -e "\033[94mINFO\033[39m: $1" +} + +BITBUCKET_URL="" +BITBUCKET_USER="" +BITBUCKET_PWD="" +BITBUCKET_PROJECT="unitt" +REPOSITORY= +BRANCH=master +FILE= +REPO_FILE= + + +function usage { + printf "Upload file to bitbucket.\n\n" + printf "This script will ask interactively for parameters by default.\n" + printf "However, you can also pass them directly. 
Usage:\n\n" + printf "\t-h|--help\t\tPrint usage\n" + printf "\t-v|--verbose\t\tEnable verbose mode\n" + printf "\t-i|--insecure\t\tAllow insecure server connections when using SSL\n" + printf "\n" + printf "\t-b|--bitbucket\t\tBitbucket URL, e.g. 'https://bitbucket.example.com'\n" + printf "\t-u|--user\t\tBitbucket user\n" + printf "\t-p|--password\t\tBitbucket password\n" + printf "\t-t|--project\tName of the Bitbucket project (defaults to '%s')\n" "${BITBUCKET_PROJECT}" + printf "\t-r|--repository\tName of the repository\n" + printf "\t-f|--file\tFile to upload\n" + printf "\t-n|--filename\tName of the file\n" + +} + + +function create_url() { + url=$1 + user=$2 + password=$3 + + # URL encode the @ symbol in the username + user=$(echo $user | sed 's/@/%40/g') + password=$(echo $password | sed 's/@/%40/g') + + protocol=$(echo $url | grep :// | sed -e's,^\(.*://\).*,\1,g') + url=$(echo $url | sed -e s,$protocol,,g) + + echo "${protocol}${user}:${password}@${url}" +} + +function configure_user() { + git config --global user.email "x2odsedpcomm@boehringer-ingelheim.com" + git config --global user.name "EDPCommunity Automated Test" +} + +while [[ "$#" -gt 0 ]]; do + case $1 in + + -v|--verbose) set -x;; + + -h|--help) usage; exit 0;; + + -b|--bitbucket) BITBUCKET_URL="$2"; shift;; + -b=*|--bitbucket=*) BITBUCKET_URL="${1#*=}";; + + -u|--user) BITBUCKET_USER="$2"; shift;; + -u=*|--user=*) BITBUCKET_USER="${1#*=}";; + + -p|--password) BITBUCKET_PWD="$2"; shift;; + -p=*|--password=*) BITBUCKET_PWD="${1#*=}";; + + -t|--project) BITBUCKET_PROJECT="$2"; shift;; + -t=*|--project=*) BITBUCKET_PROJECT="${1#*=}";; + + -r|--repository) REPOSITORY="$2"; shift;; + -r=*|--repository=*) REPOSITORY="${1#*=}";; + + -f|--file) FILE="$2"; shift;; + -f=*|--file=*) FILE="${1#*=}";; + + -n|--filename) REPO_FILE="$2"; shift;; + -n=*|--filename=*) REPO_FILE="${1#*=}";; + + *) echo_error "Unknown parameter passed: $1"; usage; exit 1;; +esac; shift; done + +configure_user + +url=$(create_url 
"$BITBUCKET_URL" "$BITBUCKET_USER" "$BITBUCKET_PWD") + +# Create a temporary directory and store its name in a variable +TEMP_DIR=$(mktemp -d) + +# Clone the repository into the temporary directory +git clone "${url}/scm/${BITBUCKET_PROJECT}/${REPOSITORY}.git" "${TEMP_DIR}" + +# Change into the temporary directory +cd "${TEMP_DIR}" + +# Switch to the desired branch +git checkout "${BRANCH}" + +# Ensure the target directory exists when a path is provided +mkdir -p "$(dirname "${REPO_FILE}")" + +# Copy the file into the repository +cp -f "$SCRIPT_DIR/${FILE}" "${REPO_FILE}" + +# Add the file to the repository +git add "${REPO_FILE}" + +# Commit and push only if there are changes +if git diff --cached --quiet; then + echo_info "No changes to commit, file is already up to date" +else + # Commit the change + git commit -m "Automated commit from test script" + + # Push the change + git push origin "${BRANCH}" +fi + +# Change back to the original directory +cd - + +# Remove the temporary directory +rm -rf "${TEMP_DIR}" \ No newline at end of file diff --git a/tests/scripts/upload-file-to-bitbucket.sh b/tests/scripts/upload-file-to-bitbucket.sh index c5166685c..e39dca497 100755 --- a/tests/scripts/upload-file-to-bitbucket.sh +++ b/tests/scripts/upload-file-to-bitbucket.sh @@ -76,24 +76,71 @@ while [[ "$#" -gt 0 ]]; do *) echo_error "Unknown parameter passed: $1"; exit 1;; esac; shift; done -lastCommit=$(curl --insecure -sS \ - -u "${BITBUCKET_USER}:${BITBUCKET_PWD}" \ - "${BITBUCKET_URL}/rest/api/latest/projects/${BITBUCKET_PROJECT}/repos/${REPOSITORY}/commits" | jq .values[0].id | sed 's|\"||g') - -echo "last commit: ${lastCommit}" - httpCode=$(curl --insecure -sS \ -u "${BITBUCKET_USER}:${BITBUCKET_PWD}" \ - -X PUT \ - -F branch=$BRANCH \ - -F sourceCommitId=$lastCommit \ - -F "comment=ods test" \ - -F "content=@${FILE}" \ - -F filename=blob \ + -X GET \ "${BITBUCKET_URL}/rest/api/latest/projects/${BITBUCKET_PROJECT}/repos/${REPOSITORY}/browse/${REPO_FILE}" \ + -o 
/dev/null \ -w "%{http_code}") if [ $httpCode != "200" ]; then - echo "An error occured during update of ${BITBUCKET_URL}/rest/api/latest/projects/${BITBUCKET_PROJECT}/repos/${REPOSITORY}/browse/${REPO_FILE} - error:$httpCode" - exit 1 + echo "New file added to Bitbucket" + httpCode=$(curl --insecure -sS \ + -u "${BITBUCKET_USER}:${BITBUCKET_PWD}" \ + -X PUT \ + -F branch=$BRANCH \ + -F "comment=ods test" \ + -F "content=@${FILE}" \ + -F filename=blob \ + "${BITBUCKET_URL}/rest/api/latest/projects/${BITBUCKET_PROJECT}/repos/${REPOSITORY}/browse/${REPO_FILE}" \ + -o /dev/null \ + -w "%{http_code}") + + if [ $httpCode != "200" ]; then + echo "An error occurred during creation of ${BITBUCKET_URL}/rest/api/latest/projects/${BITBUCKET_PROJECT}/repos/${REPOSITORY}/browse/${REPO_FILE} - error:$httpCode" + exit 1 + fi + +else + echo "Update existing file." + + lastCommit=$(curl --insecure -sS \ + -u "${BITBUCKET_USER}:${BITBUCKET_PWD}" \ + "${BITBUCKET_URL}/rest/api/latest/projects/${BITBUCKET_PROJECT}/repos/${REPOSITORY}/commits" | jq .values[0].id | sed 's|\"||g') + + echo "last commit: ${lastCommit}" + + + echo "curl --insecure -sS \ + -u \"${BITBUCKET_USER}:${BITBUCKET_PWD}\" \ + -X PUT \ + -F branch=$BRANCH \ + -F sourceCommitId=$lastCommit \ + -F \"comment=ods test\" \ + -F \"content=@${FILE}\" \ + -F filename=blob \ + \"${BITBUCKET_URL}/rest/api/latest/projects/${BITBUCKET_PROJECT}/repos/${REPOSITORY}/browse/${REPO_FILE}\" \ + -o /dev/null \ + -w \"%{http_code}\"" + + + httpCode=$(curl --insecure -sS \ + -u "${BITBUCKET_USER}:${BITBUCKET_PWD}" \ + -X PUT \ + -F branch=$BRANCH \ + -F sourceCommitId=$lastCommit \ + -F "comment=ods test" \ + -F "content=@${FILE}" \ + -F filename=blob \ + "${BITBUCKET_URL}/rest/api/latest/projects/${BITBUCKET_PROJECT}/repos/${REPOSITORY}/browse/${REPO_FILE}" \ + -o /dev/null \ + -w "%{http_code}") + + echo "Upload error code: $httpCode" + if [ "$httpCode" != "200" ] && [ "$httpCode" != "409" ]; then + echo "An error occurred during update 
of ${BITBUCKET_URL}/rest/api/latest/projects/${BITBUCKET_PROJECT}/repos/${REPOSITORY}/browse/${REPO_FILE} - error:$httpCode" + exit 1 + fi fi + + diff --git a/tests/utils/configmaps.go b/tests/utils/configmaps.go new file mode 100644 index 000000000..18e538e7e --- /dev/null +++ b/tests/utils/configmaps.go @@ -0,0 +1,17 @@ +package utils + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" +) + +func FindConfigMap(configMaps *v1.ConfigMapList, configMapName string) error { + for _, configMap := range configMaps.Items { + if configMapName == configMap.Name { + return nil + } + } + + return fmt.Errorf("ConfigMap '%s' not found.", configMapName) +} diff --git a/tests/utils/constants.go b/tests/utils/constants.go deleted file mode 100644 index 53b4be03d..000000000 --- a/tests/utils/constants.go +++ /dev/null @@ -1,7 +0,0 @@ -package utils - -const PROJECT_NAME = "unitt" -const PROJECT_ENV_VAR = "PROJECT_ID=" + PROJECT_NAME -const PROJECT_NAME_CD = "unitt-cd" -const PROJECT_NAME_DEV = "unitt-dev" -const PROJECT_NAME_TEST = "unitt-test" diff --git a/tests/utils/environment.go b/tests/utils/environment.go new file mode 100644 index 000000000..282a61311 --- /dev/null +++ b/tests/utils/environment.go @@ -0,0 +1,10 @@ +package utils + +import "os" + +func GetEnv(key, fallback string) string { + if value, ok := os.LookupEnv(key); ok { + return value + } + return fallback +} diff --git a/tests/utils/filter-quickstarters.go b/tests/utils/filter-quickstarters.go new file mode 100644 index 000000000..5eee50e47 --- /dev/null +++ b/tests/utils/filter-quickstarters.go @@ -0,0 +1,55 @@ +package utils + +import ( + "bufio" + "fmt" + "os" + "strings" + "testing" +) + +func RemoveExcludedQuickstarters(t *testing.T, dir string, quickstarterPaths []string) []string { + var quickstarterPathsFiltered []string + var exclusionList []string + + var filePath string = fmt.Sprintf("%s/../%s", dir, "quickStartersExclusionList.txt") + fmt.Printf("\n\nLooking for file quickStartersExclusionList.txt ... 
%s\n", filePath) + + _, err := os.Stat(filePath) + + if os.IsNotExist(err) { + fmt.Printf("File %s does not exist, The list of Quickstarters is not filtered.\n", filePath) + return quickstarterPaths + } + + file, err := os.Open(filePath) + if err != nil { + fmt.Println(err) + t.Fatal(err) + } + defer file.Close() + + scanner := bufio.NewScanner(file) + for scanner.Scan() { + exclusionList = append(exclusionList, scanner.Text()) + } + + fmt.Printf("\n\nQuickStarters that will be excluded...\n%s", exclusionList) + + for _, quickstarterPath := range quickstarterPaths { + if sliceContainsString(exclusionList, quickstarterPath) == -1 { + quickstarterPathsFiltered = append(quickstarterPathsFiltered, quickstarterPath) + } + } + + return quickstarterPathsFiltered +} + +func sliceContainsString(slice []string, str string) int { + for pos, s := range slice { + if strings.Contains(str, s) { + return pos + } + } + return -1 +} diff --git a/tests/utils/jenkins.go b/tests/utils/jenkins.go index c484c17e7..d3080f914 100644 --- a/tests/utils/jenkins.go +++ b/tests/utils/jenkins.go @@ -5,8 +5,9 @@ import ( "crypto/tls" "encoding/json" "fmt" - "io/ioutil" + "io" "net/http" + "os" "strings" "time" @@ -59,7 +60,7 @@ func RunJenkinsPipeline(jenkinsFile string, req RequestBuild, pipelineComponentP } defer response.Body.Close() - bodyBytes, err := ioutil.ReadAll(response.Body) + bodyBytes, err := io.ReadAll(response.Body) if err != nil { return "", err } @@ -67,7 +68,7 @@ func RunJenkinsPipeline(jenkinsFile string, req RequestBuild, pipelineComponentP fmt.Printf("Pipeline: %s, response: %s\n", pipelineComponentPart, string(bodyBytes)) if response.StatusCode >= http.StatusAccepted { - bodyBytes, err := ioutil.ReadAll(response.Body) + bodyBytes, err := io.ReadAll(response.Body) if err != nil { return "", err } @@ -259,7 +260,7 @@ func VerifyJenkinsRunAttachments(projectName string, buildName string, artifacts } func VerifyJenkinsStages(goldenFile string, gotStages string) error { - 
wantStages, err := ioutil.ReadFile(goldenFile) + wantStages, err := os.ReadFile(goldenFile) if err != nil { return fmt.Errorf("Failed to load golden file to verify Jenkins stages: %w", err) } diff --git a/tests/utils/ods-env.go b/tests/utils/ods-env.go index ca7405c16..d5ca9a0b2 100644 --- a/tests/utils/ods-env.go +++ b/tests/utils/ods-env.go @@ -29,5 +29,10 @@ func ReadConfiguration() (map[string]string, error) { } } + for _, e := range os.Environ() { + parts := strings.SplitN(e, "=", 2) + values[parts[0]] = parts[1] + } + return values, nil } diff --git a/tests/utils/openshift-client.go b/tests/utils/openshift-client.go index dbe02e49e..ca7dcae4e 100644 --- a/tests/utils/openshift-client.go +++ b/tests/utils/openshift-client.go @@ -3,19 +3,18 @@ package utils import ( "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "os" - "path/filepath" ) func GetOCClient() (*rest.Config, error) { - home, err := os.UserHomeDir() - if err != nil { - return nil, err - } - config, err := clientcmd.BuildConfigFromFlags("", filepath.Join(home, ".kube", "config")) + + kubeCfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), + &clientcmd.ConfigOverrides{}, + ) + restCfg, err := kubeCfg.ClientConfig() if err != nil { return nil, err } - return config, nil + return restCfg, nil } diff --git a/tests/utils/project_names.go b/tests/utils/project_names.go new file mode 100644 index 000000000..38d2574f6 --- /dev/null +++ b/tests/utils/project_names.go @@ -0,0 +1,15 @@ +package utils + +var PROJECT_NAME = "unitt" +var PROJECT_ENV_VAR = "PROJECT_ID=" + PROJECT_NAME +var PROJECT_NAME_CD = PROJECT_NAME + "-cd" +var PROJECT_NAME_DEV = PROJECT_NAME + "-dev" +var PROJECT_NAME_TEST = PROJECT_NAME + "-test" + +func Set_project_name(project string) { + PROJECT_NAME = project + PROJECT_ENV_VAR = "PROJECT_ID=" + PROJECT_NAME + PROJECT_NAME_CD = PROJECT_NAME + "-cd" + PROJECT_NAME_DEV = PROJECT_NAME + "-dev" + PROJECT_NAME_TEST = 
PROJECT_NAME + "-test" +} diff --git a/tests/utils/projects.go b/tests/utils/projects.go index 97e1c4c53..961b512be 100644 --- a/tests/utils/projects.go +++ b/tests/utils/projects.go @@ -2,7 +2,9 @@ package utils import ( "fmt" + v1 "github.com/openshift/api/project/v1" + rbacv1 "k8s.io/api/rbac/v1" ) func FindProject(projects *v1.ProjectList, projectName string) error { @@ -13,3 +15,17 @@ func FindProject(projects *v1.ProjectList, projectName string) error { } return fmt.Errorf("Project '%s' not found", projectName) } + +func FindRoleBinding(roleBindings *rbacv1.RoleBindingList, subjectName, subjectType, namespace, roleName string) error { + for _, roleBinding := range roleBindings.Items { + for _, subject := range roleBinding.Subjects { + if subject.Name == subjectName && subject.Kind == subjectType && roleBinding.RoleRef.Name == roleName { + // For ClusterRoleBindings or in the case of Groups with no namespace, namespace can be empty + if namespace == "" || subject.Namespace == namespace { + return nil + } + } + } + } + return fmt.Errorf("RoleBinding not found: subjectName=%s, subjectType=%s, namespace=%s, roleName=%s", subjectName, subjectType, namespace, roleName) +} diff --git a/tests/utils/provisioning.go b/tests/utils/provisioning.go index d12076658..3cce94d83 100644 --- a/tests/utils/provisioning.go +++ b/tests/utils/provisioning.go @@ -2,7 +2,7 @@ package utils import ( "fmt" - "io/ioutil" + "os" "strings" "time" ) @@ -65,7 +65,7 @@ func (api *ProvisionAPI) CreateProject() ([]byte, error) { fmt.Printf("Provision app raw logs: %s\n", stdout) // get the (json) response from the script created file - log, err := ioutil.ReadFile("response.txt") + log, err := os.ReadFile("response.txt") if err != nil { return nil, fmt.Errorf("Could not read response file?!, %s", err) } @@ -126,7 +126,7 @@ func (api *ProvisionAPI) CreateComponent() ([]byte, error) { fmt.Printf("Provision app raw logs:%s\n", stages) // get the (json) response from the script created file - log, 
err := ioutil.ReadFile("response.txt") + log, err := os.ReadFile("response.txt") if err != nil { return nil, fmt.Errorf("Could not read response file?!, %w", err) } diff --git a/tests/utils/resources.go b/tests/utils/resources.go index 7a151674b..a6cac9bb2 100644 --- a/tests/utils/resources.go +++ b/tests/utils/resources.go @@ -6,8 +6,11 @@ import ( appsClientV1 "github.com/openshift/client-go/apps/clientset/versioned/typed/apps/v1" buildClientV1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" imageClientV1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + routeClientV1 "github.com/openshift/client-go/route/clientset/versioned/typed/route/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" + unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" ) @@ -102,42 +105,186 @@ func CheckDeploymentConfigs(namespace string, deploymentConfigs []string, config } } -func CheckServices(namespace string, services []string, config *rest.Config, t *testing.T) { +func CheckServices(namespace string, services []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, services, dyn, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}, nil, t) +} - if len(services) == 0 { - return - } +func CheckResources(resources Resources, t *testing.T) { - clientset, err := kubernetes.NewForConfig(config) + config, err := GetOCClient() + if err != nil { + t.Fatal(err) + } + dyn, err := dynamic.NewForConfig(config) if err != nil { t.Fatal(err) } - serviceClient := clientset.CoreV1().Services(namespace) - serviceList, err := serviceClient.List(metav1.ListOptions{}) + CheckImageStreams(resources.Namespace, resources.ImageStreams, config, t) + CheckImageTags(resources.Namespace, resources.ImageTags, config, t) + CheckBuildConfigs(resources.Namespace, 
resources.BuildConfigs, config, t) + CheckDeploymentConfigs(resources.Namespace, resources.DeploymentConfigs, config, t) + CheckDeployments(resources.Namespace, resources.Deployments, dyn, t) + CheckStatefulSets(resources.Namespace, resources.StatefulSets, dyn, t) + CheckDaemonSets(resources.Namespace, resources.DaemonSets, dyn, t) + CheckReplicaSets(resources.Namespace, resources.ReplicaSets, dyn, t) + CheckServices(resources.Namespace, resources.Services, dyn, t) + CheckRoutes(resources.Namespace, resources.Routes, config, t) + CheckIngresses(resources.Namespace, resources.Ingresses, dyn, t) + CheckConfigMaps(resources.Namespace, resources.ConfigMaps, dyn, t) + CheckSecrets(resources.Namespace, resources.Secrets, dyn, t) + CheckPersistentVolumeClaims(resources.Namespace, resources.PersistentVolumeClaims, dyn, t) + CheckServiceAccounts(resources.Namespace, resources.ServiceAccounts, dyn, t) + CheckRoles(resources.Namespace, resources.Roles, dyn, t) + CheckRoleBindings(resources.Namespace, resources.RoleBindings, dyn, t) + CheckNetworkPolicies(resources.Namespace, resources.NetworkPolicies, dyn, t) + CheckJobs(resources.Namespace, resources.Jobs, dyn, t) + CheckCronJobs(resources.Namespace, resources.CronJobs, dyn, t) + CheckPods(resources.Namespace, resources.Pods, dyn, t) + CheckHorizontalPodAutoscalers(resources.Namespace, resources.HorizontalPodAutoscalers, dyn, t) + +} + +func CheckRoutes(namespace string, routes []string, config *rest.Config, t *testing.T) { + + if len(routes) == 0 { + return + } + + routeClient, err := routeClientV1.NewForConfig(config) if err != nil { t.Error(err) } - for _, service := range services { - if err = FindServiceHasPods(serviceList, service); err != nil { + routeList, err := routeClient.Routes(namespace).List(metav1.ListOptions{}) + if err != nil { + t.Error(err) + } + + for _, route := range routes { + if err = FindRoute(routeList, route); err != nil { t.Error(err) } } } -func CheckResources(resources Resources, t *testing.T) { 
+func CheckConfigMaps(namespace string, configMaps []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, configMaps, dyn, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}, nil, t) +} + +func CheckSecrets(namespace string, secrets []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, secrets, dyn, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"}, nil, t) +} + +func CheckPersistentVolumeClaims(namespace string, pvcs []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, pvcs, dyn, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "persistentvolumeclaims"}, nil, t) +} + +func CheckServiceAccounts(namespace string, serviceAccounts []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, serviceAccounts, dyn, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "serviceaccounts"}, nil, t) +} + +func CheckRoleBindings(namespace string, roleBindings []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, roleBindings, dyn, schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"}, nil, t) +} + +func CheckCronJobs(namespace string, cronJobs []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic( + namespace, + cronJobs, + dyn, + schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "cronjobs"}, + []schema.GroupVersionResource{{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}}, + t, + ) +} + +func CheckDeployments(namespace string, deployments []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, deployments, dyn, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}, nil, t) +} + +func CheckStatefulSets(namespace string, statefulSets []string, dyn dynamic.Interface, t *testing.T) 
{ + checkResourceNamesDynamic(namespace, statefulSets, dyn, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"}, nil, t) +} + +func CheckDaemonSets(namespace string, daemonSets []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, daemonSets, dyn, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}, nil, t) +} + +func CheckReplicaSets(namespace string, replicaSets []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, replicaSets, dyn, schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}, nil, t) +} + +func CheckIngresses(namespace string, ingresses []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic( + namespace, + ingresses, + dyn, + schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "ingresses"}, + []schema.GroupVersionResource{ + {Group: "networking.k8s.io", Version: "v1beta1", Resource: "ingresses"}, + {Group: "extensions", Version: "v1beta1", Resource: "ingresses"}, + }, + t, + ) +} + +func CheckRoles(namespace string, roles []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, roles, dyn, schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "roles"}, nil, t) +} + +func CheckNetworkPolicies(namespace string, networkPolicies []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, networkPolicies, dyn, schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1", Resource: "networkpolicies"}, nil, t) +} + +func CheckJobs(namespace string, jobs []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, jobs, dyn, schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}, nil, t) +} + +func CheckPods(namespace string, pods []string, dyn dynamic.Interface, t *testing.T) { + 
checkResourceNamesDynamic(namespace, pods, dyn, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}, nil, t) +} + +func CheckHorizontalPodAutoscalers(namespace string, hpas []string, dyn dynamic.Interface, t *testing.T) { + checkResourceNamesDynamic(namespace, hpas, dyn, schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"}, nil, t) +} + +func checkResourceNamesDynamic(namespace string, expected []string, dyn dynamic.Interface, primary schema.GroupVersionResource, fallbacks []schema.GroupVersionResource, t *testing.T) { + if len(expected) == 0 { + return + } + + candidates := append([]schema.GroupVersionResource{primary}, fallbacks...) + + var used schema.GroupVersionResource + var list *unstructured.UnstructuredList + var err error + + for _, gvr := range candidates { + used = gvr + list, err = dyn.Resource(gvr).Namespace(namespace).List(metav1.ListOptions{}) + if err == nil { + break + } + } - config, err := GetOCClient() if err != nil { - t.Fatal(err) + t.Errorf("Failed to list %s in %s: %v", primary.Resource, namespace, err) + return } - CheckImageStreams(resources.Namespace, resources.ImageStreams, config, t) - CheckImageTags(resources.Namespace, resources.ImageTags, config, t) - CheckBuildConfigs(resources.Namespace, resources.BuildConfigs, config, t) - CheckDeploymentConfigs(resources.Namespace, resources.DeploymentConfigs, config, t) - CheckServices(resources.Namespace, resources.Services, config, t) + t.Logf("Checking namespace %s for %s, found %d total", namespace, used.Resource, len(list.Items)) + names := make(map[string]struct{}, len(list.Items)) + for i := range list.Items { + name := list.Items[i].GetName() + names[name] = struct{}{} + t.Logf("Found %s: %s", used.Resource, name) + } + for _, resourceName := range expected { + if _, ok := names[resourceName]; !ok { + t.Errorf("%s '%s' not found in namespace %s", used.Resource, resourceName, namespace) + } + } } diff --git 
a/tests/utils/role-bindings.go b/tests/utils/role-bindings.go deleted file mode 100644 index 64cc2b28f..000000000 --- a/tests/utils/role-bindings.go +++ /dev/null @@ -1,28 +0,0 @@ -package utils - -import ( - "fmt" - v1 "k8s.io/api/rbac/v1" -) - -func FindRoleBinding(roleBindings *v1.RoleBindingList, subjectName string, subjectType string, subjectNamespace string, roleName string) error { - for _, roleBinding := range roleBindings.Items { - for _, subject := range roleBinding.Subjects { - if subject.Name == subjectName && subject.Namespace == subjectNamespace && roleBinding.RoleRef.Name == roleName && subject.Kind == subjectType { - return nil - } - } - } - return fmt.Errorf("Subject '%s' of kind '%s' in namespace '%s' does not have the role '%s'", subjectName, subjectType, subjectNamespace, roleName) -} - -func FindClusterRoleBinding(roleBindings *v1.ClusterRoleBindingList, subjectName string, subjectType string, subjectNamespace string, roleName string) error { - for _, roleBinding := range roleBindings.Items { - for _, subject := range roleBinding.Subjects { - if subject.Name == subjectName && subject.Namespace == subjectNamespace && roleBinding.RoleRef.Name == roleName && subject.Kind == subjectType { - return nil - } - } - } - return fmt.Errorf("Subject '%s' of kind '%s' in namespace '%s' does not have the cluster role '%s'", subjectName, subjectType, subjectNamespace, roleName) -} diff --git a/tests/utils/routes.go b/tests/utils/routes.go new file mode 100644 index 000000000..f454ceb2a --- /dev/null +++ b/tests/utils/routes.go @@ -0,0 +1,17 @@ +package utils + +import ( + "fmt" + + v1 "github.com/openshift/api/route/v1" +) + +func FindRoute(routes *v1.RouteList, routeName string) error { + for _, route := range routes.Items { + if routeName == route.Name { + return nil + } + } + + return fmt.Errorf("Route '%s' not found.", routeName) +} diff --git a/tests/utils/secrets.go b/tests/utils/secrets.go new file mode 100644 index 000000000..a32343b93 --- /dev/null 
+++ b/tests/utils/secrets.go @@ -0,0 +1,17 @@ +package utils + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" +) + +func FindSecret(secrets *v1.SecretList, secretName string) error { + for _, secret := range secrets.Items { + if secretName == secret.Name { + return nil + } + } + + return fmt.Errorf("Secret '%s' not found.", secretName) +} diff --git a/tests/utils/serviceaccounts.go b/tests/utils/serviceaccounts.go new file mode 100644 index 000000000..bff23d010 --- /dev/null +++ b/tests/utils/serviceaccounts.go @@ -0,0 +1,17 @@ +package utils + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" +) + +func FindServiceAccount(serviceAccounts *v1.ServiceAccountList, serviceAccountName string) error { + for _, serviceAccount := range serviceAccounts.Items { + if serviceAccountName == serviceAccount.Name { + return nil + } + } + + return fmt.Errorf("ServiceAccount '%s' not found.", serviceAccountName) +} diff --git a/tests/utils/types.go b/tests/utils/types.go index 4d57a89af..4e53574ce 100644 --- a/tests/utils/types.go +++ b/tests/utils/types.go @@ -24,10 +24,27 @@ type ImageTag struct { } type Resources struct { - Namespace string - ImageTags []ImageTag - BuildConfigs []string - DeploymentConfigs []string - Services []string - ImageStreams []string + Namespace string + ImageTags []ImageTag + BuildConfigs []string + DeploymentConfigs []string + Deployments []string + StatefulSets []string + DaemonSets []string + ReplicaSets []string + Services []string + ImageStreams []string + Routes []string + Ingresses []string + ConfigMaps []string + Secrets []string + PersistentVolumeClaims []string + ServiceAccounts []string + Roles []string + RoleBindings []string + NetworkPolicies []string + Jobs []string + CronJobs []string + Pods []string + HorizontalPodAutoscalers []string } From 206c44dadaa784f07eede55c4dcddc8fcb783cbd Mon Sep 17 00:00:00 2001 From: brais <26645694+BraisVQ@users.noreply.github.com> Date: Tue, 10 Feb 2026 16:04:08 +0100 Subject: [PATCH 2/8] Update keystore path in 
import_certs.sh script (#1365) --- CHANGELOG.md | 2 ++ ods-provisioning-app/docker/import_certs.sh | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7191c3c11..9213e1165 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ ### Changed - Improved automatic tests for the quickstarters ([#1362](https://github.com/opendevstack/ods-core/pull/1362)) +- Update keystore path in import_certs.sh script ([#1365](https://github.com/opendevstack/ods-core/pull/1365)) + ### Fixed ## [4.11.1] - 2025-12-05 diff --git a/ods-provisioning-app/docker/import_certs.sh b/ods-provisioning-app/docker/import_certs.sh index fd9ec4fdd..47aace2f4 100755 --- a/ods-provisioning-app/docker/import_certs.sh +++ b/ods-provisioning-app/docker/import_certs.sh @@ -4,7 +4,7 @@ set -eu if [[ ! -z ${APP_DNS:=""} ]]; then echo "Setting up certificates from APP_DNS=${APP_DNS} ..."; \ - KEYSTORE="$JAVA_HOME/lib/security/cacerts" + KEYSTORE="/opt/java/openjdk/lib/security/cacerts" arrIN=(${APP_DNS//;/ }) for val in "${arrIN[@]}"; @@ -22,4 +22,4 @@ if [[ ! -z ${APP_DNS:=""} ]]; then echo "Done with certificate setup" else echo 'No certificates to import' -fi \ No newline at end of file +fi From 6d26ccc124228a442a747c0354ff066bec346a70 Mon Sep 17 00:00:00 2001 From: Jorge Romero Date: Wed, 11 Feb 2026 18:42:45 +0100 Subject: [PATCH 3/8] Remove todo.md (#1363) --- tests/quickstarter/TODO.md | 22 ---------------------- 1 file changed, 22 deletions(-) delete mode 100644 tests/quickstarter/TODO.md diff --git a/tests/quickstarter/TODO.md b/tests/quickstarter/TODO.md deleted file mode 100644 index cdb45bbaf..000000000 --- a/tests/quickstarter/TODO.md +++ /dev/null @@ -1,22 +0,0 @@ -Plan: Enhance Quickstarters Testing Framework -TL;DR: The ODS quickstarters testing framework is well-designed but lacks key capabilities for enterprise-grade functional testing. 
The framework needs structured reporting, test lifecycle hooks, better error diagnostics, and test data utilities to become more robust and easier to use. Enhancements should focus on observability, maintainability, and developer experience without breaking the current YAML-based configuration model. - -Steps -[X] Add test lifecycle hooks — Implement before/after step and component setup/teardown mechanisms in steps/types.go and expand quickstarter_test.go to support optional hook execution. - -[X] Build structured test reporting — Create a new reporting/ package with metrics collection (execution time per step, pass/fail counts, resource utilization), and export JUnit XML natively instead of relying on external tools. - -[X] Enhance error diagnostics — Extend error handling in verification.go and step implementations to capture context (pod logs, events, previous states) and provide actionable suggestions on common failures. - -[ ] Add test data utilities — Create a fixtures/ package with builders for common test objects (namespaces, deployments, ConfigMaps) and a cleanup policy system to handle data rollback after tests. - -[X] Implement execution control — Add YAML schema validation, conditional step execution (skip if conditions), and step-level retry logic in steps.go and relevant step files. - -[X] Improve extensibility — Refactor step registration from switch statements to a plugin/handler registry pattern, and document step authoring guidelines in QUICKSTARTERS_TESTS.md. - -Further Considerations -Backward compatibility — All changes should remain backward compatible with existing YAML test definitions; new features should be optional fields. - -Reporting scope — Focus on actionable metrics (timing, failures, resource states) vs. comprehensive performance profiling (which may be overkill); prioritize JUnit XML natively for CI/CD. 
- -Hook complexity trade-off — Hooks should be simple (shell scripts or templates) rather than requiring Go code, to keep YAML-based tests maintainable. \ No newline at end of file From 0ffe2348907a10ff44aa3fcb16d38788fe848b7c Mon Sep 17 00:00:00 2001 From: roicarrera <120183761+roicarrera@users.noreply.github.com> Date: Thu, 12 Feb 2026 15:50:18 +0100 Subject: [PATCH 4/8] feat: added allure plugin and command line installation (#1364) --- CHANGELOG.md | 1 + .../init.groovy.d/allure-cli.groovy | 33 +++++++++++++++++++ jenkins/master/plugins.ubi8.txt | 1 + 3 files changed, 35 insertions(+) create mode 100644 jenkins/master/configuration/init.groovy.d/allure-cli.groovy diff --git a/CHANGELOG.md b/CHANGELOG.md index 9213e1165..d458e76ef 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ ### Changed - Improved automatic tests for the quickstarters ([#1362](https://github.com/opendevstack/ods-core/pull/1362)) - Update keystore path in import_certs.sh script ([#1365](https://github.com/opendevstack/ods-core/pull/1365)) +- Added allure plugin and command line installation to jenkins master image ([#1364](https://github.com/opendevstack/ods-core/pull/1364)) ### Fixed diff --git a/jenkins/master/configuration/init.groovy.d/allure-cli.groovy b/jenkins/master/configuration/init.groovy.d/allure-cli.groovy new file mode 100644 index 000000000..627d8e6fe --- /dev/null +++ b/jenkins/master/configuration/init.groovy.d/allure-cli.groovy @@ -0,0 +1,33 @@ +import jenkins.model.Jenkins +import hudson.tools.InstallSourceProperty +import ru.yandex.qatools.allure.jenkins.tools.AllureCommandlineInstallation +import ru.yandex.qatools.allure.jenkins.tools.AllureCommandlineInstaller + +def toolName = "Allure" +def allureVersion = "2.34.0" + +def j = Jenkins.get() +def desc = j.getDescriptorByType(AllureCommandlineInstallation.DescriptorImpl) + +def current = (desc.getInstallations() ?: []) as AllureCommandlineInstallation[] +def already = current.find { it?.name == toolName } + +if 
(already) { + println("[init] Allure Commandline '${toolName}' already exists. Skipping.") + return +} + +def installer = new AllureCommandlineInstaller(allureVersion) +def prop = new InstallSourceProperty([installer]) + +def newInst = new AllureCommandlineInstallation( + toolName, + "", + [prop] +) + +def updated = (current.toList() + newInst) as AllureCommandlineInstallation[] +desc.setInstallations(updated) +desc.save() + +println("[init] Allure Commandline '${toolName}' configured (version ${allureVersion})") diff --git a/jenkins/master/plugins.ubi8.txt b/jenkins/master/plugins.ubi8.txt index 8b932c784..6a43b8a72 100644 --- a/jenkins/master/plugins.ubi8.txt +++ b/jenkins/master/plugins.ubi8.txt @@ -2,6 +2,7 @@ greenballs:1.15.1 sonar:2.17.2 ansicolor:1.0.4 +allure-jenkins-plugin:2.34.0 audit-trail:361.v82cde86c784e Office-365-Connector:5.0.0 mask-passwords:173.v6a_077a_291eb_5 From b630c27e14e01c4290b67861b571ce6ddce908d7 Mon Sep 17 00:00:00 2001 From: jordivx Date: Wed, 18 Feb 2026 16:50:18 +0100 Subject: [PATCH 5/8] Remove unnecessary configurations from ods-api-service (#1366) --- CHANGELOG.md | 1 + .../ods-core.ods-api-service.env.sample | 30 ------ .../chart/templates/core/deployment.yaml | 2 - ...rvice-projects-info-service-configmap.yaml | 30 ------ ...-service-projects-info-service-secret.yaml | 23 ----- .../chart/templates/tpl/_application.tpl | 96 ------------------- ods-api-service/chart/values.yaml.template | 59 ------------ 7 files changed, 1 insertion(+), 240 deletions(-) delete mode 100644 ods-api-service/chart/templates/external-service-projects-info-service/external-service-projects-info-service-secret.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index d458e76ef..2e46738d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ - Improved automatic tests for the quickstarters ([#1362](https://github.com/opendevstack/ods-core/pull/1362)) - Update keystore path in import_certs.sh script 
([#1365](https://github.com/opendevstack/ods-core/pull/1365)) - Added allure plugin and command line installation to jenkins master image ([#1364](https://github.com/opendevstack/ods-core/pull/1364)) +- Remove unnecessary configurations from ods-api-service ([#1366](https://github.com/opendevstack/ods-core/pull/1366)) ### Fixed diff --git a/configuration-sample/ods-core.ods-api-service.env.sample b/configuration-sample/ods-core.ods-api-service.env.sample index b00ce4bef..3b0f8a721 100644 --- a/configuration-sample/ods-core.ods-api-service.env.sample +++ b/configuration-sample/ods-core.ods-api-service.env.sample @@ -40,36 +40,6 @@ UIPATH_QUEUE_ITEMS_ENDPOINT=/odata/QueueItems # Projects Info Service PROJECTS_INFO_SERVICE_BASE_URL=https://projects-info-service.example.com -AZURE_ACCESS_TOKEN=example-azure-token -AZURE_DATAHUB_GROUP_ID=example-datahub-group -TESTING_HUB_API_URL=https://testinghub-api.example.com/v1/projects -TESTING_HUB_API_TOKEN=example-testing-hub-token -TESTING_HUB_DEFAULT_PROJECTS=PROJECT1:1, PROJECT2:2 - -# OpenShift Cluster Tokens -OPENSHIFT_USTEST_API_URL=https://api.us-test.example.com:6443 -OPENSHIFT_USTEST_TOKEN=example-ustest-token-change-me - -OPENSHIFT_EUDEV_API_URL=https://api.eu-dev.example.com:6443 -OPENSHIFT_EUDEV_TOKEN=example-eudev-token-change-me - -OPENSHIFT_USDEV_API_URL=https://api.us-dev.example.com:6443 -OPENSHIFT_USDEV_TOKEN=example-usdev-token-change-me - -OPENSHIFT_CNDEV_API_URL=https://api.cn-dev.example.com:6443 -OPENSHIFT_CNDEV_TOKEN=example-cndev-token-change-me - -OPENSHIFT_INHDEV_API_URL=https://api.inh-dev.example.com:6443 -OPENSHIFT_INHDEV_TOKEN=example-inhdev-token-change-me - -# Bitbucket Platforms Configuration -BITBUCKET_PLATFORMS_BASE_PATH=https://bitbucket.example.com/projects/PLATFORMS/repos/sections-links/raw/ -BITBUCKET_PLATFORMS_USTEST=us-test-sections.yml?at=main -BITBUCKET_PLATFORMS_EUDEV=eu-sections.yml?at=main -BITBUCKET_PLATFORMS_USDEV=us-sections.yml?at=main 
-BITBUCKET_PLATFORMS_CNDEV=cn-sections.yml?at=main -BITBUCKET_PLATFORMS_INHDEV=inh-sections.yml?at=main -BITBUCKET_PLATFORMS_BEARER_TOKEN=example-bitbucket-bearer-token # Project Users JWT Secret PROJECT_USERS_JWT_SECRET=example-jwt-secret-key-256bit-change-in-production diff --git a/ods-api-service/chart/templates/core/deployment.yaml b/ods-api-service/chart/templates/core/deployment.yaml index 495b9cb2c..ecc89934b 100644 --- a/ods-api-service/chart/templates/core/deployment.yaml +++ b/ods-api-service/chart/templates/core/deployment.yaml @@ -74,8 +74,6 @@ spec: {{- if .Values.externalServices.projectsInfoService.enabled }} - configMapRef: name: {{ include "chart.fullname" . }}-projects-info-service-config - - secretRef: - name: {{ include "chart.fullname" . }}-projects-info-service-credentials {{- end }} {{- if gt (len .Values.externalServices.openshift.instances) 0 }} - configMapRef: diff --git a/ods-api-service/chart/templates/external-service-projects-info-service/external-service-projects-info-service-configmap.yaml b/ods-api-service/chart/templates/external-service-projects-info-service/external-service-projects-info-service-configmap.yaml index 5af9a3836..cfea5197c 100644 --- a/ods-api-service/chart/templates/external-service-projects-info-service/external-service-projects-info-service-configmap.yaml +++ b/ods-api-service/chart/templates/external-service-projects-info-service/external-service-projects-info-service-configmap.yaml @@ -8,34 +8,4 @@ metadata: app.kubernetes.io/component: projects-info-service-config data: PROJECTS_INFO_SERVICE_BASE_URL: {{ .Values.externalServices.projectsInfoService.baseUrl | quote }} - PROJECTS_INFO_SERVICE_SSL_VERIFY: {{ .Values.externalServices.projectsInfoService.ssl.verifyCertificates | quote }} - {{- if .Values.externalServices.projectsInfoService.ssl.trustStorePath }} - PROJECTS_INFO_SERVICE_SSL_TRUSTSTORE_PATH: {{ .Values.externalServices.projectsInfoService.ssl.trustStorePath | quote }} - {{- end }} - {{- if 
.Values.externalServices.projectsInfoService.ssl.trustStoreType }} - PROJECTS_INFO_SERVICE_SSL_TRUSTSTORE_TYPE: {{ .Values.externalServices.projectsInfoService.ssl.trustStoreType | quote }} - {{- end }} - PROJECTS_INFO_SERVICE_AZURE_DATA_HUB_GROUP_ID: {{ .Values.externalServices.projectsInfoService.azure.datahub.groupId | quote }} - PROJECTS_INFO_SERVICE_AZURE_GROUPS_PAGE_SIZE: {{ .Values.externalServices.projectsInfoService.azure.groups.pageSize | quote }} - PROJECTS_INFO_SERVICE_TESTING_HUB_DEFAULT_PROJECTS: {{ .Values.externalServices.projectsInfoService.testingHub.default.projects | quote }} - PROJECTS_INFO_SERVICE_TESTING_HUB_API_URL: {{ .Values.externalServices.projectsInfoService.testingHub.api.url | quote }} - PROJECTS_INFO_SERVICE_TESTING_HUB_API_PAGE_SIZE: {{ .Values.externalServices.projectsInfoService.testingHub.api.pageSize | quote }} - PROJECTS_INFO_SERVICE_CUSTOM_CACHE_TTL_SECONDS: {{ .Values.externalServices.projectsInfoService.custom.cache.default.ttlSeconds | quote }} - PROJECTS_INFO_SERVICE_CUSTOM_CACHE_MAXIMUM_SIZE: {{ .Values.externalServices.projectsInfoService.custom.cache.default.maximumSize | quote }} - PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_TTL_SECONDS: {{ .Values.externalServices.projectsInfoService.custom.cache.fallback.ttlSeconds | quote }} - PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_MAXIMUM_SIZE: {{ .Values.externalServices.projectsInfoService.custom.cache.fallback.maximumSize | quote }} - PROJECTS_INFO_SERVICE_MOCK_CLUSTERS: {{ .Values.externalServices.projectsInfoService.mock.clusters | quote }} - PROJECTS_INFO_SERVICE_MOCK_DEFAULT_PROJECTS: {{ .Values.externalServices.projectsInfoService.mock.projects.default | quote }} - PROJECTS_INFO_SERVICE_MOCK_USER_PROJECTS: {{ .Values.externalServices.projectsInfoService.mock.projects.users | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_US_TEST_URL: {{ .Values.externalServices.projectsInfoService.openshift.api.clusters.usTest.url | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_EU_DEV_URL: {{ 
.Values.externalServices.projectsInfoService.openshift.api.clusters.euDev.url | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_US_DEV_URL: {{ .Values.externalServices.projectsInfoService.openshift.api.clusters.usDev.url | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_CN_DEV_URL: {{ .Values.externalServices.projectsInfoService.openshift.api.clusters.cnDev.url | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_INH_DEV_URL: {{ .Values.externalServices.projectsInfoService.openshift.api.clusters.inhDev.url | quote }} - PROJECTS_INFO_SERVICE_PLATFORMS_BASE_PATH: {{ .Values.externalServices.projectsInfoService.platforms.basePath | quote }} - PROJECTS_INFO_SERVICE_PLATFORMS_US_TEST_CLUSTER: {{ .Values.externalServices.projectsInfoService.platforms.clusters.usTest | quote }} - PROJECTS_INFO_SERVICE_PLATFORMS_EU_CLUSTER: {{ .Values.externalServices.projectsInfoService.platforms.clusters.euDev | quote }} - PROJECTS_INFO_SERVICE_PLATFORMS_US_CLUSTER: {{ .Values.externalServices.projectsInfoService.platforms.clusters.usDev | quote }} - PROJECTS_INFO_SERVICE_PLATFORMS_CN_CLUSTER: {{ .Values.externalServices.projectsInfoService.platforms.clusters.cnDev | quote }} - PROJECTS_INFO_SERVICE_PLATFORMS_INH_CLUSTER: {{ .Values.externalServices.projectsInfoService.platforms.clusters.inhDev | quote }} {{- end }} diff --git a/ods-api-service/chart/templates/external-service-projects-info-service/external-service-projects-info-service-secret.yaml b/ods-api-service/chart/templates/external-service-projects-info-service/external-service-projects-info-service-secret.yaml deleted file mode 100644 index 9fcc0af8b..000000000 --- a/ods-api-service/chart/templates/external-service-projects-info-service/external-service-projects-info-service-secret.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{- if .Values.externalServices.projectsInfoService.enabled }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "chart.fullname" . }}-projects-info-service-credentials - labels: - {{- include "chart.labels" . 
| nindent 4 }} - app.kubernetes.io/component: projects-info-service-credentials -type: Opaque -data: - {{- if .Values.externalServices.projectsInfoService.ssl.trustStorePassword }} - PROJECTS_INFO_SERVICE_SSL_TRUSTSTORE_PASSWORD: {{ .Values.externalServices.projectsInfoService.ssl.trustStorePassword | b64enc | quote }} - {{- end }} - PROJECTS_INFO_SERVICE_AZURE_ACCESS_TOKEN: {{ .Values.externalServices.projectsInfoService.azure.accessToken | b64enc | quote }} - PROJECTS_INFO_SERVICE_TESTING_HUB_API_TOKEN: {{ .Values.externalServices.projectsInfoService.testingHub.api.token | b64enc | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_US_TEST_TOKEN: {{ .Values.externalServices.projectsInfoService.openshift.api.clusters.usTest.token | b64enc | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_EU_DEV_TOKEN: {{ .Values.externalServices.projectsInfoService.openshift.api.clusters.euDev.token | b64enc | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_US_DEV_TOKEN: {{ .Values.externalServices.projectsInfoService.openshift.api.clusters.usDev.token | b64enc | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_CN_DEV_TOKEN: {{ .Values.externalServices.projectsInfoService.openshift.api.clusters.cnDev.token | b64enc | quote }} - PROJECTS_INFO_SERVICE_OPENSHIFT_INH_DEV_TOKEN: {{ .Values.externalServices.projectsInfoService.openshift.api.clusters.inhDev.token | b64enc | quote }} - PROJECTS_INFO_SERVICE_PLATFORMS_BEARER_TOKEN: {{ .Values.externalServices.projectsInfoService.platforms.bearer.token | b64enc | quote }} -{{- end }} - diff --git a/ods-api-service/chart/templates/tpl/_application.tpl b/ods-api-service/chart/templates/tpl/_application.tpl index 496697516..33970f947 100644 --- a/ods-api-service/chart/templates/tpl/_application.tpl +++ b/ods-api-service/chart/templates/tpl/_application.tpl @@ -157,101 +157,5 @@ externalservices: {{- if .Values.externalServices.projectsInfoService.enabled }} projects-info-service: base-url: ${PROJECTS_INFO_SERVICE_BASE_URL:http://localhost:8081} - ssl: - 
verify-certificates: ${PROJECTS_INFO_SERVICE_SSL_VERIFY:true} - trust-store-path: ${PROJECTS_INFO_SERVICE_SSL_TRUSTSTORE_PATH:} - trust-store-password: ${PROJECTS_INFO_SERVICE_SSL_TRUSTSTORE_PASSWORD:} - trust-store-type: ${PROJECTS_INFO_SERVICE_SSL_TRUSTSTORE_TYPE:JKS} - azure: - access-token: ${PROJECTS_INFO_SERVICE_AZURE_ACCESS_TOKEN:tbc} - datahub: - group-id: ${PROJECTS_INFO_SERVICE_AZURE_DATA_HUB_GROUP_ID:tbc} - groups: - page-size: ${PROJECTS_INFO_SERVICE_AZURE_GROUPS_PAGE_SIZE:100} - testing-hub: - default: - projects: ${PROJECTS_INFO_SERVICE_TESTING_HUB_DEFAULT_PROJECTS:tbc} - api: - url: ${PROJECTS_INFO_SERVICE_TESTING_HUB_API_URL:tbc} - token: ${PROJECTS_INFO_SERVICE_TESTING_HUB_API_TOKEN:tbc} - page-size: ${PROJECTS_INFO_SERVICE_TESTING_HUB_API_PAGE_SIZE:100} - custom: - cache: - specs: - userGroups: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_TTL_SECONDS:60} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_MAXIMUM_SIZE:100} - userGroups-fallback: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_TTL_SECONDS:120} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_MAXIMUM_SIZE:100} - userEmail: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_TTL_SECONDS:60} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_MAXIMUM_SIZE:100} - userEmail-fallback: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_TTL_SECONDS:120} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_MAXIMUM_SIZE:100} - allEdpProjects: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_TTL_SECONDS:60} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_MAXIMUM_SIZE:100} - allEdpProjects-fallback: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_TTL_SECONDS:120} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_MAXIMUM_SIZE:100} - projectsInfoCache: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_TTL_SECONDS:60} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_MAXIMUM_SIZE:100} - projectsInfoCache-fallback: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_TTL_SECONDS:120} - maxSize: 
${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_MAXIMUM_SIZE:100} - openshiftProjects: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_TTL_SECONDS:60} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_MAXIMUM_SIZE:100} - openshiftProjects-fallback: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_TTL_SECONDS:120} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_MAXIMUM_SIZE:100} - dataHubGroups: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_TTL_SECONDS:120} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_MAXIMUM_SIZE:100} - testingHubGroups: - ttl: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_TTL_SECONDS:120} - maxSize: ${PROJECTS_INFO_SERVICE_CUSTOM_CACHE_FALLBACK_MAXIMUM_SIZE:100} - mock: - clusters: ${PROJECTS_INFO_SERVICE_MOCK_CLUSTERS:tbc} - projects: - default: ${PROJECTS_INFO_SERVICE_MOCK_DEFAULT_PROJECTS:tbc} - users: ${PROJECTS_INFO_SERVICE_MOCK_USER_PROJECTS:tbc} - openshift: - api: - clusters: - us-test: - url: ${PROJECTS_INFO_SERVICE_OPENSHIFT_US_TEST_URL:tbc} - token: ${PROJECTS_INFO_SERVICE_OPENSHIFT_US_TEST_TOKEN:tbc} - eu-dev: - url: ${PROJECTS_INFO_SERVICE_OPENSHIFT_EU_DEV_URL:tbc} - token: ${PROJECTS_INFO_SERVICE_OPENSHIFT_EU_DEV_TOKEN:tbc} - us-dev: - url: ${PROJECTS_INFO_SERVICE_OPENSHIFT_US_DEV_URL:tbc} - token: ${PROJECTS_INFO_SERVICE_OPENSHIFT_US_DEV_TOKEN:tbc} - cn-dev: - url: ${PROJECTS_INFO_SERVICE_OPENSHIFT_CN_DEV_URL:tbc} - token: ${PROJECTS_INFO_SERVICE_OPENSHIFT_CN_DEV_TOKEN:tbc} - inh-dev: - url: ${PROJECTS_INFO_SERVICE_OPENSHIFT_INH_DEV_URL:tbc} - token: ${PROJECTS_INFO_SERVICE_OPENSHIFT_INH_DEV_TOKEN:tbc} - project: - url: /apis/project.openshift.io/v1/projects - platforms: - bearer-token: ${PROJECTS_INFO_SERVICE_PLATFORMS_BEARER_TOKEN:tbc} - base-path: ${PROJECTS_INFO_SERVICE_PLATFORMS_BASE_PATH:tbc} - clusters: - us-test: ${PROJECTS_INFO_SERVICE_PLATFORMS_US_TEST_CLUSTER:tbc} - eu-dev: ${PROJECTS_INFO_SERVICE_PLATFORMS_EU_CLUSTER:tbc} - us-dev: ${PROJECTS_INFO_SERVICE_PLATFORMS_US_CLUSTER:tbc} - cn-dev: 
${PROJECTS_INFO_SERVICE_PLATFORMS_CN_CLUSTER:tbc} - inh-dev: ${PROJECTS_INFO_SERVICE_PLATFORMS_INH_CLUSTER:tbc} - project: - filter: - project-roles-group-prefix: BI-AS-ATLASSIAN-P - # Properties to be used as lists cannot have leading or trailing blanks. - project-roles-group-suffixes: TEAM,MANAGER,STAKEHOLDER {{- end }} {{- end -}} diff --git a/ods-api-service/chart/values.yaml.template b/ods-api-service/chart/values.yaml.template index 4cd778e73..a40fe9633 100644 --- a/ods-api-service/chart/values.yaml.template +++ b/ods-api-service/chart/values.yaml.template @@ -213,65 +213,6 @@ externalServices: projectsInfoService: enabled: true baseUrl: "$PROJECTS_INFO_SERVICE_BASE_URL" - ssl: - verifyCertificates: "false" - trustStorePath: "" - trustStorePassword: "" - trustStoreType: "JKS" - azure: - accessToken: "$AZURE_ACCESS_TOKEN" - datahub: - groupId: "$AZURE_DATAHUB_GROUP_ID" - groups: - pageSize: 100 - testingHub: - default: - projects: "$TESTING_HUB_DEFAULT_PROJECTS" - api: - url: "$TESTING_HUB_API_URL" - token: "$TESTING_HUB_API_TOKEN" - pageSize: 100 - custom: - cache: - default: - ttlSeconds: 3600 # seconds (1 hour) - maximumSize: 1000 - fallback: - ttlSeconds: 7200 # seconds (2 hour) - maximumSize: 1000 - mock: - clusters: "1-test, 2-test, 3-test" - projects: - default: "PROJECT-1:1-test, PROJECT-2-cn, PROJECT-3, SORIAOLI:1-test, VILAXICOIP:1-test, FELST:1-test" - users: "{PEPE:[PROJECT-3, PROJECT-4]; PPT:[PROJECT-3, PROJECT-5]}" - openshift: - api: - clusters: - usTest: - url: "$OPENSHIFT_USTEST_API_URL" - token: $OPENSHIFT_USTEST_TOKEN - euDev: - url: "$OPENSHIFT_EUDEV_API_URL" - token: $OPENSHIFT_EUDEV_TOKEN - usDev: - url: "$OPENSHIFT_USDEV_API_URL" - token: $OPENSHIFT_USDEV_TOKEN - cnDev: - url: "$OPENSHIFT_CNDEV_API_URL" - token: $OPENSHIFT_CNDEV_TOKEN - inhDev: - url: "$OPENSHIFT_INHDEV_API_URL" - token: $OPENSHIFT_INHDEV_TOKEN - platforms: - basePath: "$BITBUCKET_PLATFORMS_BASE_PATH" - clusters: - usTest: "$BITBUCKET_PLATFORMS_USTEST" - euDev: 
"$BITBUCKET_PLATFORMS_EUDEV" - usDev: "$BITBUCKET_PLATFORMS_USDEV" - cnDev: "$BITBUCKET_PLATFORMS_CNDEV" - inhDev: "$BITBUCKET_PLATFORMS_INHDEV" - bearer: - token: $BITBUCKET_PLATFORMS_BEARER_TOKEN # Add OpenShift instances with tokens as needed openshift: instances: [] From 1f52dd091ebc47297816d25421607301eaef1891 Mon Sep 17 00:00:00 2001 From: brais <26645694+BraisVQ@users.noreply.github.com> Date: Wed, 25 Feb 2026 18:28:25 +0100 Subject: [PATCH 6/8] Fix e2e jenkins artifacts fetch (#1369) --- CHANGELOG.md | 1 + .../scripts/get-artifact-from-jenkins-run.sh | 34 ++++++++++++------- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e46738d6..539cdd198 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - Remove unnecessary configurations from ods-api-service ([#1366](https://github.com/opendevstack/ods-core/pull/1366)) ### Fixed +- Fix e2e jenkins artifacts fetch ([#1369](https://github.com/opendevstack/ods-core/pull/1369)) ## [4.11.1] - 2025-12-05 diff --git a/tests/scripts/get-artifact-from-jenkins-run.sh b/tests/scripts/get-artifact-from-jenkins-run.sh index e9c63166e..d86689d42 100755 --- a/tests/scripts/get-artifact-from-jenkins-run.sh +++ b/tests/scripts/get-artifact-from-jenkins-run.sh @@ -6,21 +6,31 @@ PROJECT=$2 BUILD_NAME=$1 BUILD_URL=$(oc get -n ${PROJECT} build ${BUILD_NAME} -o jsonpath='{.metadata.annotations.openshift\.io/jenkins-build-uri}') echo $BUILD_URL -if [[ "$3" == *"/"* ]]; then - ARTIFACT_URL="${BUILD_URL}/artifact/${3}" -else - ARTIFACT_URL="${BUILD_URL}/artifact/artifacts/${3}" -fi + +# Strip trailing slash to avoid double slashes in URL +BUILD_URL=${BUILD_URL%/} # Extract just the filename from the artifact path for local storage ARTIFACT_FILENAME=$(basename "$3") OUTPUT_PATH="/tmp/${ARTIFACT_FILENAME}" -echo "grabbing artifact from $ARTIFACT_URL - and storing in ${OUTPUT_PATH}" +# Define candidate artifact URLs +ARTIFACT_URLS=( + "${BUILD_URL}/artifact/${3}" + 
"${BUILD_URL}/artifact/artifacts/${3}" +) + TOKEN=$(oc whoami --show-token) -httpCode=$(curl --insecure -sS ${ARTIFACT_URL} --header "Authorization: Bearer ${TOKEN}" -o "${OUTPUT_PATH}" -w "%{http_code}") -echo "response: $httpCode" -if [ ! "${httpCode}" == "200" ]; then - echo "Could not find artifact $3 - url: $ARTIFACT_URL" - exit 1 -fi +echo "grabbing artifact from $ARTIFACT_FILENAME - and storing in ${OUTPUT_PATH}" + +# Try each URL until one succeeds +for url in "${ARTIFACT_URLS[@]}"; do + httpCode=$(curl --insecure -sS "${url}" --header "Authorization: Bearer ${TOKEN}" -o "${OUTPUT_PATH}" -w "%{http_code}") + echo "trying: $url - response: $httpCode" + if [ "${httpCode}" == "200" ]; then + exit 0 + fi +done + +echo "Could not find artifact $3" +exit 1 From 4ca669961b4826b9595099caf56d3186665b17b3 Mon Sep 17 00:00:00 2001 From: brais <26645694+BraisVQ@users.noreply.github.com> Date: Mon, 2 Mar 2026 16:00:17 +0100 Subject: [PATCH 7/8] Default config for ods api set to version 0.0.2 --- configuration-sample/ods-core.ods-api-service.env.sample | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configuration-sample/ods-core.ods-api-service.env.sample b/configuration-sample/ods-core.ods-api-service.env.sample index 3b0f8a721..274c43c09 100644 --- a/configuration-sample/ods-core.ods-api-service.env.sample +++ b/configuration-sample/ods-core.ods-api-service.env.sample @@ -4,7 +4,7 @@ # OpenDevStack Api Service Version # See https://github.com/opendevstack/ods-api-service/releases -ODS_API_SERVICE_VERSION=0.0.1 +ODS_API_SERVICE_VERSION=0.0.2 # JVM configuration JAVA_OPTS=-Xmx1g -Djavax.net.ssl.trustStore=/home/default/custom-truststore.jks -Djavax.net.ssl.trustStorePassword=changeit From c00627529166e5290687801ceae680dba3bd8c26 Mon Sep 17 00:00:00 2001 From: brais <26645694+BraisVQ@users.noreply.github.com> Date: Mon, 2 Mar 2026 16:02:30 +0100 Subject: [PATCH 8/8] Release 4.12.0 Improved automatic tests, updated keystore path, added allure plugin, and 
removed unnecessary configurations. --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 539cdd198..ebef1463f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,11 @@ ### Added +### Changed + +### Fixed + +## [4.12.0] - 2026-03-02 ### Changed - Improved automatic tests for the quickstarters ([#1362](https://github.com/opendevstack/ods-core/pull/1362)) - Update keystore path in import_certs.sh script ([#1365](https://github.com/opendevstack/ods-core/pull/1365))