diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a4354c59d6d..4141c258235 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,12 +4,29 @@ include: - local: ".gitlab/macrobenchmarks.yml" - local: ".gitlab/exploration-tests.yml" - local: ".gitlab/ci-visibility-tests.yml" + - project: 'DataDog/apm-reliability/apm-sdks-benchmarks' + file: '.gitlab/ci-java-spring-petclinic.yml' + ref: 'main' + - project: 'DataDog/apm-reliability/apm-sdks-benchmarks' + file: '.gitlab/ci-java-insecure-bank.yml' + ref: 'main' + - project: 'DataDog/apm-reliability/apm-sdks-benchmarks' + file: '.gitlab/ci-java-dacapo.yml' + ref: 'main' stages: - build - publish - shared-pipeline - benchmarks + - infrastructure + - java-spring-petclinic-tests + - java-spring-petclinic-macrobenchmarks + - java-startup-microbenchmarks + - java-load-microbenchmarks + - java-dacapo-microbenchmarks + - benchmark-conversion + - generate-slos - macrobenchmarks - tests - exploration-tests diff --git a/.gitlab/benchmarks.yml b/.gitlab/benchmarks.yml index 28aef7ad58b..391b4a7c6eb 100644 --- a/.gitlab/benchmarks.yml +++ b/.gitlab/benchmarks.yml @@ -30,82 +30,410 @@ UPSTREAM_BRANCH: $CI_COMMIT_REF_NAME # The branch or tag name for which project is built. UPSTREAM_COMMIT_SHA: $CI_COMMIT_SHA # The commit revision the project is built for. 
-benchmarks-startup: - extends: .benchmarks - script: - - !reference [ .benchmarks, script ] - - ./steps/capture-hardware-software-info.sh - - ./steps/run-benchmarks.sh startup - - ./steps/analyze-results.sh startup +# Conversion template for benchmark artifacts +.convert-benchmarks: + timeout: 1h + tags: ["arch:amd64"] + image: registry.ddbuild.io/images/benchmarking-platform-tools-ubuntu:88265497 + rules: + - if: '$POPULATE_CACHE' + when: never + - if: '$CI_COMMIT_TAG =~ /^v?[0-9]+\.[0-9]+\.[0-9]+$/' + when: manual + allow_failure: true + - if: '$CI_COMMIT_BRANCH == "master"' + when: on_success + interruptible: false + - when: on_success + interruptible: true + before_script: + - export ARTIFACTS_DIR="$(pwd)/reports" && mkdir -p "${ARTIFACTS_DIR}" + - export CONVERTED_DIR="${ARTIFACTS_DIR}/converted" && mkdir -p "${CONVERTED_DIR}" + # Determine baseline_or_candidate based on branch + - | + if [ "$CI_COMMIT_BRANCH" == "master" ]; then + export BASELINE_OR_CANDIDATE="baseline" + else + export BASELINE_OR_CANDIDATE="candidate" + fi + # Extract version from upstream.env if available + - | + if [ -f upstream.env ]; then + source upstream.env + export UPSTREAM_TRACER_VERSION="${UPSTREAM_TRACER_VERSION:-unknown}" + else + export UPSTREAM_TRACER_VERSION="unknown" + fi + # Get CPU model and kernel version + - | + if command -v lscpu >/dev/null 2>&1; then + export CPU_MODEL=$(lscpu | grep -m1 "Model name:" | sed 's/Model name:[[:space:]]*//' || echo "unknown") + elif [ -f /proc/cpuinfo ]; then + export CPU_MODEL=$(grep -m1 "model name" /proc/cpuinfo 2>/dev/null | cut -d: -f2 | sed 's/^[[:space:]]*//' || echo "unknown") + else + export CPU_MODEL="unknown" + fi + - export KERNEL_VERSION=$(uname -a || echo "Unknown") + - export CI_JOB_DATE=$(date +%s) + - export CI_COMMIT_SHORT_SHA="${CI_COMMIT_SHORT_SHA:-${CI_COMMIT_SHA:0:7}}" + - export CI_COMMIT_TIMESTAMP="${CI_COMMIT_TIMESTAMP:-$(date +%s)}" + artifacts: + name: "converted-benchmarks" + paths: + - reports/converted/ + 
expire_in: 3 months + when: always + variables: + UPSTREAM_PROJECT_ID: $CI_PROJECT_ID + UPSTREAM_PROJECT_NAME: $CI_PROJECT_NAME + UPSTREAM_BRANCH: $CI_COMMIT_REF_NAME + UPSTREAM_COMMIT_SHA: $CI_COMMIT_SHA -benchmarks-load: - extends: .benchmarks - script: - - !reference [ .benchmarks, script ] - - ./steps/capture-hardware-software-info.sh - - ./steps/run-benchmarks.sh load - - ./steps/analyze-results.sh load +# benchmarks-startup: +# extends: .benchmarks +# script: +# - !reference [ .benchmarks, script ] +# - ./steps/capture-hardware-software-info.sh +# - ./steps/run-benchmarks.sh startup +# - ./steps/analyze-results.sh startup -benchmarks-dacapo: - extends: .benchmarks - script: - - !reference [ .benchmarks, script ] - - ./steps/capture-hardware-software-info.sh - - ./steps/run-benchmarks.sh dacapo - - ./steps/analyze-results.sh dacapo +# benchmarks-load: +# extends: .benchmarks +# script: +# - !reference [ .benchmarks, script ] +# - ./steps/capture-hardware-software-info.sh +# - ./steps/run-benchmarks.sh load +# - ./steps/analyze-results.sh load -benchmarks-post-results: - extends: .benchmarks - tags: ["arch:amd64"] - script: - - !reference [ .benchmarks, script ] - - ./steps/upload-results-to-s3.sh - - ./steps/post-pr-comment.sh +# benchmarks-dacapo: +# extends: .benchmarks +# script: +# - !reference [ .benchmarks, script ] +# - ./steps/capture-hardware-software-info.sh +# - ./steps/run-benchmarks.sh dacapo +# - ./steps/analyze-results.sh dacapo + +# Convert startup benchmark artifacts +convert-startup-benchmarks: + extends: .convert-benchmarks + stage: benchmark-conversion needs: - - job: benchmarks-startup + - job: linux-java-spring-petclinic-microbenchmark-startup-tracing artifacts: true - - job: benchmarks-load + - job: linux-java-spring-petclinic-microbenchmark-startup-profiling artifacts: true - - job: benchmarks-dacapo + - job: linux-java-spring-petclinic-microbenchmark-startup-appsec artifacts: true + - job: 
linux-java-spring-petclinic-microbenchmark-startup-iast + artifacts: true + - job: linux-java-insecure-bank-microbenchmark-startup-tracing + artifacts: true + - job: linux-java-insecure-bank-microbenchmark-startup-iast + artifacts: true + rules: + - if: '$POPULATE_CACHE' + when: never + - if: '$CI_COMMIT_TAG =~ /^v?[0-9]+\.[0-9]+\.[0-9]+$/' + when: manual + allow_failure: true + - if: '$CI_COMMIT_BRANCH == "master"' + when: on_success + interruptible: false + - when: on_success + interruptible: true + script: + - !reference [.convert-benchmarks, before_script] + - | + echo "=== Converting startup benchmark artifacts ===" + # Check if artifacts directory exists + if [ ! -d "artifacts" ]; then + echo "WARNING: artifacts directory not found. No startup benchmark artifacts to convert." + exit 0 + fi + # Find all startup benchmark artifacts + # Artifacts are in artifacts/startup-{app}/{variant}/startup_*.csv + find artifacts -type d -name "startup-*" 2>/dev/null | while read app_dir; do + app_name=$(basename "$app_dir" | sed 's/startup-//') + echo "Processing startup artifacts for application: $app_name" + + # Find all variant directories + find "$app_dir" -mindepth 1 -maxdepth 1 -type d | while read variant_dir; do + variant=$(basename "$variant_dir") + echo " Processing variant: $variant" + + # Check if there are CSV files + if [ -n "$(find "$variant_dir" -name "startup_*.csv" 2>/dev/null | head -1)" ]; then + # Build extra_params JSON + extra_params="{\ + \"baseline_or_candidate\":\"${BASELINE_OR_CANDIDATE}\", \ + \"application\":\"${app_name}\", \ + \"release_version\":\"${UPSTREAM_TRACER_VERSION}\", \ + \"cpu_model\":\"${CPU_MODEL}\", \ + \"kernel_version\":\"${KERNEL_VERSION}\", \ + \"ci_job_date\":\"${CI_JOB_DATE}\", \ + \"ci_job_id\":\"${CI_JOB_ID}\", \ + \"ci_pipeline_id\":\"${CI_PIPELINE_ID}\", \ + \"git_commit_sha\":\"${CI_COMMIT_SHORT_SHA}\", \ + \"git_commit_date\":\"${CI_COMMIT_TIMESTAMP}\", \ + \"git_branch\":\"${CI_COMMIT_REF_NAME}\" \ + }" + + # Convert 
using JavaStartup converter from relenv-benchmark-analyzer + output_file="${CONVERTED_DIR}/startup/${app_name}/${variant}/benchmark-${BASELINE_OR_CANDIDATE}.json" + mkdir -p "$(dirname "$output_file")" + + benchmark_analyzer convert \ + --framework=javastartup \ + --extra-params="$extra_params" \ + --outpath="$output_file" \ + "$variant_dir" || { + echo "ERROR: Failed to convert startup artifacts for $app_name/$variant" + continue + } + + echo "Converted to: $output_file" + else + echo "WARNING: No CSV files found in $variant_dir" + fi + done + done + - | + if [ ! -d "${CONVERTED_DIR}/startup" ] || [ -z "$(find "${CONVERTED_DIR}/startup" -name "*.json" 2>/dev/null)" ]; then + echo "WARNING: No startup benchmark artifacts were converted. This is expected if benchmark jobs didn't run." + fi -check-big-regressions: - extends: .benchmarks +# Convert dacapo benchmark artifacts +convert-dacapo-benchmarks: + extends: .convert-benchmarks + stage: benchmark-conversion needs: - - job: benchmarks-startup + - job: linux-java-dacapo-microbenchmark-baseline artifacts: true - - job: benchmarks-dacapo + - job: linux-java-dacapo-microbenchmark-tracing + artifacts: true + - job: linux-java-dacapo-microbenchmark-profiling + artifacts: true + - job: linux-java-dacapo-microbenchmark-appsec + artifacts: true + - job: linux-java-dacapo-microbenchmark-iast + artifacts: true + - job: linux-java-dacapo-microbenchmark-iast_GLOBAL artifacts: true - when: on_success - tags: ["arch:amd64"] rules: - if: '$POPULATE_CACHE' when: never - - if: '$CI_COMMIT_BRANCH =~ /backport-pr-/' + - if: '$CI_COMMIT_TAG =~ /^v?[0-9]+\.[0-9]+\.[0-9]+$/' + when: manual + allow_failure: true + - if: '$CI_COMMIT_BRANCH == "master"' + when: on_success + interruptible: false + - when: on_success + interruptible: true + script: + - !reference [.convert-benchmarks, before_script] + - | + echo "=== Converting dacapo benchmark artifacts ===" + # Check if artifacts directory exists + if [ ! 
-d "artifacts" ]; then + echo "WARNING: artifacts directory not found. No dacapo benchmark artifacts to convert." + exit 0 + fi + # Find all dacapo benchmark artifacts + # Artifacts are in artifacts/dacapo/{variant}/{benchmark}/ + find artifacts -type d -path "*/dacapo/*" 2>/dev/null | while read dacapo_dir; do + # Extract variant and benchmark from path: artifacts/dacapo/{variant}/{benchmark} + path_parts=$(echo "$dacapo_dir" | sed 's|artifacts/dacapo/||') + variant=$(echo "$path_parts" | cut -d'/' -f1) + benchmark=$(echo "$path_parts" | cut -d'/' -f2) + + if [ -z "$benchmark" ] || [ "$benchmark" == "$variant" ]; then + # Try alternative path structure + benchmark=$(basename "$dacapo_dir") + variant=$(basename "$(dirname "$dacapo_dir")") + fi + + echo "Processing dacapo artifacts: variant=$variant, benchmark=$benchmark" + + # Check if there are CSV files in scratch/ or log files + if [ -n "$(find "$dacapo_dir" -name "dacapo-latency-usec-simple-*.csv" -path "*/scratch/*" 2>/dev/null | head -1)" ] || \ + [ -n "$(find "$dacapo_dir" -name "dacapo-*.log" 2>/dev/null | head -1)" ]; then + # Build extra_params JSON + extra_params="{\ + \"baseline_or_candidate\":\"${BASELINE_OR_CANDIDATE}\", \ + \"application\":\"${benchmark}\", \ + \"release_version\":\"${UPSTREAM_TRACER_VERSION}\", \ + \"cpu_model\":\"${CPU_MODEL}\", \ + \"kernel_version\":\"${KERNEL_VERSION}\", \ + \"ci_job_date\":\"${CI_JOB_DATE}\", \ + \"ci_job_id\":\"${CI_JOB_ID}\", \ + \"ci_pipeline_id\":\"${CI_PIPELINE_ID}\", \ + \"git_commit_sha\":\"${CI_COMMIT_SHORT_SHA}\", \ + \"git_commit_date\":\"${CI_COMMIT_TIMESTAMP}\", \ + \"git_branch\":\"${CI_COMMIT_REF_NAME}\", \ + \"variant\":\"${variant}\" \ + }" + + # Convert using JavaDacapo converter from relenv-benchmark-analyzer + output_file="${CONVERTED_DIR}/dacapo/${variant}/${benchmark}/benchmark-${BASELINE_OR_CANDIDATE}.json" + mkdir -p "$(dirname "$output_file")" + + benchmark_analyzer convert \ + --framework=javadacapo \ + --extra-params="$extra_params" \ 
+ --outpath="$output_file" \ + "$dacapo_dir" || { + echo "ERROR: Failed to convert dacapo artifacts for $variant/$benchmark" + continue + } + + echo "Converted to: $output_file" + else + echo "WARNING: No CSV or log files found in $dacapo_dir" + fi + done + - | + if [ ! -d "${CONVERTED_DIR}/dacapo" ] || [ -z "$(find "${CONVERTED_DIR}/dacapo" -name "*.json" 2>/dev/null)" ]; then + echo "WARNING: No dacapo benchmark artifacts were converted. This is expected if benchmark jobs didn't run." + fi + +# Convert load benchmark artifacts +convert-load-benchmarks: + extends: .convert-benchmarks + stage: benchmark-conversion + needs: + - job: linux-java-spring-petclinic-microbenchmark-load-tracing + artifacts: true + - job: linux-java-spring-petclinic-microbenchmark-load-profiling + artifacts: true + - job: linux-java-spring-petclinic-microbenchmark-load-appsec + artifacts: true + - job: linux-java-spring-petclinic-microbenchmark-load-iast + artifacts: true + - job: linux-java-spring-petclinic-microbenchmark-load-code_origins + artifacts: true + - job: linux-java-insecure-bank-microbenchmark-load-no_agent + artifacts: true + - job: linux-java-insecure-bank-microbenchmark-load-tracing + artifacts: true + - job: linux-java-insecure-bank-microbenchmark-load-profiling + artifacts: true + - job: linux-java-insecure-bank-microbenchmark-load-iast + artifacts: true + - job: linux-java-insecure-bank-microbenchmark-load-iast_GLOBAL + artifacts: true + - job: linux-java-insecure-bank-microbenchmark-load-iast_FULL + artifacts: true + rules: + - if: '$POPULATE_CACHE' when: never - - if: '$CI_COMMIT_BRANCH !~ /^(master|release\/)/' + - if: '$CI_COMMIT_TAG =~ /^v?[0-9]+\.[0-9]+\.[0-9]+$/' + when: manual + allow_failure: true + - if: '$CI_COMMIT_BRANCH == "master"' when: on_success - - when: never - # ARTIFACTS_DIR /go/src/github.com/DataDog/apm-reliability/dd-trace-java/reports/ - # need to convert them + interruptible: false + - when: on_success + interruptible: true script: - - !reference 
[ .benchmarks, script ] - - | - for benchmarkType in startup dacapo; do - find "$ARTIFACTS_DIR/$benchmarkType" -name "benchmark-baseline.json" -o -name "benchmark-candidate.json" | while read file; do - relpath="${file#$ARTIFACTS_DIR/$benchmarkType/}" - prefix="${relpath%/benchmark-*}" # Remove the trailing /benchmark-(baseline|candidate).json - prefix="${prefix#./}" # Remove any leading ./ - prefix="${prefix//\//-}" # Replace / with - - case "$file" in - *benchmark-baseline.json) type="baseline" ;; - *benchmark-candidate.json) type="candidate" ;; - esac - echo "Moving $file to $ARTIFACTS_DIR/${type}-${prefix}.converted.json" - cp "$file" "$ARTIFACTS_DIR/${type}-${prefix}.converted.json" - done + - !reference [.convert-benchmarks, before_script] + - | + echo "=== Processing load benchmark artifacts ===" + # Check if artifacts directory exists + if [ ! -d "artifacts" ]; then + echo "WARNING: artifacts directory not found. No load benchmark artifacts to process." + exit 0 + fi + find artifacts -name "candidate-*.converted.json" -o -name "baseline-*.converted.json" 2>/dev/null | while read json_file; do + echo "Processing load benchmark file: $json_file" + + # Verify it's valid JSON + if ! 
python3 -m json.tool "$json_file" > /dev/null 2>&1; then + echo "WARNING: Invalid JSON file: $json_file" + continue + fi + + # Extract information from filename + filename=$(basename "$json_file") + if [[ "$filename" =~ candidate-([^-]+)--([^-]+)--([0-9]+)\.converted\.json ]]; then + stage="${BASH_REMATCH[1]}" + product="${BASH_REMATCH[2]}" + run_id="${BASH_REMATCH[3]}" + type="candidate" + elif [[ "$filename" =~ baseline-([^-]+)--([^-]+)--([0-9]+)\.converted\.json ]]; then + stage="${BASH_REMATCH[1]}" + product="${BASH_REMATCH[2]}" + run_id="${BASH_REMATCH[3]}" + type="baseline" + else + echo "WARNING: Could not parse filename: $filename" + continue + fi + + # Copy to organized structure + output_dir="${CONVERTED_DIR}/load/${product}/${stage}" + mkdir -p "$output_dir" + cp "$json_file" "$output_dir/benchmark-${type}-run${run_id}.json" + + echo "Organized to: $output_dir/benchmark-${type}-run${run_id}.json" done - - bp-runner $CI_PROJECT_DIR/.gitlab/benchmarks/bp-runner.fail-on-regression.yml --debug + - | + if [ ! 
-d "${CONVERTED_DIR}/load" ] || [ -z "$(find "${CONVERTED_DIR}/load" -name "*.json" 2>/dev/null)" ]; then + echo "WARNING: No load benchmark artifacts were found" + fi + +# benchmarks-post-results: +# extends: .benchmarks +# tags: ["arch:amd64"] +# script: +# - !reference [ .benchmarks, script ] +# - ./steps/upload-results-to-s3.sh +# - ./steps/post-pr-comment.sh +# needs: +# - job: benchmarks-startup +# artifacts: true +# - job: benchmarks-load +# artifacts: true +# - job: benchmarks-dacapo +# artifacts: true + +# check-big-regressions: +# extends: .benchmarks +# needs: +# - job: benchmarks-startup +# artifacts: true +# - job: benchmarks-dacapo +# artifacts: true +# when: on_success +# tags: ["arch:amd64"] +# rules: +# - if: '$POPULATE_CACHE' +# when: never +# - if: '$CI_COMMIT_BRANCH =~ /backport-pr-/' +# when: never +# - if: '$CI_COMMIT_BRANCH !~ /^(master|release\/)/' +# when: on_success +# - when: never +# # ARTIFACTS_DIR /go/src/github.com/DataDog/apm-reliability/dd-trace-java/reports/ +# # need to convert them +# script: +# - !reference [ .benchmarks, script ] +# - | +# for benchmarkType in startup dacapo; do +# find "$ARTIFACTS_DIR/$benchmarkType" -name "benchmark-baseline.json" -o -name "benchmark-candidate.json" | while read file; do +# relpath="${file#$ARTIFACTS_DIR/$benchmarkType/}" +# prefix="${relpath%/benchmark-*}" # Remove the trailing /benchmark-(baseline|candidate).json +# prefix="${prefix#./}" # Remove any leading ./ +# prefix="${prefix//\//-}" # Replace / with - +# case "$file" in +# *benchmark-baseline.json) type="baseline" ;; +# *benchmark-candidate.json) type="candidate" ;; +# esac +# echo "Moving $file to $ARTIFACTS_DIR/${type}-${prefix}.converted.json" +# cp "$file" "$ARTIFACTS_DIR/${type}-${prefix}.converted.json" +# done +# done +# - bp-runner $CI_PROJECT_DIR/.gitlab/benchmarks/bp-runner.fail-on-regression.yml --debug .dsm-kafka-benchmarks: stage: benchmarks diff --git a/.gitlab/macrobenchmarks.yml b/.gitlab/macrobenchmarks.yml index 
b1c5681fb5b..151dcf496e4 100644 --- a/.gitlab/macrobenchmarks.yml +++ b/.gitlab/macrobenchmarks.yml @@ -92,12 +92,12 @@ check-slo-breaches: artifacts: true - job: otel-latest artifacts: true - - job: benchmarks-startup - artifacts: true - - job: benchmarks-load - artifacts: true - - job: benchmarks-dacapo - artifacts: true + # - job: benchmarks-startup + # artifacts: true + # - job: benchmarks-load + # artifacts: true + # - job: benchmarks-dacapo + # artifacts: true script: # macrobenchmarks are located here, files are already in "converted" format - export ARTIFACTS_DIR="$(pwd)/platform/artifacts/" && mkdir -p "${ARTIFACTS_DIR}" diff --git a/.gitlab/thresholds/java-spring-petclinic.yml b/.gitlab/thresholds/java-spring-petclinic.yml new file mode 100644 index 00000000000..9064e7bd352 --- /dev/null +++ b/.gitlab/thresholds/java-spring-petclinic.yml @@ -0,0 +1,36 @@ +# Thresholds set based on guidance in https://datadoghq.atlassian.net/wiki/x/LgI1LgE#How-to-choose-thresholds-for-pre-release-gates%3F + +experiments: + - name: Run SLO breach check + steps: + - name: SLO breach check + run: fail_on_breach + # https://datadoghq.atlassian.net/wiki/x/LgI1LgE#How-to-choose-a-warning-range-for-pre-release-gates%3F + warning_range: 10 + # File spec + # https://datadoghq.atlassian.net/wiki/x/LgI1LgE#Specification + # Measurements + # https://benchmarking.us1.prod.dog/trends?projectId=4&branch=master&trendsTab=per_scenario + scenarios: + # Note that thresholds there are chosen based on the confidence interval with a 10% adjustment. 
+ + # Standard macrobenchmarks + # https://benchmarking.us1.prod.dog/trends?projectId=4&branch=master&trendsTab=per_scenario&scenario=normal_operation%2Fonly-tracing&trendsType=scenario + - name: normal_operation/only-tracing + thresholds: + - agg_http_req_duration_p50 < 2.6 ms + - agg_http_req_duration_p99 < 8.5 ms + # https://benchmarking.us1.prod.dog/trends?projectId=4&branch=master&trendsTab=per_scenario&scenario=normal_operation%2Fotel-latest&trendsType=scenario + - name: normal_operation/otel-latest + thresholds: + - agg_http_req_duration_p50 < 2.5 ms + - agg_http_req_duration_p99 < 10 ms + + # https://benchmarking.us1.prod.dog/trends?projectId=4&branch=master&trendsTab=per_scenario&scenario=high_load%2Fonly-tracing&trendsType=scenario + - name: high_load/only-tracing + thresholds: + - throughput > 1100.0 op/s + # https://benchmarking.us1.prod.dog/trends?projectId=4&branch=master&trendsTab=per_scenario&scenario=high_load%2Fotel-latest&trendsType=scenario + - name: high_load/otel-latest + thresholds: + - throughput > 1100.0 op/s diff --git a/benchmark/Dockerfile b/benchmark/Dockerfile deleted file mode 100644 index 0279186478a..00000000000 --- a/benchmark/Dockerfile +++ /dev/null @@ -1,103 +0,0 @@ -# Petclinic download and compilation stage -FROM eclipse-temurin:17-jammy as petclinic - -ARG SPRING_PETCLINIC_COMMIT=cefaf55dd124d0635abfe857c3c99a3d3ea62017 - -RUN apt-get update \ - && apt-get -y install git \ - && apt-get -y clean \ - && rm -rf /var/lib/apt/lists/* - -RUN set -eux;\ - git init spring-petclinic;\ - cd spring-petclinic;\ - git remote add origin https://github.com/spring-projects/spring-petclinic.git;\ - git fetch --depth 1 origin ${SPRING_PETCLINIC_COMMIT};\ - git checkout ${SPRING_PETCLINIC_COMMIT};\ - ./mvnw dependency:go-offline - -RUN cd spring-petclinic \ - && ./mvnw package -Dmaven.test.skip=true \ - && cp target/*.jar /spring-petclinic.jar - - -# Insecure bank download and compilation stage -FROM eclipse-temurin:17-jammy as insecure-bank - 
-RUN apt-get update \ - && apt-get -y install git \ - && apt-get -y clean \ - && rm -rf /var/lib/apt/lists/* - -RUN git clone --depth 1 --branch malvarez/spring-boot --single-branch https://github.com/hdiv/insecure-bank.git \ - && cd insecure-bank \ - && ./gradlew -q dependencies - -RUN cd insecure-bank \ - && ./gradlew bootWar \ - && cp build/libs/*.war /insecure-bank.war - -# Dacapo download -FROM debian:bookworm-slim as dacapo -RUN apt-get update \ - && apt-get -y install wget unzip \ - && apt-get -y clean \ - && rm -rf /var/lib/apt/lists/* - -ARG DACAPO_VERSION=23.11-chopin -# The data for the big benchmarks is removed too ensure the final docker image is not too big -RUN wget -nv -O dacapo.zip https://download.dacapobench.org/chopin/dacapo-$DACAPO_VERSION.zip \ - && mkdir /dacapo \ - && unzip dacapo.zip -d /dacapo/ \ - && rm -rf /dacapo/dacapo-$DACAPO_VERSION/dat/luindex \ - && rm -rf /dacapo/dacapo-$DACAPO_VERSION/dat/lusearch \ - && rm -rf /dacapo/dacapo-$DACAPO_VERSION/dat/graphchi \ - && rm dacapo.zip - -FROM debian:bookworm-slim - -RUN apt-get update \ - && apt-get -y install git curl wget procps gettext-base \ - && apt-get -y clean \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=eclipse-temurin:8-jammy /opt/java/openjdk /usr/lib/jvm/8 -COPY --from=eclipse-temurin:11-jammy /opt/java/openjdk /usr/lib/jvm/11 -COPY --from=eclipse-temurin:17-jammy /opt/java/openjdk /usr/lib/jvm/17 - -RUN rm -rf \ - /usr/lib/jvm/*/man \ - /usr/lib/jvm/*/src.zip \ - /usr/lib/jvm/*/lib/src.zip \ - /usr/lib/jvm/*/demo \ - /usr/lib/jvm/*/sample - -ENV JAVA_8_HOME=/usr/lib/jvm/8 -ENV JAVA_11_HOME=/usr/lib/jvm/11 -ENV JAVA_17_HOME=/usr/lib/jvm/17 -ENV JAVA_HOME=${JAVA_8_HOME} -ENV PATH=${PATH}:${JAVA_HOME}/bin - -ARG SIRUN_VERSION=0.1.11 -RUN wget -O sirun.tar.gz https://github.com/DataDog/sirun/releases/download/v$SIRUN_VERSION/sirun-v$SIRUN_VERSION-x86_64-unknown-linux-musl.tar.gz \ - && tar -xzf sirun.tar.gz \ - && rm sirun.tar.gz \ - && mv sirun /usr/bin/sirun - -ARG 
K6_VERSION=0.45.1 -RUN wget -O k6.tar.gz https://github.com/grafana/k6/releases/download/v$K6_VERSION/k6-v$K6_VERSION-linux-amd64.tar.gz \ - && tar --strip-components=1 -xzf k6.tar.gz \ - && rm k6.tar.gz \ - && mv k6 /usr/bin/k6 - -RUN mkdir -p /app - -COPY --from=petclinic /spring-petclinic.jar /app/spring-petclinic.jar -ENV PETCLINIC=/app/spring-petclinic.jar - -COPY --from=insecure-bank /insecure-bank.war /app/insecure-bank.war -ENV INSECURE_BANK=/app/insecure-bank.war - -COPY --from=dacapo /dacapo/ /app/ -ARG DACAPO_VERSION=23.11-chopin -ENV DACAPO=/app/dacapo-$DACAPO_VERSION.jar diff --git a/benchmark/README.MD b/benchmark/README.MD deleted file mode 100644 index 30f3bbcf864..00000000000 --- a/benchmark/README.MD +++ /dev/null @@ -1,29 +0,0 @@ -# Benchmarks - -This directory contains different types of benchmarks. - -## Running Benchmarks via Docker - -Docker allows the execution of benchmarks without needing to install and configure your development environment. For example, package installation and installation of sirun are performed automatically. - -In order to run benchmarks using Docker, issue the following command from the `benchmark/` folder of this project: - -```sh -./run.sh -``` - -If you run into storage errors (e.g. running out of disk space), try removing all unused Docker containers, networks, and images with `docker system prune -af` before running the script again. Once finished, the reports will be available in the `benchmark/reports/` folder. Note that the script can take ~40 minutes to run. - -### Running specific benchmarks - -If you want to run only a specific category of benchmarks, you can do so via arguments: - -1. Run startup benchmarks -```sh -./run.sh startup [application]? -``` - -2. Run load benchmarks -```sh -./run.sh load [application]? 
-``` diff --git a/benchmark/benchmarks.sh b/benchmark/benchmarks.sh deleted file mode 100755 index 0b245038afa..00000000000 --- a/benchmark/benchmarks.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash -set -eu - -readonly SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) -export TRACER_DIR="${SCRIPT_DIR}/.." -export REPORTS_DIR="${SCRIPT_DIR}/reports" -export UTILS_DIR="${SCRIPT_DIR}/utils" -export SHELL_UTILS_DIR="${UTILS_DIR}/shell" -export K6_UTILS_DIR="${UTILS_DIR}/k6" -export TRACER="${SCRIPT_DIR}/tracer/dd-java-agent.jar" -export NO_AGENT_VARIANT="no_agent" - -run_benchmarks() { - local type=$1 - if [[ -d "${type}" ]] && [[ -f "${type}/run.sh" ]]; then - cd "${type}" - ./run.sh "$@" - cd "${SCRIPT_DIR}" - fi -} - -# Find or rebuild tracer to be used in the benchmarks -if [[ ! -f "${TRACER}" ]]; then - mkdir -p "${SCRIPT_DIR}/tracer" - cd "${TRACER_DIR}" - readonly TRACER_VERSION=$(./gradlew properties -q | grep "version:" | awk '{print $2}') - readonly TRACER_COMPILED="${SCRIPT_DIR}/../dd-java-agent/build/libs/dd-java-agent-${TRACER_VERSION}.jar" - if [[ ! -f "${TRACER_COMPILED}" ]]; then - echo "Tracer not found, starting gradle compile ..." 
- ./gradlew assemble - fi - cp "${TRACER_COMPILED}" "${TRACER}" - cd "${SCRIPT_DIR}" -fi - -if [[ "$#" == '0' ]]; then - for type in 'startup' 'load' 'dacapo'; do - run_benchmarks "$type" - done -else - run_benchmarks "$@" -fi diff --git a/benchmark/dacapo/benchmark.json b/benchmark/dacapo/benchmark.json deleted file mode 100644 index ec0ca767f43..00000000000 --- a/benchmark/dacapo/benchmark.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "name": "dacapo_${BENCHMARK}", - "setup": "bash -c \"mkdir -p ${OUTPUT_DIR}/${VARIANT}\"", - "run": "bash -c \"java ${JAVA_OPTS} -jar ${DACAPO} --converge --scratch-directory=${OUTPUT_DIR}/${VARIANT}/scratch --latency-csv ${BENCHMARK} &> ${OUTPUT_DIR}/${VARIANT}/dacapo.log\"", - "timeout": 150, - "iterations": 1, - "variants": { - "${NO_AGENT_VARIANT}": { - "env": { - "VARIANT": "${NO_AGENT_VARIANT}", - "JAVA_OPTS": "" - } - }, - "tracing": { - "env": { - "VARIANT": "tracing", - "JAVA_OPTS": "-javaagent:${TRACER}" - } - }, - "profiling": { - "env": { - "VARIANT": "profiling", - "JAVA_OPTS": "-javaagent:${TRACER} -Ddd.profiling.enabled=true" - } - }, - "appsec": { - "env": { - "VARIANT": "appsec", - "JAVA_OPTS": "-javaagent:${TRACER} -Ddd.appsec.enabled=true -Ddd.iast.enabled=false" - } - }, - "iast": { - "env": { - "VARIANT": "iast", - "JAVA_OPTS": "-javaagent:${TRACER} -Ddd.iast.enabled=true" - } - }, - "iast_GLOBAL": { - "env": { - "VARIANT": "iast_GLOBAL", - "JAVA_OPTS": "-javaagent:${TRACER} -Ddd.iast.enabled=true -Ddd.iast.context.mode=GLOBAL" - } - } - } -} diff --git a/benchmark/dacapo/run.sh b/benchmark/dacapo/run.sh deleted file mode 100755 index ece44f9e5f0..00000000000 --- a/benchmark/dacapo/run.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash -set -eu - -source "${UTILS_DIR}/update-java-version.sh" 11 - -function message() { - echo "$(date +"%T"): $1" -} - -run_benchmark() { - local type=$1 - - message "dacapo benchmark: ${type} started" - - # export the benchmark - export BENCHMARK="${type}" - - # create output folder 
for the test - export OUTPUT_DIR="${REPORTS_DIR}/dacapo/${type}" - mkdir -p "${OUTPUT_DIR}" - - # substitute environment variables in the json file - benchmark=$(mktemp) - # shellcheck disable=SC2046 - # shellcheck disable=SC2016 - envsubst "$(printf '${%s} ' $(env | cut -d'=' -f1))" "${benchmark}" - - # run the sirun test - sirun "${benchmark}" &>"${OUTPUT_DIR}/${type}.json" - - message "dacapo benchmark: ${type} finished" -} - -if [ "$#" == '2' ]; then - run_benchmark "$2" -else - for benchmark in biojava tomcat ; do - run_benchmark "${benchmark}" - done -fi - diff --git a/benchmark/load/insecure-bank/k6.js b/benchmark/load/insecure-bank/k6.js deleted file mode 100644 index 2dd800fa7e5..00000000000 --- a/benchmark/load/insecure-bank/k6.js +++ /dev/null @@ -1,76 +0,0 @@ -import http from 'k6/http'; -import {checkResponse, isOk, isRedirect} from "../../utils/k6.js"; - -const variants = { - "no_agent": { - "APP_URL": 'http://localhost:8080', - }, - "tracing": { - "APP_URL": 'http://localhost:8081', - }, - "profiling": { - "APP_URL": 'http://localhost:8082', - }, - "iast": { - "APP_URL": 'http://localhost:8083', - }, - "iast_GLOBAL": { - "APP_URL": 'http://localhost:8084', - }, - "iast_FULL": { - "APP_URL": 'http://localhost:8085', - }, -} - -export const options = function (variants) { - let scenarios = {}; - for (const variant of Object.keys(variants)) { - scenarios[`load--insecure-bank--${variant}--warmup`] = { - executor: 'constant-vus', // https://grafana.com/docs/k6/latest/using-k6/scenarios/executors/#all-executors - vus: 5, - duration: '165s', - gracefulStop: '2s', - env: { - "APP_URL": variants[variant]["APP_URL"] - } - }; - - scenarios[`load--insecure-bank--${variant}--high_load`] = { - executor: 'constant-vus', - vus: 5, - startTime: '167s', - duration: '15s', - gracefulStop: '2s', - env: { - "APP_URL": variants[variant]["APP_URL"] - } - }; - } - - return { - discardResponseBodies: true, - scenarios, - } -}(variants); - -export default function () { - - // 
login form - const loginResponse = http.post(`${__ENV.APP_URL}/login`, { - username: 'john', - password: 'test' - }, { - redirects: 0 - }); - checkResponse(loginResponse, isRedirect); - - // dashboard - const dashboard = http.get(`${__ENV.APP_URL}/dashboard`); - checkResponse(dashboard, isOk); - - // logout - const logout = http.get(`${__ENV.APP_URL}/j_spring_security_logout`, { - redirects: 0 - }); - checkResponse(logout, isRedirect); -} diff --git a/benchmark/load/insecure-bank/start-servers.sh b/benchmark/load/insecure-bank/start-servers.sh deleted file mode 100755 index 4cae95567f2..00000000000 --- a/benchmark/load/insecure-bank/start-servers.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -set -e - -start_server() { - local VARIANT=$1 - local JAVA_OPTS=$2 - - if [ -n "$CI_JOB_TOKEN" ]; then - # Inside BP, so we can assume 24 CPU cores available and set CPU affinity - CPU_AFFINITY_APP=$3 - else - CPU_AFFINITY_APP="" - fi - - mkdir -p "${LOGS_DIR}/${VARIANT}" - ${CPU_AFFINITY_APP}java ${JAVA_OPTS} -Xms3G -Xmx3G -jar ${INSECURE_BANK} &> ${LOGS_DIR}/${VARIANT}/insecure-bank.log &PID=$! 
- echo "${CPU_AFFINITY_APP}java ${JAVA_OPTS} -Xms3G -Xmx3G -jar ${INSECURE_BANK} &> ${LOGS_DIR}/${VARIANT}/insecure-bank.log [PID=$PID]" -} - -start_server "no_agent" "-Dserver.port=8080" "taskset -c 47 " & -start_server "tracing" "-javaagent:${TRACER} -Dserver.port=8081" "taskset -c 46 " & -start_server "profiling" "-javaagent:${TRACER} -Ddd.profiling.enabled=true -Dserver.port=8082" "taskset -c 45 " & -start_server "iast" "-javaagent:${TRACER} -Ddd.iast.enabled=true -Dserver.port=8083" "taskset -c 44 " & -start_server "iast_GLOBAL" "-javaagent:${TRACER} -Ddd.iast.enabled=true -Ddd.iast.context.mode=GLOBAL -Dserver.port=8084" "taskset -c 43 " & -start_server "iast_FULL" "-javaagent:${TRACER} -Ddd.iast.enabled=true -Ddd.iast.detection.mode=FULL -Dserver.port=8085" "taskset -c 42 " & - -wait diff --git a/benchmark/load/petclinic/k6.js b/benchmark/load/petclinic/k6.js deleted file mode 100644 index debeab10a8e..00000000000 --- a/benchmark/load/petclinic/k6.js +++ /dev/null @@ -1,61 +0,0 @@ -import http from 'k6/http'; -import {checkResponse, isOk} from "../../utils/k6.js"; - -const variants = { - "no_agent": { - "APP_URL": 'http://localhost:8080', - }, - "tracing": { - "APP_URL": 'http://localhost:8081', - }, - "profiling": { - "APP_URL": 'http://localhost:8082', - }, - "appsec": { - "APP_URL": 'http://localhost:8083', - }, - "iast": { - "APP_URL": 'http://localhost:8084', - }, - "code_origins": { - "APP_URL": 'http://localhost:8085', - } -} - -export const options = function (variants) { - const scenarios = {}; - for (const variant of Object.keys(variants)) { - scenarios[`load--petclinic--${variant}--warmup`] = { - executor: 'constant-vus', // https://grafana.com/docs/k6/latest/using-k6/scenarios/executors/#all-executors - vus: 5, - duration: '165s', - gracefulStop: '2s', - env: { - "APP_URL": variants[variant]["APP_URL"] - } - }; - - scenarios[`load--petclinic--${variant}--high_load`] = { - executor: 'constant-vus', - vus: 5, - startTime: '167s', - duration: '15s', 
- gracefulStop: '2s', - env: { - "APP_URL": variants[variant]["APP_URL"] - } - }; - } - - return { - discardResponseBodies: true, - scenarios, - } -}(variants); - -export default function () { - - // find owner - const ownersList = http.get(`${__ENV.APP_URL}/owners?lastName=`); - checkResponse(ownersList, isOk); -} diff --git a/benchmark/load/petclinic/start-servers.sh b/benchmark/load/petclinic/start-servers.sh deleted file mode 100755 index 1ebbb4e0418..00000000000 --- a/benchmark/load/petclinic/start-servers.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -set -e - -start_server() { - local VARIANT=$1 - local JAVA_OPTS=$2 - - if [ -n "$CI_JOB_TOKEN" ]; then - # Inside BP, so we can assume 24 CPU cores available and set CPU affinity - CPU_AFFINITY_APP=$3 - else - CPU_AFFINITY_APP="" - fi - - mkdir -p "${LOGS_DIR}/${VARIANT}" - ${CPU_AFFINITY_APP}java ${JAVA_OPTS} -Xms2G -Xmx2G -jar ${PETCLINIC} &> ${LOGS_DIR}/${VARIANT}/petclinic.log &PID=$! - echo "${CPU_AFFINITY_APP}java ${JAVA_OPTS} -Xms2G -Xmx2G -jar ${PETCLINIC} &> ${LOGS_DIR}/${VARIANT}/petclinic.log [PID=$!]" -} - -start_server "no_agent" "-Dserver.port=8080" "taskset -c 31-32 " & -start_server "tracing" "-javaagent:${TRACER} -Dserver.port=8081" "taskset -c 33-34 " & -start_server "profiling" "-javaagent:${TRACER} -Ddd.profiling.enabled=true -Dserver.port=8082" "taskset -c 35-36 " & -start_server "appsec" "-javaagent:${TRACER} -Ddd.appsec.enabled=true -Dserver.port=8083" "taskset -c 37-38 " & -start_server "iast" "-javaagent:${TRACER} -Ddd.iast.enabled=true -Dserver.port=8084" "taskset -c 39-40 " & -start_server "code_origins" "-javaagent:${TRACER} -Ddd.code.origin.for.spans.enabled=true -Dserver.port=8085" "taskset -c 41-42 " & - -wait diff --git a/benchmark/load/run.sh b/benchmark/load/run.sh deleted file mode 100755 index 5f2f265b045..00000000000 --- a/benchmark/load/run.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash - -set -e - -function message() { - echo "$(date +"%T"): $1" -} - 
-function healthcheck() { - local url=$1 - - while true; do - if [[ $(curl -fso /dev/null -w "%{http_code}" "${url}") = 200 ]]; then - break - fi - done -} - -type=$1 - -if [ -n "$CI_JOB_TOKEN" ]; then - # Inside BP, so we can assume 24 CPU cores on the second socket available and set CPU affinity - export CPU_AFFINITY_K6="taskset -c 24-27 " -else - export CPU_AFFINITY_K6="" -fi - -source "${UTILS_DIR}/update-java-version.sh" 17 - -for app in *; do - if [[ ! -d "${app}" ]]; then - continue - fi - - message "${type} benchmark: ${app} started" - - export OUTPUT_DIR="${REPORTS_DIR}/${type}/${app}" - mkdir -p ${OUTPUT_DIR} - - export LOGS_DIR="${ARTIFACTS_DIR}/${type}/${app}" - mkdir -p ${LOGS_DIR} - - # Using profiler variants for healthcheck as they are the slowest - if [ "${app}" == "petclinic" ]; then - HEALTHCHECK_URL=http://localhost:8082 - REPETITIONS_COUNT=2 - elif [ "${app}" == "insecure-bank" ]; then - HEALTHCHECK_URL=http://localhost:8082/login - REPETITIONS_COUNT=2 - else - echo "Unknown app ${app}" - exit 1 - fi - - for i in $(seq 1 $REPETITIONS_COUNT); do - bash -c "${UTILS_DIR}/../${type}/${app}/start-servers.sh" & - - echo "Waiting for serves to start..." - if [ "${app}" == "petclinic" ]; then - for port in $(seq 8080 8085); do - healthcheck http://localhost:$port - done - elif [ "${app}" == "insecure-bank" ]; then - for port in $(seq 8080 8085); do - healthcheck http://localhost:$port/login - done - fi - echo "Servers are up!" 
- - ( - cd ${app} && - bash -c "${CPU_AFFINITY_K6}${UTILS_DIR}/run-k6-load-test.sh 'pkill java'" - ) - done - - message "${type} benchmark: ${app} finished" -done diff --git a/benchmark/run.sh b/benchmark/run.sh deleted file mode 100755 index bcd3649e9a0..00000000000 --- a/benchmark/run.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -set -eu - -readonly SCRIPT_DIR="$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd)" -readonly INITIAL_DIR="$(pwd)" -readonly TRACER="${SCRIPT_DIR}/tracer/dd-java-agent.jar" - -cd "${SCRIPT_DIR}" - -# Build container image -echo "Building base image ..." -docker build \ - -t dd-trace-java/benchmark \ - . - -# Find or rebuild tracer to be used in the benchmarks -if [[ ! -f "${TRACER}" ]]; then - mkdir -p "${SCRIPT_DIR}/tracer" - cd "${SCRIPT_DIR}/.." - readonly TRACER_VERSION=$(./gradlew properties -q | grep "version:" | awk '{print $2}') - readonly TRACER_COMPILED="${SCRIPT_DIR}/../dd-java-agent/build/libs/dd-java-agent-${TRACER_VERSION}.jar" - if [ ! -f "${TRACER_COMPILED}" ]; then - echo "Tracer not found, starting gradle compile ..." - ./gradlew assemble - fi - cp "${TRACER_COMPILED}" "${TRACER}" - cd "${SCRIPT_DIR}" -fi - -# Trigger benchmarks -echo "Running benchmarks ..." 
-docker run --rm \ - -v "${HOME}/.gradle":/home/benchmark/.gradle:delegated \ - -v "${PWD}/..":/tracer:delegated \ - -w /tracer/benchmark \ - -e GRADLE_OPTS="-Dorg.gradle.daemon=false" \ - --entrypoint=./benchmarks.sh \ - --name dd-trace-java-benchmark \ - --cap-add SYS_ADMIN \ - dd-trace-java/benchmark \ - "$@" - -cd "${INITIAL_DIR}" diff --git a/benchmark/startup/insecure-bank/benchmark.json b/benchmark/startup/insecure-bank/benchmark.json deleted file mode 100644 index 17c69a50847..00000000000 --- a/benchmark/startup/insecure-bank/benchmark.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "name": "startup_insecure-bank", - "setup": "bash -c \"mkdir -p ${OUTPUT_DIR}/${VARIANT}\"", - "service": "bash -c \"${UTILS_DIR}/run-on-server-ready.sh http://localhost:8080/login 'pkill java'\"", - "run": "bash -c \"java -javaagent:${TRACER} -Ddd.benchmark.enabled=true -Ddd.benchmark.output.dir=${OUTPUT_DIR}/${VARIANT} ${JAVA_OPTS} -jar ${INSECURE_BANK} &> ${OUTPUT_DIR}/${VARIANT}/insecure-bank.log\"", - "iterations": 10, - "timeout": 60, - "variants": { - "tracing": { - "env": { - "VARIANT": "tracing", - "JAVA_OPTS": "" - } - }, - "iast": { - "env": { - "VARIANT": "iast", - "JAVA_OPTS": "-Ddd.iast.enabled=true" - } - } - } -} diff --git a/benchmark/startup/petclinic/benchmark.json b/benchmark/startup/petclinic/benchmark.json deleted file mode 100644 index 23713c38469..00000000000 --- a/benchmark/startup/petclinic/benchmark.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "name": "startup_petclinic", - "setup": "bash -c \"mkdir -p ${OUTPUT_DIR}/${VARIANT}\"", - "service": "bash -c \"${UTILS_DIR}/run-on-server-ready.sh http://localhost:8080 'pkill java'\"", - "run": "bash -c \"java -javaagent:${TRACER} -Ddd.benchmark.enabled=true -Ddd.benchmark.output.dir=${OUTPUT_DIR}/${VARIANT} ${JAVA_OPTS} -jar ${PETCLINIC} &> ${OUTPUT_DIR}/${VARIANT}/petclinic.log\"", - "iterations": 10, - "timeout": 60, - "variants": { - "tracing": { - "env": { - "VARIANT": "tracing", - "JAVA_OPTS": "" - } - }, - 
"profiling": { - "env": { - "VARIANT": "profiling", - "JAVA_OPTS": "-Ddd.profiling.enabled=true" - } - }, - "appsec": { - "env": { - "VARIANT": "appsec", - "JAVA_OPTS": "-Ddd.appsec.enabled=true" - } - }, - "iast": { - "env": { - "VARIANT": "iast", - "JAVA_OPTS": "-Ddd.iast.enabled=true" - } - } - } -} diff --git a/benchmark/startup/run.sh b/benchmark/startup/run.sh deleted file mode 100755 index 432c65d3fd5..00000000000 --- a/benchmark/startup/run.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash -set -eu - -source "${UTILS_DIR}/update-java-version.sh" 17 -"${UTILS_DIR}/run-sirun-benchmarks.sh" "$@" diff --git a/benchmark/utils/k6.js b/benchmark/utils/k6.js deleted file mode 100644 index aa5147ae3c8..00000000000 --- a/benchmark/utils/k6.js +++ /dev/null @@ -1,21 +0,0 @@ -import {check} from 'k6'; - -export function checkResponse(response) { - const checks = Array.prototype.slice.call(arguments, 1); - const reduced = checks.reduce((result, current) => Object.assign(result, current), {}); - check(response, reduced); -} - -export const isOk = { - 'is OK': r => r.status === 200 -}; - -export const isRedirect = { - 'is redirect': r => r.status >= 300 && r.status < 400 -}; - -export function bodyContains(text) { - return { - 'body contains': r => r.body.includes(text) - } -} diff --git a/benchmark/utils/run-k6-load-test.sh b/benchmark/utils/run-k6-load-test.sh deleted file mode 100755 index d3415f54eef..00000000000 --- a/benchmark/utils/run-k6-load-test.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env bash -set -eu - -command=$1 -exit_code=0 - -cleanup() { - # run the exit command - bash -c "${command}" - exit $exit_code -} - -trap cleanup EXIT ERR INT TERM - -echo "Starting k6 load test, logs are recorded into ${LOGS_DIR}/k6.log..." - -# run the k6 benchmark and store the result as JSON -k6 run k6.js --out "json=${OUTPUT_DIR}/k6_$(date +%s).json" > "${LOGS_DIR}/k6.log" 2>&1 -exit_code=$? - -echo "k6 load test done !!!" 
diff --git a/benchmark/utils/run-on-server-ready.sh b/benchmark/utils/run-on-server-ready.sh deleted file mode 100755 index 2aad5aa9f70..00000000000 --- a/benchmark/utils/run-on-server-ready.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env bash -set -eu - -url=$1 -command=$2 -# wait for an HTTP server to come up and runs the selected command -while true; do - if [[ $(curl -fso /dev/null -w "%{http_code}" "${url}") = 200 ]]; then - bash -c "${command}" - fi -done diff --git a/benchmark/utils/run-sirun-benchmarks.sh b/benchmark/utils/run-sirun-benchmarks.sh deleted file mode 100755 index c0bc732dcfa..00000000000 --- a/benchmark/utils/run-sirun-benchmarks.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash -set -eu - -function message() { - echo "$(date +"%T"): $1" -} - -run_benchmark() { - local type=$1 - local app=$2 - if [[ -d "${app}" ]] && [[ -f "${app}/benchmark.json" ]]; then - - message "${type} benchmark: ${app} started" - cd "${app}" - - # create output folder for the test - export OUTPUT_DIR="${REPORTS_DIR}/${type}/${app}" - mkdir -p "${OUTPUT_DIR}" - - # substitute environment variables in the json file - benchmark=$(mktemp) - # shellcheck disable=SC2046 - # shellcheck disable=SC2016 - envsubst "$(printf '${%s} ' $(env | cut -d'=' -f1))" "${benchmark}" - - # run the sirun test - sirun "${benchmark}" &>"${OUTPUT_DIR}/${app}.json" - - message "${type} benchmark: ${app} finished" - - cd .. - fi -} - -if [ "$#" == '2' ]; then - run_benchmark "$@" -else - for folder in *; do - run_benchmark "$1" "${folder}" - done -fi diff --git a/benchmark/utils/update-java-version.sh b/benchmark/utils/update-java-version.sh deleted file mode 100755 index 3d76603e0ef..00000000000 --- a/benchmark/utils/update-java-version.sh +++ /dev/null @@ -1,5 +0,0 @@ -readonly target=$1 -readonly NEW_PATH=$(echo "${PATH}" | sed -e "s@/usr/lib/jvm/[[:digit:]]\+@/usr/lib/jvm/${target}@g") -export PATH="${NEW_PATH}" - -java --version