release: fix gh run polling json parsing #180

Workflow file for this run
name: test-py
permissions:
  actions: read
  contents: write
  pull-requests: write # Allow writing comments on PRs
  issues: write # Allow writing comments on issues
  statuses: write # Allow writing statuses on PRs
  discussions: write
on:
  push:
    branches:
      - main
      - stable
      - 'releases/**'
    tags:
      - '*'
  pull_request:
  workflow_dispatch:
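# Job graph, as wired below: lint_py and find_py_tests gate the per-task
# `tests` matrix, whose coverage data the `coverage` job then combines;
# `perf` runs independently of the matrix.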
jobs:
  lint_py:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: astral-sh/setup-uv@v6
        with:
          enable-cache: true
          activate-environment: true
      - run: uv sync --dev --all-extras
      - run: uv pip install fastapi
      - run: uv run ruff format --check
      - run: uv run ruff check
      - run: uv run pyright
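  # Discovers tests/test_*.py and examples/*.py and publishes them as a JSON
  # task list that the `tests` job below consumes as its matrix.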
  find_py_tests:
    runs-on: ubuntu-latest
    outputs:
      PY_TASKS: ${{ steps.lsgrep.outputs.PY_TASKS }}
      # [{ "kind": "test" | "example", "name": "test_eventbus" }, ...]
      PY_TEST_TASKS: ${{ steps.lsgrep.outputs.PY_TEST_TASKS }}
      # [{ "kind": "test", "name": "test_eventbus" }, ...]
    steps:
      - uses: actions/checkout@v4
      - id: lsgrep
        run: |
          PY_TEST_TASKS="$(
            find tests -maxdepth 1 -type f -name 'test_*.py' ! -name 'test_eventbus_performance.py' \
              | sort \
              | sed 's|^tests/||' \
              | sed 's|\.py$||' \
              | jq -R -s -c 'split("\n")[:-1] | map({kind: "test", name: .})'
          )"
          PY_EXAMPLE_TASKS="$(
            (
              if [[ -d examples ]]; then
                find examples -maxdepth 1 -type f -name '*.py' | sort
              fi
            ) \
              | sed 's|^examples/||' \
              | sed 's|\.py$||' \
              | jq -R -s -c 'split("\n")[:-1] | map({kind: "example", name: .})'
          )"
          PY_TASKS="$(jq -cn --argjson tests "$PY_TEST_TASKS" --argjson examples "$PY_EXAMPLE_TASKS" '$tests + $examples')"
          echo "PY_TEST_TASKS=${PY_TEST_TASKS}" >> "$GITHUB_OUTPUT"
          echo "PY_TASKS=${PY_TASKS}" >> "$GITHUB_OUTPUT"
          echo "$PY_TASKS"
          # https://code.dblock.org/2021/09/03/generating-task-matrix-by-looping-over-repo-files-with-github-actions.html
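      # The jq pipeline turns the newline-separated file list into a JSON array;
      # for example, `printf 'test_a\ntest_b\n' | jq -R -s -c 'split("\n")[:-1] | map({kind: "test", name: .})'`
      # yields [{"kind":"test","name":"test_a"},{"kind":"test","name":"test_b"}].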
      - name: Check that at least one test file is found
        run: |
          if [[ -z "${{ steps.lsgrep.outputs.PY_TEST_TASKS }}" || "${{ steps.lsgrep.outputs.PY_TEST_TASKS }}" == "[]" ]]; then
            echo "Failed to find any test_*.py files in tests/ folder!" >&2
            exit 1
          fi
  tests:
    needs:
      - lint_py
      - find_py_tests
    runs-on: ubuntu-latest
    env:
      IN_DOCKER: 'True'
    strategy:
      matrix:
        task: ${{ fromJson(needs.find_py_tests.outputs.PY_TASKS || '[{"kind":"error","name":"FAILED_TO_DISCOVER_TASKS"}]') }}
        # autodiscovers files in tests/test_*.py and examples/*.py
        # - { kind: "test", name: "test_eventbus" }
        # - { kind: "example", name: "quickstart" }
        # ... and more
    name: ${{ matrix.task.kind }}-${{ matrix.task.name }}
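    # If task discovery produced no output, fromJson falls back to a single
    # "error" sentinel task, so the failure shows up as a named matrix job
    # (which the first step below turns into a hard failure) rather than as
    # a silently empty matrix.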
    steps:
      - name: Check that the previous step managed to find some tasks for us to run
        run: |
          if [[ "${{ matrix.task.kind }}" == "error" ]]; then
            echo "Failed to get the list of tasks in tests/test_*.py and examples/*.py from the find_py_tests job" >&2
            exit 1
          fi
      - uses: actions/checkout@v4
      - uses: astral-sh/setup-uv@v6
        with:
          enable-cache: true
          activate-environment: true
      - uses: pnpm/action-setup@v4
        if: matrix.task.kind == 'test' && matrix.task.name == 'test_cross_runtime_roundtrip'
        with:
          version: 10
      - uses: actions/setup-node@v4
        if: matrix.task.kind == 'test' && matrix.task.name == 'test_cross_runtime_roundtrip'
        with:
          node-version: 22
          cache: pnpm
          cache-dependency-path: abxbus-ts/pnpm-lock.yaml
      - name: Install bridge service binaries
        if: matrix.task.kind == 'test' && matrix.task.name == 'test_bridges'
        run: |
          sudo apt-get update
          sudo apt-get install -y redis-server nats-server postgresql sqlite3
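          # Debian/Ubuntu install the PostgreSQL server binaries under
          # /usr/lib/postgresql/<version>/bin, which is not on PATH by default,
          # so locate that directory and expose it to this and later steps.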
          PG_INITDB_PATH="$(find /usr/lib/postgresql -type f -name initdb | head -n 1)"
          PG_BINDIR="$(dirname "${PG_INITDB_PATH}")"
          if [[ -z "${PG_BINDIR}" || ! -x "${PG_BINDIR}/initdb" || ! -x "${PG_BINDIR}/postgres" ]]; then
            echo "Failed to locate PostgreSQL binaries (initdb/postgres)" >&2
            exit 1
          fi
          echo "${PG_BINDIR}" >> "${GITHUB_PATH}"
          export PATH="${PG_BINDIR}:${PATH}"
          redis-server --version
          nats-server --version
          initdb --version
          postgres --version
          sqlite3 --version
      - run: uv sync --dev --all-extras
      - name: Build TypeScript ESM bundle for cross-runtime roundtrip tests
        if: matrix.task.kind == 'test' && matrix.task.name == 'test_cross_runtime_roundtrip'
        run: |
          pnpm --dir abxbus-ts install --frozen-lockfile
          pnpm --dir abxbus-ts run build
      - name: Verify bridge optional Python deps
        if: matrix.task.kind == 'test' && matrix.task.name == 'test_bridges'
        run: |
          uv run python - <<'PY'
          import importlib
          modules = ['asyncpg', 'redis', 'nats']
          for module in modules:
              importlib.import_module(module)
          print('optional bridge deps import OK')
          PY
      - name: Run test with coverage
        if: matrix.task.kind == 'test'
        run: uv run coverage run --parallel-mode --source=abxbus -m pytest -x tests/${{ matrix.task.name }}.py
      - name: Run example
        if: matrix.task.kind == 'example'
        run: uv run coverage run --parallel-mode --source=abxbus examples/${{ matrix.task.name }}.py
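      # --parallel-mode makes coverage write a uniquely suffixed data file
      # (.coverage.<machine>.<pid>.<random>) per run, so the per-task artifacts
      # can be merged later with `coverage combine` in the coverage job.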
      - name: Check coverage files
        if: always()
        run: |
          echo "Looking for coverage files..."
          ls -la .coverage* 2>/dev/null || ls -la | grep coverage || echo "No coverage files found"
          coverage_file="$(find . -maxdepth 1 -type f -name '.coverage*' | head -n 1)"
          if [ -n "$coverage_file" ]; then
            echo "Found coverage file ($coverage_file), size: $(stat -f%z "$coverage_file" 2>/dev/null || stat -c%s "$coverage_file") bytes"
          fi
      - name: Upload coverage data
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: coverage-${{ matrix.task.kind }}-${{ matrix.task.name }}
          path: |
            .coverage*
            pyproject.toml
          retention-days: 7
          include-hidden-files: true
  coverage:
    needs: tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: astral-sh/setup-uv@v6
        with:
          enable-cache: true
          activate-environment: true
      - run: uv sync --dev --all-extras
      - name: Download all coverage data
        uses: actions/download-artifact@v4
        with:
          pattern: coverage-*
          path: coverage-data/
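      # download-artifact@v4 with `pattern:` (and no merge-multiple) unpacks
      # each matching artifact into its own subdirectory under coverage-data/,
      # hence the recursive find in the next step.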
      - name: Combine coverage data
        run: |
          # Find all .coverage* files and copy them with unique names.
          counter=1
          while IFS= read -r coverage_file; do
            cp "$coverage_file" ".coverage.$counter"
            counter=$((counter + 1))
          done < <(find coverage-data -name '.coverage*' -type f)
      - name: Combine coverage & fail if it's <50%
        run: |
          uv tool install 'coverage[toml]'
          OMIT='abxbus/bridge*.py'
          coverage combine
          coverage html --skip-covered --skip-empty --omit="$OMIT"
          coverage xml --omit="$OMIT"
          echo "### Python combined coverage" >> "$GITHUB_STEP_SUMMARY"
          echo "" >> "$GITHUB_STEP_SUMMARY"
          # Report and write a markdown table to the summary.
          coverage report --omit="$OMIT" --format=markdown >> "$GITHUB_STEP_SUMMARY"
          # Report again and fail if under 50%.
          coverage report --omit="$OMIT" --fail-under=50
      - name: Upload combined coverage report
        id: upload_py_coverage_report
        uses: actions/upload-artifact@v4
        with:
          name: coverage-report
          path: |
            htmlcov/
            coverage.xml
            pyproject.toml
          retention-days: 7
      - name: Append Python coverage artifact link
        run: |
          echo "" >> "$GITHUB_STEP_SUMMARY"
          echo "[Download Python HTML coverage artifact (coverage-report)](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}/artifacts/${{ steps.upload_py_coverage_report.outputs.artifact-id }})" >> "$GITHUB_STEP_SUMMARY"
  perf:
    runs-on: ubuntu-latest
    outputs:
      perf_stats: ${{ steps.export_perf.outputs.perf_stats }}
    steps:
      - uses: actions/checkout@v4
      - uses: astral-sh/setup-uv@v6
        with:
          enable-cache: true
          activate-environment: true
      - run: uv sync --dev --all-extras
      - run: uv run pytest -x tests/test_eventbus_performance.py
      - name: Run Python runtime perf
        run: uv run python tests/performance_runtime.py | tee python_perf.log
      - name: Export Python perf stats
        id: export_perf
        run: |
          python - <<'PY'
          import json
          import os
          import re
          from pathlib import Path

          text = Path("python_perf.log").read_text(encoding="utf-8", errors="replace")
          lines = [line.strip() for line in text.splitlines()]
          stat_lines = [line for line in lines if re.match(r"^\[python\]\s.+:\s.*latency=", line)]
          if not stat_lines:
              # Fallback: parse final JSON payload if present.
              for index, char in enumerate(text):
                  if char != "[":
                      continue
                  try:
                      payload = json.loads(text[index:])
                  except Exception:
                      continue
                  if not isinstance(payload, list):
                      continue
                  compact = []
                  for item in payload:
                      if not isinstance(item, dict):
                          continue
                      scenario = str(item.get("scenario_id", "unknown"))
                      latency = item.get("ms_per_event")
                      unit = str(item.get("ms_per_event_unit", "event"))
                      throughput = item.get("throughput")
                      peak_rss = item.get("peak_rss_kb_per_event")
                      parts = [f"{scenario}:"]
                      if isinstance(latency, (int, float)):
                          parts.append(f"latency={float(latency):.3f}ms/{unit}")
                      if isinstance(throughput, (int, float)):
                          parts.append(f"throughput={int(throughput)}/s")
                      if isinstance(peak_rss, (int, float)):
                          parts.append(f"peak_rss={float(peak_rss):.3f}kb/event")
                      compact.append(" ".join(parts))
                  if compact:
                      stat_lines = compact
                      break
          if not stat_lines:
              stat_lines = ["unable to parse python perf stats; see job log"]
          stats = "\n".join(stat_lines)
          with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as fh:
              fh.write("perf_stats<<EOF\n")
              fh.write(stats + "\n")
              fh.write("EOF\n")
          with open(os.environ["GITHUB_STEP_SUMMARY"], "a", encoding="utf-8") as fh:
              fh.write("### Python Perf Stats\n\n```\n")
              fh.write(stats + "\n")
              fh.write("```\n")
          PY
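          # `perf_stats<<EOF` uses GitHub's delimiter syntax for multiline
          # values in $GITHUB_OUTPUT, so the multi-line stats string survives
          # as a single step output.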