Add support for updating README.md automatically #12

Merged · 9 commits · Dec 3, 2024

Changes from all commits
61 changes: 55 additions & 6 deletions .ci/benchmark.py

```diff
@@ -1,15 +1,64 @@
+import argparse
 import os
-import sys
 from src.benchmark.utils import read_metrics, to_markdown_table

-if __name__ == "__main__":
-    # Generate statistics report
-    statistics_path = sys.argv[1]
-    metrics = read_metrics(statistics_path, metric="accuracy")
+
+def parse_args():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--path", type=str, required=True, help="Report path.")
+    parser.add_argument("--write-gh-job-summary", action="store_true", help="Write to GitHub job summary.")
+    parser.add_argument("--update-readme", action="store_true", help="Update statistics report in README.md.")
+    return parser.parse_args()
+
+
+def generate_report(path: str):
+    metrics = read_metrics(path, metric="accuracy")
     html_table = to_markdown_table(metrics)
+    return html_table

-    # Write to workflow job summary
+
+def write_job_summary(report):
     summary_path = os.environ["GITHUB_STEP_SUMMARY"]
     with open(summary_path, "a") as f:
         f.write("## Torchbenchmark statistics report\n")
-        f.write(html_table)
+        f.write(report)
+
+
+def update_readme(report):
+    project_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+    readme_path = os.path.join(project_path, "README.md")
+    print(readme_path)
+    with open(readme_path, "r") as f:
+        readme_content = f.read()
+
+    start_marker = "<!-- Torchbenchmark start -->"
+    end_marker = "<!-- Torchbenchmark end -->"
+    start_index = readme_content.find(start_marker)
+    end_index = readme_content.find(end_marker)
+    assert start_index != -1
+    assert end_index != -1
+
+    start_index += len(start_marker)
+    new_readme_content = (
+        readme_content[:start_index] + "\n\n" +
+        report + "\n\n" +
+        readme_content[end_index:]
+    )
+    with open(readme_path, "w") as f:
+        f.write(new_readme_content)
+
+
+if __name__ == "__main__":
+    args = parse_args()
+
+    # Generate statistics report
+    report = generate_report(args.path)
+
+    # Write to workflow job summary
+    if args.write_gh_job_summary:
+        write_job_summary(report)
+
+    # Update README.md
+    if args.update_readme:
+        update_readme(report)
```
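The core of `update_readme` is a splice between two HTML-comment markers: everything between `<!-- Torchbenchmark start -->` and `<!-- Torchbenchmark end -->` is replaced, and the rest of README.md is left untouched. A minimal standalone sketch of that logic (not part of the PR; `splice_report` is a hypothetical helper name):

```python
# Standalone sketch of the marker-splice logic used by update_readme();
# splice_report() is a hypothetical name, not a function from the PR.

START = "<!-- Torchbenchmark start -->"
END = "<!-- Torchbenchmark end -->"


def splice_report(readme: str, report: str) -> str:
    # Find both markers; fail loudly if either is missing, as the script does.
    start = readme.find(START)
    end = readme.find(END)
    assert start != -1 and end != -1
    start += len(START)
    # Keep everything outside the markers, replace everything between them.
    return readme[:start] + "\n\n" + report + "\n\n" + readme[end:]


if __name__ == "__main__":
    readme = f"# Title\n\n{START}\nold table\n{END}\n"
    print(splice_report(readme, "| model | npu |\n|---|---|"))
```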
10 changes: 10 additions & 0 deletions .github/dependabot.yml

```yaml
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      # Check for updates to GitHub Actions every week
      interval: "weekly"
    open-pull-requests-limit: 2
    reviewers:
      - "shink"
```
33 changes: 21 additions & 12 deletions .github/workflows/_ascend_npu_benchmark.yml

```diff
@@ -100,6 +100,8 @@ jobs:
         run: |
           pip install -r benchmark/requirements.txt --constraint ascend_npu/requirements.txt "numpy==1.*"
           python benchmark/install.py --userbenchmark test_bench --continue_on_fail
+        env:
+          HF_ENDPOINT: https://hf-mirror.com

       - name: Install project dependencies
         run: |
@@ -130,19 +132,26 @@ jobs:
           overwrite: true

       - name: Write to workflow job summary
-        id: report
         run: |
-          set -x
-          realpath benchmark/ascend_npu_benchmark.json
-          ls benchmark
-          cat benchmark/ascend_npu_benchmark.json
-
-          output_path=$(realpath benchmark/ascend_npu_benchmark.json)
-          python .ci/benchmark.py ${output_path}
+          python .ci/benchmark.py --write-gh-job-summary --path benchmark/ascend_npu_benchmark.json

-      # TODO(shink)
       - name: Update README.md
-        if: ${{ github.event_name == 'push' }}
+        if: ${{ github.event_name == 'push' || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
+        id: update-readme
         run: |
-          echo "${{ github.event_name }}"
-          echo "${{ github.event_name == 'push' }}"
+          python .ci/benchmark.py --update-readme --path benchmark/ascend_npu_benchmark.json
+          if git diff --quiet README.md; then
+            echo "changed=false" >> $GITHUB_OUTPUT
+          else
+            echo "changed=true" >> $GITHUB_OUTPUT
+          fi
+
+      - name: Create a pull request for changes to README.md
+        if: ${{ steps.update-readme.outputs.changed == 'true' }}
+        uses: peter-evans/create-pull-request@v7
+        with:
+          add-paths: README.md
+          branch: ascend-npu/benchmark
+          title: "[Ascend NPU] Update torchbenchmark report in README.md"
+          commit-message: "Update README.md"
+          reviewers: shink
```
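The `Update README.md` step keys off the exit code of `git diff --quiet README.md`: 0 means the file is unchanged, 1 means the updater modified it, and the result is appended to `$GITHUB_OUTPUT` to gate the pull-request step. The same check can be expressed in Python; this is an illustrative sketch, not code from the PR:

```python
# Sketch of the change-detection step in Python (the workflow itself uses
# plain shell); the git invocation mirrors the step above.
import os
import subprocess


def readme_changed() -> bool:
    # `git diff --quiet` exits 0 when the file is unchanged, 1 when it differs.
    result = subprocess.run(["git", "diff", "--quiet", "README.md"])
    return result.returncode != 0


if __name__ == "__main__":
    line = f"changed={'true' if readme_changed() else 'false'}\n"
    # GITHUB_OUTPUT is only set on Actions runners; fall back to stdout locally.
    output = os.environ.get("GITHUB_OUTPUT")
    if output:
        with open(output, "a") as f:
            f.write(line)
    else:
        print(line, end="")
```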
.github/workflows/_ascend_npu_test.yml → .github/workflows/_ascend_npu_ut.yml (renamed)

```diff
@@ -1,4 +1,4 @@
-name: '_ascend_npu_test'
+name: '_ascend_npu_ut'

 on:
   workflow_call:
```
26 changes: 21 additions & 5 deletions .github/workflows/ascend_npu_test.yml

```diff
@@ -4,18 +4,34 @@ on:
   push:
     branches:
       - 'main'
-
+    paths:
+      - '.github/workflows/ascend_npu_test.yml'
+      - '.github/workflows/_ascend_npu_build.yml'
+      - '.github/workflows/_ascend_npu_ut.yml'
+      - '.github/workflows/_ascend_npu_benchmark.yml'
+      - '.github/actions/**'
+      - '.ci/**'
+      - 'ascend_npu/**'
+      - 'src/**'
+      - '!**/*.md'
   pull_request:
     branches:
       - 'main'
-
+    paths:
+      - '.github/workflows/ascend_npu_test.yml'
+      - '.github/workflows/_ascend_npu_build.yml'
+      - '.github/workflows/_ascend_npu_ut.yml'
+      - '.github/workflows/_ascend_npu_benchmark.yml'
+      - '.github/actions/**'
+      - '.ci/**'
+      - 'ascend_npu/**'
+      - 'src/**'
+      - '!**/*.md'
   release:
     types:
       - 'published'
-
+  schedule:
+    - cron: '0 12 * * *'

   workflow_dispatch:
     inputs:
       runner:
@@ -87,7 +103,7 @@ jobs:
     needs:
       - prepare
       - build
-    uses: ./.github/workflows/_ascend_npu_test.yml
+    uses: ./.github/workflows/_ascend_npu_ut.yml
     with:
       runner: ${{ needs.prepare.outputs.runner }}
       image: ${{ needs.prepare.outputs.image }}
```
10 changes: 8 additions & 2 deletions README.md

```diff
@@ -7,7 +7,11 @@ across various devices by running comprehensive GitHub workflows.

 ## Accelerator Integration Test Results

-<!-- Start -->
+<details>
+
+<summary>Torchbenchmark statistics report</summary>
+
+<!-- Torchbenchmark start -->

 |                                 | [torch_npu][1] |
 |---------------------------------|----------------|
@@ -121,7 +125,9 @@ across various devices by running comprehensive GitHub workflows.

 [3]: https://github.com/cosdt/pytorch-integration-tests/actions/workflows/ascend_npu_test.yml

-<!-- End -->
+<!-- Torchbenchmark end -->
+
+</details>

 ## Overview
```
10 changes: 10 additions & 0 deletions ascend_npu/matadata.yml

```yaml
device: "npu"
backend_extension: "torch_npu"
link: https://github.com/Ascend/pytorch
torchbenchmark:
  test:
    - train
    - eval
  models:
    skip:
      - llava
```
11 changes: 0 additions & 11 deletions ascend_npu/metadata.json

This file was deleted.
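The PR swaps the JSON metadata file for YAML but does not show the loading side. A hypothetical consumer might look like this (assumes PyYAML; `load_metadata` is our name, not the project's):

```python
# Hypothetical consumer of ascend_npu/matadata.yml -- the PR does not show
# the loading code, so the names here (load_metadata, skipped_models) are
# illustrative only. Requires PyYAML (`pip install pyyaml`).
import yaml


def load_metadata(path: str) -> dict:
    with open(path, "r") as f:
        return yaml.safe_load(f)


if __name__ == "__main__":
    meta = load_metadata("ascend_npu/matadata.yml")
    tests = meta["torchbenchmark"]["test"]                     # e.g. ["train", "eval"]
    skipped_models = meta["torchbenchmark"]["models"]["skip"]  # e.g. ["llava"]
    print(f"device={meta['device']} tests={tests} skip={skipped_models}")
```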

21 changes: 13 additions & 8 deletions src/benchmark/utils.py

```diff
@@ -25,13 +25,13 @@ class TorchBenchModelMetric:


 def read_json(path: str) -> dict:
-    with open(path, 'r') as f:
+    with open(path, "r") as f:
         data = json.load(f)
     return data


 def save_file(path: str, data) -> None:
-    with open(path, 'w') as file:
+    with open(path, "w") as file:
         file.write(data)


@@ -54,7 +54,7 @@ def parse_to_dict(config_str: str):

 def read_metrics(path: str, *, metric=None) -> List[TorchBenchModelMetric]:
     output = read_json(path)
-    metrics_data = output.get('metrics', {})
+    metrics_data = output.get("metrics", {})

     metrics = []
     for metric_key, metric_value in metrics_data.items():
@@ -75,10 +75,11 @@ def read_metrics(path: str, *, metric=None) -> List[TorchBenchModelMetric]:


 def generate_table_rows(metrics: List[TorchBenchModelMetric]):
-    models = list({metric.key.name for metric in metrics})
     devices = list({metric.key.device for metric in metrics})
+    models = list({metric.key.name for metric in metrics})
+    models = sorted(models, key=lambda x: x.lower())

-    def filter_result(metrics: List[TorchBenchModelMetric], *, model, device):
+    def filter_metric(metrics: List[TorchBenchModelMetric], *, model, device):
         for metric in metrics:
             if metric.key.name == model and metric.key.device == device:
                 return metric
@@ -87,10 +88,14 @@ def filter_metric(metrics: List[TorchBenchModelMetric], *, model, device):
     for model in models:
         row = [model]
         for device in devices:
-            metric = filter_result(metrics, model=model, device=device)
+            metric = filter_metric(metrics, model=model, device=device)
             if metric is not None:
-                is_pass = metric.value == "pass"
-                cell = "✅" if is_pass else "❌"
+                if metric.value == "pass":
+                    cell = "✅"
+                elif metric.value == "skip":
+                    cell = "⚠️"
+                else:
+                    cell = "❌"
             else:
                 cell = ""
             row.append(cell)
```