diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..efde3a0 --- /dev/null +++ b/.flake8 @@ -0,0 +1,12 @@ +# Source: https://github.com/arduino/tooling-project-assets/blob/main/workflow-templates/assets/check-python/.flake8 +# See: https://flake8.pycqa.org/en/latest/user/configuration.html +# The code style defined in this file is the official standardized style to be used in all Arduino tooling projects and +# should not be modified. + +[flake8] +doctests = True +# W503 and W504 are mutually exclusive. PEP 8 recommends line break before. +ignore = W503 +max-complexity = 10 +max-line-length = 120 +select = E,W,F,C,N diff --git a/.github/workflows/check-python-task.yml b/.github/workflows/check-python-task.yml new file mode 100644 index 0000000..6d12a5f --- /dev/null +++ b/.github/workflows/check-python-task.yml @@ -0,0 +1,122 @@ +# Source: https://github.com/arduino/tooling-project-assets/blob/main/workflow-templates/check-python-task.md +name: Check Python + +env: + # See: https://github.com/actions/setup-python/tree/main#available-versions-of-python + PYTHON_VERSION: "3.11" + +# See: https://docs.github.com/actions/using-workflows/events-that-trigger-workflows +on: + create: + push: + paths: + - ".github/workflows/check-python-task.ya?ml" + - "**/.flake8" + - "**/poetry.lock" + - "**/pyproject.toml" + - "**/setup.cfg" + - "Taskfile.ya?ml" + - "**/tox.ini" + - "**.py" + pull_request: + paths: + - ".github/workflows/check-python-task.ya?ml" + - "**/.flake8" + - "**/poetry.lock" + - "**/pyproject.toml" + - "**/setup.cfg" + - "Taskfile.ya?ml" + - "**/tox.ini" + - "**.py" + schedule: + # Run periodically to catch breakage caused by external changes. + - cron: "0 8 * * WED" + workflow_dispatch: + repository_dispatch: + +jobs: + run-determination: + runs-on: ubuntu-latest + permissions: {} + outputs: + result: ${{ steps.determination.outputs.result }} + steps: + - name: Determine if the rest of the workflow should run + id: determination + run: | + RELEASE_BRANCH_REGEX="refs/heads/[0-9]+.[0-9]+.x" + # The `create` event trigger doesn't support `branches` filters, so it's necessary to use Bash instead. + if [[ + "${{ github.event_name }}" != "create" || + "${{ github.ref }}" =~ $RELEASE_BRANCH_REGEX + ]]; then + # Run the other jobs. + RESULT="true" + else + # There is no need to run the other jobs. 
+ RESULT="false" + fi + + echo "result=$RESULT" >> $GITHUB_OUTPUT + + lint: + needs: run-determination + if: needs.run-determination.outputs.result == 'true' + runs-on: ubuntu-latest + permissions: + contents: read + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install Poetry + run: pip install poetry + + - name: Install Task + uses: arduino/setup-task@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + version: 3.x + + - name: Run flake8 + uses: liskin/gh-problem-matcher-wrap@v3 + with: + linters: flake8 + run: task python:lint + + formatting: + needs: run-determination + if: needs.run-determination.outputs.result == 'true' + runs-on: ubuntu-latest + permissions: + contents: read + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Install Python + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PYTHON_VERSION }} + + - name: Install Poetry + run: pip install poetry + + - name: Install Task + uses: arduino/setup-task@v1 + with: + repo-token: ${{ secrets.GITHUB_TOKEN }} + version: 3.x + + - name: Format Python code + run: task python:format + + - name: Check formatting + run: git diff --color --exit-code diff --git a/.github/workflows/libraries_report-size-deltas.yml b/.github/workflows/libraries_report-size-deltas.yml index c232688..4e3494a 100644 --- a/.github/workflows/libraries_report-size-deltas.yml +++ b/.github/workflows/libraries_report-size-deltas.yml @@ -38,14 +38,6 @@ jobs: poetry install \ --no-root - - name: Lint with flake8 - run: | - poetry run \ - flake8 \ - --config "${{ env.PYTHON_PROJECT_PATH }}/.flake8" \ - --show-source \ - "${{ env.PYTHON_PROJECT_PATH }}" - - name: Run Python unit tests and record code coverage data run: | export PYTHONPATH="${{ env.PYTHON_PROJECT_PATH }}" diff --git a/README.md b/README.md index 78c446a..5e3b679 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,7 @@ [![Check npm status](https://github.com/arduino/report-size-deltas/actions/workflows/check-npm-task.yml/badge.svg)](https://github.com/arduino/report-size-deltas/actions/workflows/check-npm-task.yml) [![Check Poetry status](https://github.com/arduino/report-size-deltas/actions/workflows/check-poetry-task.yml/badge.svg)](https://github.com/arduino/report-size-deltas/actions/workflows/check-poetry-task.yml) +[![Check Python status](https://github.com/arduino/report-size-deltas/actions/workflows/check-python-task.yml/badge.svg)](https://github.com/arduino/report-size-deltas/actions/workflows/check-python-task.yml) [![Check Taskfiles status](https://github.com/arduino/report-size-deltas/actions/workflows/check-taskfiles.yml/badge.svg)](https://github.com/arduino/report-size-deltas/actions/workflows/check-taskfiles.yml) [![Tests](https://github.com/arduino/report-size-deltas/workflows/libraries/report-size-deltas%20workflow/badge.svg)](https://github.com/arduino/report-size-deltas/actions?workflow=libraries/report-size-deltas+workflow) [![Integration Tests](https://github.com/arduino/report-size-deltas/actions/workflows/test-integration.yml/badge.svg)](https://github.com/arduino/report-size-deltas/actions/workflows/test-integration.yml) diff --git a/Taskfile.yml b/Taskfile.yml index 1b29643..d236fff 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -11,11 +11,16 @@ tasks: deps: - task: 
npm:validate - task: general:check-spelling + - task: npm:validate + - task: poetry:validate + - task: python:lint fix: desc: Make automated corrections to the project's files deps: - task: general:correct-spelling + - task: poetry:sync + - task: python:format # Source: https://github.com/arduino/tooling-project-assets/blob/main/workflow-templates/assets/spell-check-task/Taskfile.yml general:check-spelling: @@ -134,6 +139,22 @@ tasks: check \ --lock + # Source: https://github.com/arduino/tooling-project-assets/blob/main/workflow-templates/assets/check-python-task/Taskfile.yml + python:format: + desc: Format Python files + deps: + - task: poetry:install-deps + cmds: + - poetry run black . + + # Source: https://github.com/arduino/tooling-project-assets/blob/main/workflow-templates/assets/check-python-task/Taskfile.yml + python:lint: + desc: Lint Python code + deps: + - task: poetry:install-deps + cmds: + - poetry run flake8 --show-source + # Make a temporary file named according to the passed TEMPLATE variable and print the path passed to stdout # Source: https://github.com/arduino/tooling-project-assets/blob/main/workflow-templates/assets/windows-task/Taskfile.yml utility:mktemp-file: diff --git a/poetry.lock b/poetry.lock index 89ab771..e89423f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,5 +1,59 @@ # This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. +[[package]] +name = "black" +version = "23.11.0" +description = "The uncompromising code formatter." +optional = false +python-versions = ">=3.8" +files = [ + {file = "black-23.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dbea0bb8575c6b6303cc65017b46351dc5953eea5c0a59d7b7e3a2d2f433a911"}, + {file = "black-23.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:412f56bab20ac85927f3a959230331de5614aecda1ede14b373083f62ec24e6f"}, + {file = "black-23.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d136ef5b418c81660ad847efe0e55c58c8208b77a57a28a503a5f345ccf01394"}, + {file = "black-23.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:6c1cac07e64433f646a9a838cdc00c9768b3c362805afc3fce341af0e6a9ae9f"}, + {file = "black-23.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cf57719e581cfd48c4efe28543fea3d139c6b6f1238b3f0102a9c73992cbb479"}, + {file = "black-23.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:698c1e0d5c43354ec5d6f4d914d0d553a9ada56c85415700b81dc90125aac244"}, + {file = "black-23.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:760415ccc20f9e8747084169110ef75d545f3b0932ee21368f63ac0fee86b221"}, + {file = "black-23.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:58e5f4d08a205b11800332920e285bd25e1a75c54953e05502052738fe16b3b5"}, + {file = "black-23.11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:45aa1d4675964946e53ab81aeec7a37613c1cb71647b5394779e6efb79d6d187"}, + {file = "black-23.11.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c44b7211a3a0570cc097e81135faa5f261264f4dfaa22bd5ee2875a4e773bd6"}, + {file = "black-23.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a9acad1451632021ee0d146c8765782a0c3846e0e0ea46659d7c4f89d9b212b"}, + {file = "black-23.11.0-cp38-cp38-win_amd64.whl", hash = "sha256:fc7f6a44d52747e65a02558e1d807c82df1d66ffa80a601862040a43ec2e3142"}, + {file = "black-23.11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7f622b6822f02bfaf2a5cd31fdb7cd86fcf33dab6ced5185c35f5db98260b055"}, + {file = 
"black-23.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:250d7e60f323fcfc8ea6c800d5eba12f7967400eb6c2d21ae85ad31c204fb1f4"}, + {file = "black-23.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5133f5507007ba08d8b7b263c7aa0f931af5ba88a29beacc4b2dc23fcefe9c06"}, + {file = "black-23.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:421f3e44aa67138ab1b9bfbc22ee3780b22fa5b291e4db8ab7eee95200726b07"}, + {file = "black-23.11.0-py3-none-any.whl", hash = "sha256:54caaa703227c6e0c87b76326d0862184729a69b73d3b7305b6288e1d830067e"}, + {file = "black-23.11.0.tar.gz", hash = "sha256:4c68855825ff432d197229846f971bc4d6666ce90492e5b02013bcaca4d9ab05"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +packaging = ">=22.0" +pathspec = ">=0.9.0" +platformdirs = ">=2" + +[package.extras] +colorama = ["colorama (>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "click" +version = "8.1.7" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, + {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + [[package]] name = "codespell" version = "2.2.5" @@ -130,6 +184,17 @@ files = [ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, ] +[[package]] +name = "mypy-extensions" +version = "1.0.0" +description = "Type system extensions for programs checked with the mypy type checker." +optional = false +python-versions = ">=3.5" +files = [ + {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, + {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, +] + [[package]] name = "packaging" version = "23.2" @@ -141,6 +206,17 @@ files = [ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"}, ] +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + [[package]] name = "pep8-naming" version = "0.13.3" @@ -155,6 +231,21 @@ files = [ [package.dependencies] flake8 = ">=5.0.0" +[[package]] +name = "platformdirs" +version = "4.1.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"}, + {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"}, +] + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"] + [[package]] name = "pluggy" version = "1.3.0" @@ -232,4 +323,4 @@ dev = ["pre-commit", "pytest-asyncio", "tox"] [metadata] lock-version = "2.0" python-versions = "3.11.*" -content-hash = "3de9d59b8556c6915aa74bf390e236d29d963f736de639c03ad2baf6f57da606" +content-hash = "3dc8c722f318ce66f471be75203697be676024db9e933b108ed367a3974f067a" diff --git a/pyproject.toml b/pyproject.toml index b5f60c3..5e884ef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,6 @@ +[tool.black] +line-length = 120 + [tool.poetry] name = "arduino/report-size-deltas" version = "0.0.0" @@ -8,6 +11,7 @@ authors = ["Arduino "] python = "3.11.*" [tool.poetry.group.dev.dependencies] +black = "23.11.0" codespell = "2.2.5" coverage = "7.3.3" flake8 = "6.1.0" diff --git a/reportsizedeltas/.flake8 b/reportsizedeltas/.flake8 deleted file mode 100644 index 8f80241..0000000 --- a/reportsizedeltas/.flake8 +++ /dev/null @@ -1,7 +0,0 @@ -[flake8] -doctests = True -# W503 and W504 are mutually exclusive. PEP 8 recommends line break before. -ignore = W503 -max-complexity = 10 -max-line-length = 120 -select = E,W,F,C,N diff --git a/reportsizedeltas/reportsizedeltas.py b/reportsizedeltas/reportsizedeltas.py index 4fafdcf..67b948c 100644 --- a/reportsizedeltas/reportsizedeltas.py +++ b/reportsizedeltas/reportsizedeltas.py @@ -21,13 +21,17 @@ def main() -> None: set_verbosity(enable_verbosity=False) if "INPUT_SIZE-DELTAS-REPORTS-ARTIFACT-NAME" in os.environ: - print("::warning::The size-deltas-report-artifact-name input is deprecated. Use the equivalent input: " - "sketches-reports-source instead.") + print( + "::warning::The size-deltas-report-artifact-name input is deprecated. Use the equivalent input: " + "sketches-reports-source instead." 
+ ) os.environ["INPUT_SKETCHES-REPORTS-SOURCE"] = os.environ["INPUT_SIZE-DELTAS-REPORTS-ARTIFACT-NAME"] - report_size_deltas = ReportSizeDeltas(repository_name=os.environ["GITHUB_REPOSITORY"], - sketches_reports_source=os.environ["INPUT_SKETCHES-REPORTS-SOURCE"], - token=os.environ["INPUT_GITHUB-TOKEN"]) + report_size_deltas = ReportSizeDeltas( + repository_name=os.environ["GITHUB_REPOSITORY"], + sketches_reports_source=os.environ["INPUT_SKETCHES-REPORTS-SOURCE"], + token=os.environ["INPUT_GITHUB-TOKEN"], + ) report_size_deltas.report_size_deltas() @@ -59,11 +63,13 @@ class ReportSizeDeltas: artifact_name -- name of the workflow artifact that contains the memory usage data token -- GitHub access token """ + report_key_beginning = "**Memory usage change @ " not_applicable_indicator = "N/A" class ReportKeys: """Key names used in the sketches report dictionary.""" + boards = "boards" board = "board" commit_hash = "commit_hash" @@ -115,8 +121,7 @@ def report_size_deltas_from_workflow_artifacts(self) -> None: page_number = 1 page_count = 1 while page_number <= page_count: - api_data = self.api_request(request="repos/" + self.repository_name + "/pulls", - page_number=page_number) + api_data = self.api_request(request="repos/" + self.repository_name + "/pulls", page_number=page_number) prs_data = api_data["json_data"] for pr_data in prs_data: # Note: closed PRs are not listed in the API response @@ -130,16 +135,14 @@ def report_size_deltas_from_workflow_artifacts(self) -> None: print("::debug::PR locked, skipping") continue - if self.report_exists(pr_number=pr_number, - pr_head_sha=pr_head_sha): + if self.report_exists(pr_number=pr_number, pr_head_sha=pr_head_sha): # Go on to the next PR print("::debug::Report already exists") continue artifact_download_url = self.get_artifact_download_url_for_sha( - pr_user_login=pr_data["user"]["login"], - pr_head_ref=pr_data["head"]["ref"], - pr_head_sha=pr_head_sha) + pr_user_login=pr_data["user"]["login"], pr_head_ref=pr_data["head"]["ref"], pr_head_sha=pr_head_sha + ) if artifact_download_url is None: # Go on to the next PR print("::debug::No sketches report artifact found") @@ -175,9 +178,10 @@ def report_exists(self, pr_number: int, pr_head_sha: str) -> bool: page_number = 1 page_count = 1 while page_number <= page_count: - api_data = self.api_request(request="repos/" + self.repository_name + "/issues/" + str(pr_number) - + "/comments", - page_number=page_number) + api_data = self.api_request( + request="repos/" + self.repository_name + "/issues/" + str(pr_number) + "/comments", + page_number=page_number, + ) comments_data = api_data["json_data"] for comment_data in comments_data: @@ -203,10 +207,15 @@ def get_artifact_download_url_for_sha(self, pr_user_login: str, pr_head_ref: str page_number = 1 page_count = 1 while page_number <= page_count: - api_data = self.api_request(request="repos/" + self.repository_name + "/actions/runs", - request_parameters="actor=" + pr_user_login + "&branch=" + pr_head_ref - + "&event=pull_request&status=completed", - page_number=page_number) + api_data = self.api_request( + request="repos/" + self.repository_name + "/actions/runs", + request_parameters="actor=" + + pr_user_login + + "&branch=" + + pr_head_ref + + "&event=pull_request&status=completed", + page_number=page_number, + ) runs_data = api_data["json_data"] # Find the runs with the head SHA of the PR (there may be multiple runs) @@ -233,9 +242,10 @@ def get_artifact_download_url_for_run(self, run_id: str) -> str | None: page_number = 1 page_count = 1 while 
page_number <= page_count: - api_data = self.api_request(request="repos/" + self.repository_name + "/actions/runs/" - + str(run_id) + "/artifacts", - page_number=page_number) + api_data = self.api_request( + request="repos/" + self.repository_name + "/actions/runs/" + str(run_id) + "/artifacts", + page_number=page_number, + ) artifacts_data = api_data["json_data"] for artifact_data in artifacts_data["artifacts"]: @@ -259,8 +269,9 @@ def get_artifact(self, artifact_download_url: str): artifact_folder_object = tempfile.TemporaryDirectory(prefix="reportsizedeltas-") try: # Download artifact - with open(file=artifact_folder_object.name + "/" + self.sketches_reports_source + ".zip", - mode="wb") as out_file: + with open( + file=artifact_folder_object.name + "/" + self.sketches_reports_source + ".zip", mode="wb" + ) as out_file: with self.raw_http_request(url=artifact_download_url) as fp: out_file.write(fp.read()) @@ -292,10 +303,11 @@ def get_sketches_reports(self, artifact_folder_object): report_data = json.load(report_file) if ( (self.ReportKeys.boards not in report_data) - or (self.ReportKeys.sizes - not in report_data[self.ReportKeys.boards][0]) - or (self.ReportKeys.maximum - not in report_data[self.ReportKeys.boards][0][self.ReportKeys.sizes][0]) + or (self.ReportKeys.sizes not in report_data[self.ReportKeys.boards][0]) + or ( + self.ReportKeys.maximum + not in report_data[self.ReportKeys.boards][0][self.ReportKeys.sizes][0] + ) ): # Sketches reports use an old format, skip print("Old format sketches report found, skipping") @@ -308,8 +320,10 @@ def get_sketches_reports(self, artifact_folder_object): break if not sketches_reports: - print("No size deltas data found in workflow artifact for this PR. The compile-examples action's " - "enable-size-deltas-report input must be set to true to produce size deltas data.") + print( + "No size deltas data found in workflow artifact for this PR. The compile-examples action's " + "enable-size-deltas-report input must be set to true to produce size deltas data." + ) return sketches_reports @@ -345,24 +359,23 @@ def generate_report(self, sketches_reports) -> str: report_markdown = report_markdown + generate_markdown_table(row_list=summary_report_data) + "\n" # Add full table - report_markdown_with_table = (report_markdown - + "
<details>\n" - "<summary>Click for full report table</summary>\n\n") - report_markdown_with_table = (report_markdown_with_table - + generate_markdown_table(row_list=full_report_data) - + "\n</details>\n\n") + report_markdown_with_table = ( + report_markdown + "<details>\n" "<summary>Click for full report table</summary>\n\n" ) + report_markdown_with_table = ( + report_markdown_with_table + generate_markdown_table(row_list=full_report_data) + "\n</details>\n\n" + ) if len(report_markdown_with_table) < maximum_report_length: report_markdown = report_markdown_with_table # Add full CSV - report_markdown_with_csv = (report_markdown - + "<details>\n" - "<summary>Click for full report CSV</summary>\n\n" - "```\n") - report_markdown_with_csv = (report_markdown_with_csv - + generate_csv_table(row_list=full_report_data) - + "```\n</details>") + report_markdown_with_csv = ( + report_markdown + "<details>\n" "<summary>Click for full report CSV</summary>\n\n" "```\n" + ) + report_markdown_with_csv = ( + report_markdown_with_csv + generate_csv_table(row_list=full_report_data) + "```\n</details>
" + ) if len(report_markdown_with_csv) < maximum_report_length: report_markdown = report_markdown_with_csv @@ -386,51 +399,32 @@ def add_summary_report_row(self, report_data, fqbn_data) -> None: # Populate the row with data for size_data in fqbn_data[self.ReportKeys.sizes]: # Determine column number for this memory type - column_number = get_report_column_number( - report=report_data, - column_heading=size_data[self.ReportKeys.name] - ) + column_number = get_report_column_number(report=report_data, column_heading=size_data[self.ReportKeys.name]) # Add the memory data to the cell if self.ReportKeys.delta in size_data: # Absolute data - report_data[row_number][column_number] = ( - self.get_summary_value( - show_emoji=True, - minimum=size_data[self.ReportKeys.delta][self.ReportKeys.absolute][ - self.ReportKeys.minimum], - maximum=size_data[self.ReportKeys.delta][self.ReportKeys.absolute][ - self.ReportKeys.maximum] - ) + report_data[row_number][column_number] = self.get_summary_value( + show_emoji=True, + minimum=size_data[self.ReportKeys.delta][self.ReportKeys.absolute][self.ReportKeys.minimum], + maximum=size_data[self.ReportKeys.delta][self.ReportKeys.absolute][self.ReportKeys.maximum], ) # Relative data - report_data[row_number][column_number + 1] = ( - self.get_summary_value( - show_emoji=False, - minimum=size_data[self.ReportKeys.delta][self.ReportKeys.relative][ - self.ReportKeys.minimum], - maximum=size_data[self.ReportKeys.delta][self.ReportKeys.relative][ - self.ReportKeys.maximum] - ) + report_data[row_number][column_number + 1] = self.get_summary_value( + show_emoji=False, + minimum=size_data[self.ReportKeys.delta][self.ReportKeys.relative][self.ReportKeys.minimum], + maximum=size_data[self.ReportKeys.delta][self.ReportKeys.relative][self.ReportKeys.maximum], ) else: # Absolute data - report_data[row_number][column_number] = ( - self.get_summary_value( - show_emoji=True, - minimum=self.not_applicable_indicator, - maximum=self.not_applicable_indicator - ) + report_data[row_number][column_number] = self.get_summary_value( + show_emoji=True, minimum=self.not_applicable_indicator, maximum=self.not_applicable_indicator ) # Relative data - report_data[row_number][column_number + 1] = ( - self.get_summary_value( - show_emoji=False, - minimum=self.not_applicable_indicator, - maximum=self.not_applicable_indicator - ) + report_data[row_number][column_number + 1] = self.get_summary_value( + show_emoji=False, minimum=self.not_applicable_indicator, maximum=self.not_applicable_indicator ) def add_detailed_report_row(self, report_data, fqbn_data) -> None: @@ -454,23 +448,20 @@ def add_detailed_report_row(self, report_data, fqbn_data) -> None: report=report_data, column_heading=( "`{sketch_name}`
<br>{size_name}".format( - sketch_name=sketch[self.ReportKeys.name], - size_name=size_data[self.ReportKeys.name] + sketch_name=sketch[self.ReportKeys.name], size_name=size_data[self.ReportKeys.name] ) - ) + ), ) # Add the memory data to the cell if self.ReportKeys.delta in size_data: # Absolute - report_data[row_number][column_number] = ( - size_data[self.ReportKeys.delta][self.ReportKeys.absolute] - ) + report_data[row_number][column_number] = size_data[self.ReportKeys.delta][self.ReportKeys.absolute] # Relative - report_data[row_number][column_number + 1] = ( - size_data[self.ReportKeys.delta][self.ReportKeys.relative] - ) + report_data[row_number][column_number + 1] = size_data[self.ReportKeys.delta][ + self.ReportKeys.relative + ] else: # Absolute report_data[row_number][column_number] = self.not_applicable_indicator @@ -528,11 +519,7 @@ def comment_report(self, pr_number: int, report_markdown: str) -> None: report_data = {"body": report_markdown} report_data = json.dumps(obj=report_data) report_data = report_data.encode(encoding="utf-8") - url = ("https://api.github.com/repos/" - + self.repository_name - + "/issues/" - + str(pr_number) - + "/comments") + url = "https://api.github.com/repos/" + self.repository_name + "/issues/" + str(pr_number) + "/comments" self.http_request(url=url, data=report_data) @@ -549,8 +536,15 @@ def api_request(self, request: str, request_parameters: str = "", page_number: i page_number -- Some responses will be paginated. This argument specifies which page should be returned. (default value: 1) """ - return self.get_json_response(url="https://api.github.com/" + request + "?" + request_parameters + "&page=" - + str(page_number) + "&per_page=100") + return self.get_json_response( + url="https://api.github.com/" + + request + + "?" + + request_parameters + + "&page=" + + str(page_number) + + "&per_page=100" + ) def get_json_response(self, url: str): """Load the specified URL and return a dictionary: @@ -599,9 +593,11 @@ def http_request(self, url: str, data: str | None = None) -> dict[str]: (default value: None) """ with self.raw_http_request(url=url, data=data) as response_object: - return {"body": response_object.read().decode(encoding="utf-8", errors="ignore"), - "headers": response_object.info(), - "url": response_object.geturl()} + return { + "body": response_object.read().decode(encoding="utf-8", errors="ignore"), + "headers": response_object.info(), + "url": response_object.geturl(), + } def raw_http_request(self, url: str, data: str | None = None): """Make a request and return an object containing the response. 
@@ -681,7 +677,7 @@ def determine_urlopen_retry(exception) -> bool: # urllib.error.URLError: "" + "it>", ] # Delay before retry (seconds) @@ -713,7 +709,7 @@ def get_page_count(link_header: str | None) -> int: if link_header is not None: # Get the pagination data for link in link_header.split(","): - if link[-13:] == ">; rel=\"last\"": + if link[-13:] == '>; rel="last"': link = re.split("[?&>]", link) for parameter in link: if parameter[:5] == "page=": diff --git a/reportsizedeltas/tests/test_reportsizedeltas.py b/reportsizedeltas/tests/test_reportsizedeltas.py index d9c8cbf..e3d6ea7 100644 --- a/reportsizedeltas/tests/test_reportsizedeltas.py +++ b/reportsizedeltas/tests/test_reportsizedeltas.py @@ -18,9 +18,9 @@ report_keys = reportsizedeltas.ReportSizeDeltas.ReportKeys() -def get_reportsizedeltas_object(repository_name="FooOwner/BarRepository", - sketches_reports_source="foo-artifact-name", - token="foo token"): +def get_reportsizedeltas_object( + repository_name="FooOwner/BarRepository", sketches_reports_source="foo-artifact-name", token="foo token" +): """Return a reportsizedeltas.ReportSizeDeltas object to use in tests. Keyword arguments: @@ -28,8 +28,9 @@ def get_reportsizedeltas_object(repository_name="FooOwner/BarRepository", sketches_reports_source -- name of the workflow artifact that contains the memory usage data token -- GitHub access token """ - return reportsizedeltas.ReportSizeDeltas(repository_name=repository_name, - sketches_reports_source=sketches_reports_source, token=token) + return reportsizedeltas.ReportSizeDeltas( + repository_name=repository_name, sketches_reports_source=sketches_reports_source, token=token + ) def directories_are_same(left_directory, right_directory): @@ -50,16 +51,16 @@ def directories_are_same(left_directory, right_directory): return False filecmp.clear_cache() - (_, mismatch, errors) = filecmp.cmpfiles(a=left_directory, - b=right_directory, - common=directory_comparison.common_files, - shallow=False) + (_, mismatch, errors) = filecmp.cmpfiles( + a=left_directory, b=right_directory, common=directory_comparison.common_files, shallow=False + ) if len(mismatch) > 0 or len(errors) > 0: return False for common_dir in directory_comparison.common_dirs: - if not directories_are_same(left_directory=left_directory.joinpath(common_dir), - right_directory=right_directory.joinpath(common_dir)): + if not directories_are_same( + left_directory=left_directory.joinpath(common_dir), right_directory=right_directory.joinpath(common_dir) + ): return False return True @@ -98,6 +99,7 @@ def setup_environment_variables(monkeypatch): class ActionInputs: """A container for the values of the environment variables""" + repository_name = "GoldenOwner/GoldenRepository" sketches_reports_source = "golden-source-name" token = "golden-github-token" @@ -126,16 +128,15 @@ def report_size_deltas(self): reportsizedeltas.ReportSizeDeltas.assert_called_once_with( repository_name=setup_environment_variables.repository_name, sketches_reports_source=setup_environment_variables.sketches_reports_source, - token=setup_environment_variables.token + token=setup_environment_variables.token, ) ReportSizeDeltas.report_size_deltas.assert_called_once() @pytest.mark.parametrize("use_size_deltas_report_artifact_name", [True, False]) -def test_main_size_deltas_report_artifact_name_deprecation_warning(capsys, - mocker, - monkeypatch, setup_environment_variables, - use_size_deltas_report_artifact_name): +def test_main_size_deltas_report_artifact_name_deprecation_warning( + capsys, mocker, monkeypatch, 
setup_environment_variables, use_size_deltas_report_artifact_name +): size_deltas_report_artifact_name = "golden-size-deltas-report-artifact-name-value" if use_size_deltas_report_artifact_name: @@ -162,7 +163,7 @@ def report_size_deltas(self): expected_output = ( expected_output + "::warning::The size-deltas-report-artifact-name input is deprecated. Use the equivalent input: " - "sketches-reports-source instead." + "sketches-reports-source instead." ) assert capsys.readouterr().out.strip() == expected_output @@ -207,8 +208,9 @@ def test_report_size_deltas_from_local_reports(mocker, monkeypatch): report = "golden report" monkeypatch.setenv("GITHUB_WORKSPACE", github_workspace) - monkeypatch.setenv("GITHUB_EVENT_PATH", - str(test_data_path.joinpath("test_report_size_deltas_pull_request", "githubevent.json"))) + monkeypatch.setenv( + "GITHUB_EVENT_PATH", str(test_data_path.joinpath("test_report_size_deltas_pull_request", "githubevent.json")) + ) mocker.patch("reportsizedeltas.ReportSizeDeltas.get_sketches_reports", autospec=True) mocker.patch("reportsizedeltas.ReportSizeDeltas.generate_report", autospec=True, return_value=report) @@ -227,8 +229,9 @@ def test_report_size_deltas_from_local_reports(mocker, monkeypatch): reportsizedeltas.ReportSizeDeltas.get_sketches_reports.return_value = sketches_reports report_size_deltas.report_size_deltas_from_local_reports() - report_size_deltas.get_sketches_reports.assert_called_once_with(report_size_deltas, - artifact_folder_object=sketches_reports_folder) + report_size_deltas.get_sketches_reports.assert_called_once_with( + report_size_deltas, artifact_folder_object=sketches_reports_folder + ) report_size_deltas.generate_report.assert_called_once_with(report_size_deltas, sketches_reports=sketches_reports) report_size_deltas.comment_report.assert_called_once_with(report_size_deltas, pr_number=42, report_markdown=report) @@ -239,20 +242,24 @@ def test_report_size_deltas_from_workflow_artifacts(mocker): pr_head_sha = "pr-head-sha" sketches_reports = [{reportsizedeltas.ReportSizeDeltas.ReportKeys.commit_hash: pr_head_sha}] report = "foo report" - json_data = [{"number": 1, "locked": True, "head": {"sha": pr_head_sha, "ref": "asdf"}, "user": {"login": "1234"}}, - {"number": 2, "locked": False, "head": {"sha": pr_head_sha, "ref": "asdf"}, "user": {"login": "1234"}}] + json_data = [ + {"number": 1, "locked": True, "head": {"sha": pr_head_sha, "ref": "asdf"}, "user": {"login": "1234"}}, + {"number": 2, "locked": False, "head": {"sha": pr_head_sha, "ref": "asdf"}, "user": {"login": "1234"}}, + ] report_size_deltas = get_reportsizedeltas_object() - mocker.patch("reportsizedeltas.ReportSizeDeltas.api_request", - autospec=True, - return_value={"json_data": json_data, - "additional_pages": True, - "page_count": 1}) + mocker.patch( + "reportsizedeltas.ReportSizeDeltas.api_request", + autospec=True, + return_value={"json_data": json_data, "additional_pages": True, "page_count": 1}, + ) mocker.patch("reportsizedeltas.ReportSizeDeltas.report_exists", autospec=True, return_value=False) - mocker.patch("reportsizedeltas.ReportSizeDeltas.get_artifact_download_url_for_sha", - autospec=True, - return_value=artifact_download_url) + mocker.patch( + "reportsizedeltas.ReportSizeDeltas.get_artifact_download_url_for_sha", + autospec=True, + return_value=artifact_download_url, + ) mocker.patch("reportsizedeltas.ReportSizeDeltas.get_artifact", autospec=True, return_value=artifact_folder_object) mocker.patch("reportsizedeltas.ReportSizeDeltas.get_sketches_reports", autospec=True, 
return_value=sketches_reports) mocker.patch("reportsizedeltas.ReportSizeDeltas.generate_report", autospec=True, return_value=report) @@ -321,18 +328,17 @@ def test_report_size_deltas_from_workflow_artifacts(mocker): unittest.mock.call(report_size_deltas, pr_number=pr_data["number"], pr_head_sha=json_data[0]["head"]["sha"]) ) get_artifact_download_url_for_sha_calls.append( - unittest.mock.call(report_size_deltas, - pr_user_login=pr_data["user"]["login"], - pr_head_ref=pr_data["head"]["ref"], - pr_head_sha=pr_data["head"]["sha"]) + unittest.mock.call( + report_size_deltas, + pr_user_login=pr_data["user"]["login"], + pr_head_ref=pr_data["head"]["ref"], + pr_head_sha=pr_data["head"]["sha"], + ) ) get_sketches_reports_calls.append( unittest.mock.call(report_size_deltas, artifact_folder_object=artifact_folder_object) ) - generate_report_calls.append( - unittest.mock.call(report_size_deltas, - sketches_reports=sketches_reports) - ) + generate_report_calls.append(unittest.mock.call(report_size_deltas, sketches_reports=sketches_reports)) comment_report_calls.append( unittest.mock.call(report_size_deltas, pr_number=pr_data["number"], report_markdown=report) ) @@ -352,15 +358,15 @@ def test_report_exists(): report_size_deltas = get_reportsizedeltas_object(repository_name=repository_name) json_data = [{"body": "foo123"}, {"body": report_size_deltas.report_key_beginning + pr_head_sha + "foo"}] - report_size_deltas.api_request = unittest.mock.MagicMock(return_value={"json_data": json_data, - "additional_pages": False, - "page_count": 1}) + report_size_deltas.api_request = unittest.mock.MagicMock( + return_value={"json_data": json_data, "additional_pages": False, "page_count": 1} + ) assert report_size_deltas.report_exists(pr_number=pr_number, pr_head_sha=pr_head_sha) - report_size_deltas.api_request.assert_called_once_with(request="repos/" + repository_name + "/issues/" - + str(pr_number) + "/comments", - page_number=1) + report_size_deltas.api_request.assert_called_once_with( + request="repos/" + repository_name + "/issues/" + str(pr_number) + "/comments", page_number=1 + ) assert not report_size_deltas.report_exists(pr_number=pr_number, pr_head_sha="asdf") @@ -376,36 +382,44 @@ def test_get_artifact_download_url_for_sha(): report_size_deltas = get_reportsizedeltas_object(repository_name=repository_name) json_data = {"workflow_runs": [{"head_sha": "foo123", "id": "1234"}, {"head_sha": pr_head_sha, "id": run_id}]} - report_size_deltas.api_request = unittest.mock.MagicMock(return_value={"json_data": json_data, - "additional_pages": True, - "page_count": 3}) + report_size_deltas.api_request = unittest.mock.MagicMock( + return_value={"json_data": json_data, "additional_pages": True, "page_count": 3} + ) report_size_deltas.get_artifact_download_url_for_run = unittest.mock.MagicMock(return_value=None) # No SHA match - assert report_size_deltas.get_artifact_download_url_for_sha(pr_user_login=pr_user_login, - pr_head_ref=pr_head_ref, - pr_head_sha="foosha") is None + assert ( + report_size_deltas.get_artifact_download_url_for_sha( + pr_user_login=pr_user_login, pr_head_ref=pr_head_ref, pr_head_sha="foosha" + ) + is None + ) # Test pagination request = "repos/" + repository_name + "/actions/runs" - request_parameters = ("actor=" + pr_user_login + "&branch=" + pr_head_ref + "&event=pull_request&status=completed") - calls = [unittest.mock.call(request=request, request_parameters=request_parameters, page_number=1), - unittest.mock.call(request=request, request_parameters=request_parameters, page_number=2), - 
unittest.mock.call(request=request, request_parameters=request_parameters, page_number=3)] + request_parameters = "actor=" + pr_user_login + "&branch=" + pr_head_ref + "&event=pull_request&status=completed" + calls = [ + unittest.mock.call(request=request, request_parameters=request_parameters, page_number=1), + unittest.mock.call(request=request, request_parameters=request_parameters, page_number=2), + unittest.mock.call(request=request, request_parameters=request_parameters, page_number=3), + ] report_size_deltas.api_request.assert_has_calls(calls) # SHA match, but no artifact for run - assert report_size_deltas.get_artifact_download_url_for_sha(pr_user_login=pr_user_login, - pr_head_ref=pr_head_ref, - pr_head_sha=pr_head_sha) is None + assert ( + report_size_deltas.get_artifact_download_url_for_sha( + pr_user_login=pr_user_login, pr_head_ref=pr_head_ref, pr_head_sha=pr_head_sha + ) + is None + ) report_size_deltas.get_artifact_download_url_for_run = unittest.mock.MagicMock(return_value=test_artifact_url) # SHA match, artifact match assert test_artifact_url == ( - report_size_deltas.get_artifact_download_url_for_sha(pr_user_login=pr_user_login, - pr_head_ref=pr_head_ref, - pr_head_sha=pr_head_sha) + report_size_deltas.get_artifact_download_url_for_sha( + pr_user_login=pr_user_login, pr_head_ref=pr_head_ref, pr_head_sha=pr_head_sha + ) ) report_size_deltas.get_artifact_download_url_for_run.assert_called_once_with(run_id=run_id) @@ -419,26 +433,19 @@ def test_get_artifact_download_url_for_run(expired, artifact_name): archive_download_url = "archive_download_url" run_id = "1234" - report_size_deltas = get_reportsizedeltas_object(repository_name=repository_name, - sketches_reports_source=sketches_reports_source) + report_size_deltas = get_reportsizedeltas_object( + repository_name=repository_name, sketches_reports_source=sketches_reports_source + ) json_data = { "artifacts": [ - { - "name": artifact_name, - "archive_download_url": archive_download_url, - "expired": expired - }, - { - "name": "bar123", - "archive_download_url": "wrong_artifact_url", - "expired": False - } + {"name": artifact_name, "archive_download_url": archive_download_url, "expired": expired}, + {"name": "bar123", "archive_download_url": "wrong_artifact_url", "expired": False}, ] } - report_size_deltas.api_request = unittest.mock.MagicMock(return_value={"json_data": json_data, - "additional_pages": False, - "page_count": 1}) + report_size_deltas.api_request = unittest.mock.MagicMock( + return_value={"json_data": json_data, "additional_pages": False, "page_count": 1} + ) download_url = report_size_deltas.get_artifact_download_url_for_run(run_id=run_id) if not expired and artifact_name == sketches_reports_source: @@ -447,14 +454,13 @@ def test_get_artifact_download_url_for_run(expired, artifact_name): assert download_url is None report_size_deltas.api_request.assert_called_once_with( - request="repos/" + repository_name + "/actions/runs/" + str(run_id) - + "/artifacts", - page_number=1) + request="repos/" + repository_name + "/actions/runs/" + str(run_id) + "/artifacts", page_number=1 + ) -@pytest.mark.parametrize("test_artifact_name, expected_success", - [("correct-artifact-name", True), - ("incorrect-artifact-name", False)]) +@pytest.mark.parametrize( + "test_artifact_name, expected_success", [("correct-artifact-name", True), ("incorrect-artifact-name", False)] +) def test_get_artifact(tmp_path, test_artifact_name, expected_success): artifact_source_path = test_data_path.joinpath("size-deltas-reports-new") @@ -478,8 +484,7 
@@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): with artifact_folder_object as artifact_folder: # Verify that the artifact matches the source - assert directories_are_same(left_directory=artifact_source_path, - right_directory=artifact_folder) + assert directories_are_same(left_directory=artifact_source_path, right_directory=artifact_folder) else: with pytest.raises(expected_exception=urllib.error.URLError): report_size_deltas.get_artifact(artifact_download_url=artifact_download_url) @@ -488,9 +493,7 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): @pytest.mark.parametrize( "sketches_reports_path, expected_sketches_reports", [ - (test_data_path.joinpath("size-deltas-reports-old"), - [] - ), + (test_data_path.joinpath("size-deltas-reports-old"), []), ( test_data_path.joinpath("size-deltas-reports-new"), [ @@ -503,32 +506,20 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): report_keys.sizes: [ { report_keys.delta: { - report_keys.absolute: { - report_keys.maximum: -12, - report_keys.minimum: -12 - }, - report_keys.relative: { - report_keys.maximum: -0.05, - report_keys.minimum: -0.05 - } + report_keys.absolute: {report_keys.maximum: -12, report_keys.minimum: -12}, + report_keys.relative: {report_keys.maximum: -0.05, report_keys.minimum: -0.05}, }, report_keys.name: "flash", - report_keys.maximum: 28672 + report_keys.maximum: 28672, }, { report_keys.delta: { - report_keys.absolute: { - report_keys.maximum: 0, - report_keys.minimum: 0 - }, - report_keys.relative: { - report_keys.maximum: 0.00, - report_keys.minimum: 0.00 - } + report_keys.absolute: {report_keys.maximum: 0, report_keys.minimum: 0}, + report_keys.relative: {report_keys.maximum: 0.00, report_keys.minimum: 0.00}, }, report_keys.name: "RAM for global variables", - report_keys.maximum: 2560 - } + report_keys.maximum: 2560, + }, ], report_keys.sketches: [ { @@ -538,36 +529,36 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): { report_keys.current: { report_keys.absolute: 3494, - report_keys.relative: 12.19 + report_keys.relative: 12.19, }, report_keys.delta: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" + report_keys.relative: "N/A", }, report_keys.name: "flash", report_keys.maximum: 28672, report_keys.previous: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" - } + report_keys.relative: "N/A", + }, }, { report_keys.current: { report_keys.absolute: 153, - report_keys.relative: 5.97 + report_keys.relative: 5.97, }, report_keys.delta: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" + report_keys.relative: "N/A", }, report_keys.name: "RAM for global variables", report_keys.maximum: 2560, report_keys.previous: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" - } - } - ] + report_keys.relative: "N/A", + }, + }, + ], }, { report_keys.compilation_success: True, @@ -576,40 +567,34 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): { report_keys.current: { report_keys.absolute: 3462, - report_keys.relative: 12.07 - }, - report_keys.delta: { - report_keys.absolute: -12, - report_keys.relative: -0.05 + report_keys.relative: 12.07, }, + report_keys.delta: {report_keys.absolute: -12, report_keys.relative: -0.05}, report_keys.name: "flash", report_keys.maximum: 28672, report_keys.previous: { report_keys.absolute: 3474, - report_keys.relative: 12.12 - } + report_keys.relative: 12.12, + }, }, { report_keys.current: { report_keys.absolute: 149, - report_keys.relative: 5.82 - }, 
- report_keys.delta: { - report_keys.absolute: 0, - report_keys.relative: -0.00 + report_keys.relative: 5.82, }, + report_keys.delta: {report_keys.absolute: 0, report_keys.relative: -0.00}, report_keys.name: "RAM for global variables", report_keys.maximum: 2560, report_keys.previous: { report_keys.absolute: 149, - report_keys.relative: 5.82 - } - } - ] - } - ] + report_keys.relative: 5.82, + }, + }, + ], + }, + ], } - ] + ], }, { report_keys.commit_hash: "d8fd302", @@ -620,32 +605,20 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): report_keys.sizes: [ { report_keys.delta: { - report_keys.absolute: { - report_keys.maximum: -994, - report_keys.minimum: -994 - }, - report_keys.relative: { - report_keys.maximum: -3.08, - report_keys.minimum: -3.08 - } + report_keys.absolute: {report_keys.maximum: -994, report_keys.minimum: -994}, + report_keys.relative: {report_keys.maximum: -3.08, report_keys.minimum: -3.08}, }, report_keys.name: "flash", report_keys.maximum: 32256, }, { report_keys.delta: { - report_keys.absolute: { - report_keys.maximum: -175, - report_keys.minimum: -175 - }, - report_keys.relative: { - report_keys.maximum: -8.54, - report_keys.minimum: -8.54 - } + report_keys.absolute: {report_keys.maximum: -175, report_keys.minimum: -175}, + report_keys.relative: {report_keys.maximum: -8.54, report_keys.minimum: -8.54}, }, report_keys.name: "RAM for global variables", report_keys.maximum: 2048, - } + }, ], report_keys.sketches: [ { @@ -655,36 +628,36 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): { report_keys.current: { report_keys.absolute: 1460, - report_keys.relative: 4.53 + report_keys.relative: 4.53, }, report_keys.delta: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" + report_keys.relative: "N/A", }, report_keys.name: "flash", report_keys.maximum: 32256, report_keys.previous: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" - } + report_keys.relative: "N/A", + }, }, { report_keys.current: { report_keys.absolute: 190, - report_keys.relative: 9.28 + report_keys.relative: 9.28, }, report_keys.delta: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" + report_keys.relative: "N/A", }, report_keys.name: "RAM for global variables", report_keys.maximum: 2048, report_keys.previous: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" - } - } - ] + report_keys.relative: "N/A", + }, + }, + ], }, { report_keys.compilation_success: True, @@ -693,40 +666,37 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): { report_keys.current: { report_keys.absolute: 444, - report_keys.relative: 1.38 + report_keys.relative: 1.38, }, report_keys.delta: { report_keys.absolute: -994, - report_keys.relative: -3.08 + report_keys.relative: -3.08, }, report_keys.name: "flash", report_keys.maximum: 32256, report_keys.previous: { report_keys.absolute: 1438, - report_keys.relative: 4.46 - } + report_keys.relative: 4.46, + }, }, { - report_keys.current: { - report_keys.absolute: 9, - report_keys.relative: 0.44 - }, + report_keys.current: {report_keys.absolute: 9, report_keys.relative: 0.44}, report_keys.delta: { report_keys.absolute: -175, - report_keys.relative: -8.54 + report_keys.relative: -8.54, }, report_keys.name: "RAM for global variables", report_keys.maximum: 2048, report_keys.previous: { report_keys.absolute: 184, - report_keys.relative: 8.98 - } - } - ] - } - ] + report_keys.relative: 8.98, + }, + }, + ], + }, + ], } - ] + ], }, { report_keys.commit_hash: 
"54815a7d1a30fcb0d77d98242b158e7845c0516d", @@ -742,7 +712,7 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): { report_keys.name: "RAM for global variables", report_keys.maximum: "N/A", - } + }, ], report_keys.sketches: [ { @@ -752,7 +722,7 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): { report_keys.current: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" + report_keys.relative: "N/A", }, report_keys.name: "flash", report_keys.maximum: "N/A", @@ -760,12 +730,12 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): { report_keys.current: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" + report_keys.relative: "N/A", }, report_keys.name: "RAM for global variables", report_keys.maximum: "N/A", - } - ] + }, + ], }, { report_keys.compilation_success: True, @@ -774,7 +744,7 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): { report_keys.current: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" + report_keys.relative: "N/A", }, report_keys.name: "flash", report_keys.maximum: "N/A", @@ -782,28 +752,27 @@ def test_get_artifact(tmp_path, test_artifact_name, expected_success): { report_keys.current: { report_keys.absolute: "N/A", - report_keys.relative: "N/A" + report_keys.relative: "N/A", }, report_keys.name: "RAM for global variables", report_keys.maximum: "N/A", - } - ] + }, + ], }, - ] + ], } - ] + ], }, - ] - ) - ] + ], + ), + ], ) def test_get_sketches_reports(sketches_reports_path, expected_sketches_reports): report_size_deltas = get_reportsizedeltas_object() artifact_folder_object = tempfile.TemporaryDirectory(prefix="test_reportsizedeltas-") try: - distutils.dir_util.copy_tree(src=str(sketches_reports_path), - dst=artifact_folder_object.name) + distutils.dir_util.copy_tree(src=str(sketches_reports_path), dst=artifact_folder_object.name) except Exception: # pragma: no cover artifact_folder_object.cleanup() raise @@ -822,33 +791,21 @@ def test_get_sketches_reports(sketches_reports_path, expected_sketches_reports): report_keys.sizes: [ { report_keys.delta: { - report_keys.absolute: { - report_keys.maximum: -994, - report_keys.minimum: -994 - }, - report_keys.relative: { - report_keys.maximum: -3.08, - report_keys.minimum: -3.08 - } + report_keys.absolute: {report_keys.maximum: -994, report_keys.minimum: -994}, + report_keys.relative: {report_keys.maximum: -3.08, report_keys.minimum: -3.08}, }, report_keys.name: "flash", report_keys.maximum: 32256, }, { report_keys.delta: { - report_keys.absolute: { - report_keys.maximum: -175, - report_keys.minimum: -175 - }, - report_keys.relative: { - report_keys.maximum: -8.54, - report_keys.minimum: -8.54 - } + report_keys.absolute: {report_keys.maximum: -175, report_keys.minimum: -175}, + report_keys.relative: {report_keys.maximum: -8.54, report_keys.minimum: -8.54}, }, report_keys.name: "RAM for global variables", report_keys.maximum: 2048, - } - ] + }, + ], }, [ ["Board", "flash", "%", "RAM for global variables", "%"], @@ -857,9 +814,9 @@ def test_get_sketches_reports(sketches_reports_path, expected_sketches_reports): ":green_heart: -994 - -994", "-3.08 - -3.08", ":green_heart: -175 - -175", - "-8.54 - -8.54" - ] - ] + "-8.54 - -8.54", + ], + ], ), ( [ @@ -869,8 +826,8 @@ def test_get_sketches_reports(sketches_reports_path, expected_sketches_reports): ":green_heart: -994 - -994", "-3.08 - -3.08", ":green_heart: -175 - -175", - "-8.54 - -8.54" - ] + "-8.54 - -8.54", + ], ], { report_keys.board: 
"arduino:mbed_portenta:envie_m7", @@ -882,8 +839,8 @@ def test_get_sketches_reports(sketches_reports_path, expected_sketches_reports): { report_keys.name: "RAM for global variables", report_keys.maximum: "N/A", - } - ] + }, + ], }, [ ["Board", "flash", "%", "RAM for global variables", "%"], @@ -892,12 +849,12 @@ def test_get_sketches_reports(sketches_reports_path, expected_sketches_reports): ":green_heart: -994 - -994", "-3.08 - -3.08", ":green_heart: -175 - -175", - "-8.54 - -8.54" + "-8.54 - -8.54", ], - ["`arduino:mbed_portenta:envie_m7`", "N/A", "N/A", "N/A", "N/A"] - ] - ) - ] + ["`arduino:mbed_portenta:envie_m7`", "N/A", "N/A", "N/A", "N/A"], + ], + ), + ], ) def test_add_summary_report_row(report_data, fqbn_data, expected_report_data): report_size_deltas = get_reportsizedeltas_object() @@ -919,50 +876,32 @@ def test_add_summary_report_row(report_data, fqbn_data, expected_report_data): report_keys.name: "examples/Foo", report_keys.sizes: [ { - report_keys.current: { - report_keys.absolute: 3462, - report_keys.relative: 12.07 - }, - report_keys.delta: { - report_keys.absolute: -12, - report_keys.relative: -0.05 - }, + report_keys.current: {report_keys.absolute: 3462, report_keys.relative: 12.07}, + report_keys.delta: {report_keys.absolute: -12, report_keys.relative: -0.05}, report_keys.name: "flash", report_keys.maximum: 28672, - report_keys.previous: { - report_keys.absolute: 3474, - report_keys.relative: 12.12 - } + report_keys.previous: {report_keys.absolute: 3474, report_keys.relative: 12.12}, }, { - report_keys.current: { - report_keys.absolute: 149, - report_keys.relative: 5.82 - }, - report_keys.delta: { - report_keys.absolute: 0, - report_keys.relative: -0.00 - }, + report_keys.current: {report_keys.absolute: 149, report_keys.relative: 5.82}, + report_keys.delta: {report_keys.absolute: 0, report_keys.relative: -0.00}, report_keys.name: "RAM for global variables", report_keys.maximum: 2560, - report_keys.previous: { - report_keys.absolute: 149, - report_keys.relative: 5.82 - } - } - ] + report_keys.previous: {report_keys.absolute: 149, report_keys.relative: 5.82}, + }, + ], } - ] + ], }, [ ["Board", "`examples/Foo`
<br>flash", "%", "`examples/Foo`<br>RAM for global variables", "%"], - ["`arduino:avr:leonardo`", -12, -0.05, 0, -0.0] - ] + ["`arduino:avr:leonardo`", -12, -0.05, 0, -0.0], + ], ), ( [ ["Board", "`examples/Foo`<br>flash", "%", "`examples/Foo`<br>RAM for global variables", "%"], - ["`arduino:avr:leonardo`", -12, -0.05, 0, -0.0] + ["`arduino:avr:leonardo`", -12, -0.05, 0, -0.0], ], { report_keys.board: "arduino:mbed_portenta:envie_m7", @@ -972,32 +911,26 @@ def test_add_summary_report_row(report_data, fqbn_data, expected_report_data): report_keys.name: "examples/Foo", report_keys.sizes: [ { - report_keys.current: { - report_keys.absolute: "N/A", - report_keys.relative: "N/A" - }, + report_keys.current: {report_keys.absolute: "N/A", report_keys.relative: "N/A"}, report_keys.name: "flash", report_keys.maximum: "N/A", }, { - report_keys.current: { - report_keys.absolute: "N/A", - report_keys.relative: "N/A" - }, + report_keys.current: {report_keys.absolute: "N/A", report_keys.relative: "N/A"}, report_keys.name: "RAM for global variables", report_keys.maximum: "N/A", - } - ] + }, + ], } - ] + ], }, [ ["Board", "`examples/Foo`<br>flash", "%", "`examples/Foo`<br>
RAM for global variables", "%"], ["`arduino:avr:leonardo`", -12, -0.05, 0, -0.0], - ["`arduino:mbed_portenta:envie_m7`", "N/A", "N/A", "N/A", "N/A"] - ] - ) - ] + ["`arduino:mbed_portenta:envie_m7`", "N/A", "N/A", "N/A", "N/A"], + ], + ), + ], ) def test_add_detailed_report_row(report_data, fqbn_data, expected_report_data): report_size_deltas = get_reportsizedeltas_object() @@ -1040,8 +973,7 @@ def test_generate_report(): artifact_folder_object = tempfile.TemporaryDirectory(prefix="test_reportsizedeltas-") try: - distutils.dir_util.copy_tree(src=str(sketches_report_path), - dst=artifact_folder_object.name) + distutils.dir_util.copy_tree(src=str(sketches_report_path), dst=artifact_folder_object.name) except Exception: # pragma: no cover artifact_folder_object.cleanup() raise @@ -1051,22 +983,24 @@ def test_generate_report(): assert report == expected_deltas_report -@pytest.mark.parametrize("show_emoji, minimum, maximum, expected_value", - [(True, "N/A", "N/A", "N/A"), - (True, -1, 0, ":green_heart: -1 - 0"), - (False, -1, 0, "-1 - 0"), - (True, 0, 0, "0 - 0"), - (True, 0, 1, ":small_red_triangle: 0 - +1"), - (True, 1, 1, ":small_red_triangle: +1 - +1"), - (True, -1, 1, ":grey_question: -1 - +1")]) +@pytest.mark.parametrize( + "show_emoji, minimum, maximum, expected_value", + [ + (True, "N/A", "N/A", "N/A"), + (True, -1, 0, ":green_heart: -1 - 0"), + (False, -1, 0, "-1 - 0"), + (True, 0, 0, "0 - 0"), + (True, 0, 1, ":small_red_triangle: 0 - +1"), + (True, 1, 1, ":small_red_triangle: +1 - +1"), + (True, -1, 1, ":grey_question: -1 - +1"), + ], +) def test_get_summary_value(show_emoji, minimum, maximum, expected_value): report_size_deltas = get_reportsizedeltas_object() - assert report_size_deltas.get_summary_value( - show_emoji=show_emoji, - minimum=minimum, - maximum=maximum - ) == expected_value + assert ( + report_size_deltas.get_summary_value(show_emoji=show_emoji, minimum=minimum, maximum=maximum) == expected_value + ) def test_comment_report(): @@ -1085,15 +1019,13 @@ def test_comment_report(): report_data = report_data.encode(encoding="utf-8") report_size_deltas.http_request.assert_called_once_with( - url="https://api.github.com/repos/" + repository_name + "/issues/" - + str(pr_number) + "/comments", - data=report_data) + url="https://api.github.com/repos/" + repository_name + "/issues/" + str(pr_number) + "/comments", + data=report_data, + ) def test_api_request(): - response_data = {"json_data": {"foo": "bar"}, - "additional_pages": False, - "page_count": 1} + response_data = {"json_data": {"foo": "bar"}, "additional_pages": False, "page_count": 1} request = "test_request" request_parameters = "test_parameters" page_number = 1 @@ -1102,12 +1034,18 @@ def test_api_request(): report_size_deltas.get_json_response = unittest.mock.MagicMock(return_value=response_data) - assert response_data == report_size_deltas.api_request(request=request, - request_parameters=request_parameters, - page_number=page_number) + assert response_data == report_size_deltas.api_request( + request=request, request_parameters=request_parameters, page_number=page_number + ) report_size_deltas.get_json_response.assert_called_once_with( - url="https://api.github.com/" + request + "?" + request_parameters - + "&page=" + str(page_number) + "&per_page=100") + url="https://api.github.com/" + + request + + "?" 
+ + request_parameters + + "&page=" + + str(page_number) + + "&per_page=100" + ) def test_get_json_response(): @@ -1141,9 +1079,13 @@ def test_get_json_response(): assert not response_data["additional_pages"] assert 1 == response_data["page_count"] - response = {"headers": {"Link": '; rel="next", ' - '"; rel="last"'}, - "body": "[42]"} + response = { + "headers": { + "Link": '; rel="next", ' + '"; rel="last"' + }, + "body": "[42]", + } report_size_deltas.http_request = unittest.mock.MagicMock(return_value=response) # Non-empty body, Link field is populated @@ -1180,8 +1122,7 @@ def test_raw_http_request(mocker): request = "test_request" urlopen_return = unittest.mock.sentinel.urlopen_return - report_size_deltas = get_reportsizedeltas_object(repository_name=user_name + "/FooRepositoryName", - token=token) + report_size_deltas = get_reportsizedeltas_object(repository_name=user_name + "/FooRepositoryName", token=token) mocker.patch.object(urllib.request, "Request", autospec=True, return_value=request) mocker.patch("reportsizedeltas.ReportSizeDeltas.handle_rate_limiting", autospec=True) @@ -1189,10 +1130,9 @@ def test_raw_http_request(mocker): report_size_deltas.raw_http_request(url=url, data=data) - urllib.request.Request.assert_called_once_with(url=url, - headers={"Authorization": "token " + token, - "User-Agent": user_name}, - data=data) + urllib.request.Request.assert_called_once_with( + url=url, headers={"Authorization": "token " + token, "User-Agent": user_name}, data=data + ) # URL is subject to GitHub API rate limiting report_size_deltas.handle_rate_limiting.assert_called_once() @@ -1234,44 +1174,56 @@ def test_determine_urlopen_retry_true(mocker): mocker.patch("time.sleep", autospec=True) assert reportsizedeltas.determine_urlopen_retry( - exception=urllib.error.HTTPError(None, 502, "Bad Gateway", None, None)) + exception=urllib.error.HTTPError(None, 502, "Bad Gateway", None, None) + ) def test_determine_urlopen_retry_false(): assert not reportsizedeltas.determine_urlopen_retry( - exception=urllib.error.HTTPError(None, 401, "Unauthorized", None, None)) + exception=urllib.error.HTTPError(None, 401, "Unauthorized", None, None) + ) def test_get_page_count(): page_count = 4 - link_header = ('; rel="next", ' - '"; rel="last"') + link_header = ( + '; rel="next", ' + '"; rel="last"' + ) assert page_count == reportsizedeltas.get_page_count(link_header=link_header) -@pytest.mark.parametrize("report, column_heading, expected_column_number, expected_report", - [([["Board", "foo memory type", "%"], ["foo board", 12, 234]], - "foo memory type", - 1, - [["Board", "foo memory type", "%"], ["foo board", 12, 234]]), - ([["Board", "foo memory type", "%"], ["foo board", 12, 234, "bar board"]], - "bar memory type", - 3, - [["Board", "foo memory type", "%", "bar memory type", "%"], - ["foo board", 12, 234, "bar board", "", ""]])]) +@pytest.mark.parametrize( + "report, column_heading, expected_column_number, expected_report", + [ + ( + [["Board", "foo memory type", "%"], ["foo board", 12, 234]], + "foo memory type", + 1, + [["Board", "foo memory type", "%"], ["foo board", 12, 234]], + ), + ( + [["Board", "foo memory type", "%"], ["foo board", 12, 234, "bar board"]], + "bar memory type", + 3, + [["Board", "foo memory type", "%", "bar memory type", "%"], ["foo board", 12, 234, "bar board", "", ""]], + ), + ], +) def test_get_report_column_number(report, column_heading, expected_column_number, expected_report): - assert reportsizedeltas.get_report_column_number( - report=report, - column_heading=column_heading 
- ) == expected_column_number + assert ( + reportsizedeltas.get_report_column_number(report=report, column_heading=column_heading) + == expected_column_number + ) assert report == expected_report def test_generate_markdown_table(): - assert reportsizedeltas.generate_markdown_table( - row_list=[["Board", "Flash", "RAM"], ["foo:bar:baz", 42, 11]] - ) == "Board|Flash|RAM\n-|-|-\nfoo:bar:baz|42|11\n" + assert ( + reportsizedeltas.generate_markdown_table(row_list=[["Board", "Flash", "RAM"], ["foo:bar:baz", 42, 11]]) + == "Board|Flash|RAM\n-|-|-\nfoo:bar:baz|42|11\n" + ) def test_generate_csv_table():