From a70e3d2c3307577c76dd8a1f4394d0784e3990fd Mon Sep 17 00:00:00 2001 From: Catherine Lee Date: Fri, 10 Jan 2025 15:15:04 -0800 Subject: [PATCH 1/2] tc --- .github/actions/bc-lint/action.yml | 2 +- .github/actions/binary-upload/action.yml | 4 ++-- .github/scripts/validate_binaries.sh | 2 +- .github/workflows/lint.yml | 2 +- .lintrunner.toml | 1 + aws/lambda/log-classifier/scripts/download_logs.py | 2 +- aws/websites/download.pytorch.org/pep503_whl_redirect.js | 4 ++-- .../lambdas/runner-binaries-syncer/.prettierrc | 1 - .../policies/instance-ec2-create-tags-policy.json | 2 +- tools/analytics/download_count_wheels.py | 2 +- tools/analytics/github_analyze.py | 8 ++++---- tools/analytics/s3_test_stats_analyze.py | 8 ++++---- tools/analytics/validate_pypi_staging.py | 8 ++++---- tools/binary_size_validation/README.md | 4 ++-- tools/scripts/generate_binary_build_matrix.py | 6 +++--- tools/tests/README.md | 2 +- 16 files changed, 29 insertions(+), 29 deletions(-) diff --git a/.github/actions/bc-lint/action.yml b/.github/actions/bc-lint/action.yml index cd29985164..a4c945bf85 100644 --- a/.github/actions/bc-lint/action.yml +++ b/.github/actions/bc-lint/action.yml @@ -1,5 +1,5 @@ name: 'BC Lint Action' -description: 'A reusable action for running the BC Lint workflow. +description: 'A reusable action for running the BC Lint workflow. See https://github.com/pytorch/test-infra/wiki/BC-Linter for more information.' inputs: repo: diff --git a/.github/actions/binary-upload/action.yml b/.github/actions/binary-upload/action.yml index 5a565625ac..2ee1621b71 100644 --- a/.github/actions/binary-upload/action.yml +++ b/.github/actions/binary-upload/action.yml @@ -25,10 +25,10 @@ runs: working-directory: ${{ inputs.repository }} run: | set -euxo pipefail - + # shellcheck disable=SC1090 source "${BUILD_ENV_FILE}" - + pip install awscli==1.32.18 yum install -y jq diff --git a/.github/scripts/validate_binaries.sh b/.github/scripts/validate_binaries.sh index 9182505b68..16a39fb77f 100755 --- a/.github/scripts/validate_binaries.sh +++ b/.github/scripts/validate_binaries.sh @@ -81,7 +81,7 @@ else if [[ ${MATRIX_GPU_ARCH_VERSION} == "12.6" || ${MATRIX_GPU_ARCH_TYPE} == "xpu" || ${MATRIX_GPU_ARCH_TYPE} == "rocm" ]]; then export DESIRED_DEVTOOLSET="cxx11-abi" - # TODO: enable torch-compile on ROCM + # TODO: enable torch-compile on ROCM if [[ ${MATRIX_GPU_ARCH_TYPE} == "rocm" ]]; then TEST_SUFFIX=${TEST_SUFFIX}" --torch-compile-check disabled" fi diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index e9d0cadb7f..66424551f8 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -37,7 +37,7 @@ jobs: - name: Run lintrunner on all files - Linux run: | set +e - if ! lintrunner -v --force-color --all-files --tee-json=lint.json --take ACTIONLINT,MYPY,RUSTFMT,COPYRIGHT,LINTRUNNER_VERSION,UFMT,NEWLINE,TABS; then + if ! 
lintrunner -v --force-color --all-files --tee-json=lint.json --take ACTIONLINT,MYPY,RUSTFMT,COPYRIGHT,LINTRUNNER_VERSION,UFMT,NEWLINE,TABS,SPACES; then echo "" echo -e "\e[1m\e[36mYou can reproduce these results locally by using \`lintrunner -m main\`.\e[0m" exit 1 diff --git a/.lintrunner.toml b/.lintrunner.toml index f66e9e7526..740581fd69 100644 --- a/.lintrunner.toml +++ b/.lintrunner.toml @@ -132,6 +132,7 @@ exclude_patterns = [ '**/*.patch', '**/fixtures/**', '**/snapshots/**', + '.github/actions/setup-ssh/index.js', ] command = [ 'python3', diff --git a/aws/lambda/log-classifier/scripts/download_logs.py b/aws/lambda/log-classifier/scripts/download_logs.py index 90e89d0435..40df993683 100644 --- a/aws/lambda/log-classifier/scripts/download_logs.py +++ b/aws/lambda/log-classifier/scripts/download_logs.py @@ -8,7 +8,7 @@ def read_log_dataset(file_location): """ Reads a log dataset from a CSV file and returns a list of dictionaries. - The CSV file should have the following schema: + The CSV file should have the following schema: "id","startTime","conclusion","dynamoKey","name","job_name" Args: diff --git a/aws/websites/download.pytorch.org/pep503_whl_redirect.js b/aws/websites/download.pytorch.org/pep503_whl_redirect.js index e3098ccb09..7d861e8626 100644 --- a/aws/websites/download.pytorch.org/pep503_whl_redirect.js +++ b/aws/websites/download.pytorch.org/pep503_whl_redirect.js @@ -4,7 +4,7 @@ function handler(event) { var uri_parts = uri.split('/') var last_uri_part = uri_parts[uri_parts.length -1] var rocm_pattern = /^rocm[0-9]+(\.[0-9]+)*$/ - + if (uri.startsWith('/whl')) { // Check whether the URI is missing a file name. if (uri.endsWith('/')) { @@ -18,7 +18,7 @@ function handler(event) { request.uri += '/index.html'; } } - + // Similar behavior for libtorch if (uri.startsWith('/libtorch')) { // Check whether the URI is missing a file name. 
diff --git a/terraform-aws-github-runner/modules/runner-binaries-syncer/lambdas/runner-binaries-syncer/.prettierrc b/terraform-aws-github-runner/modules/runner-binaries-syncer/lambdas/runner-binaries-syncer/.prettierrc index 01e6eca132..587ffe1a74 100644 --- a/terraform-aws-github-runner/modules/runner-binaries-syncer/lambdas/runner-binaries-syncer/.prettierrc +++ b/terraform-aws-github-runner/modules/runner-binaries-syncer/lambdas/runner-binaries-syncer/.prettierrc @@ -4,4 +4,3 @@ "trailingComma": "all", "semi": true, } - \ No newline at end of file diff --git a/terraform-aws-github-runner/modules/runners-instances/policies/instance-ec2-create-tags-policy.json b/terraform-aws-github-runner/modules/runners-instances/policies/instance-ec2-create-tags-policy.json index 5f997c6b89..db569909b4 100644 --- a/terraform-aws-github-runner/modules/runners-instances/policies/instance-ec2-create-tags-policy.json +++ b/terraform-aws-github-runner/modules/runners-instances/policies/instance-ec2-create-tags-policy.json @@ -1,5 +1,5 @@ { - "Version": "2012-10-17", + "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", diff --git a/tools/analytics/download_count_wheels.py b/tools/analytics/download_count_wheels.py index d3562fb16c..277edaa164 100644 --- a/tools/analytics/download_count_wheels.py +++ b/tools/analytics/download_count_wheels.py @@ -138,7 +138,7 @@ def output_results(bytes_cache: dict) -> None: def download_logs(log_directory: str, since: float): dt_now = datetime.now(timezone.utc) dt_end = datetime(dt_now.year, dt_now.month, dt_now.day, tzinfo=timezone.utc) - dt_start = dt_end - timedelta(days=1, hours=1) # Add 1 hour padding to account for potentially missed logs due to timing + dt_start = dt_end - timedelta(days=1, hours=1) # Add 1 hour padding to account for potentially missed logs due to timing for key in tqdm(BUCKET.objects.filter(Prefix='cflogs')): remote_fname = key.key local_fname = os.path.join(log_directory, remote_fname) diff --git a/tools/analytics/github_analyze.py b/tools/analytics/github_analyze.py index c6da4d8ca8..b6a37aaf3a 100755 --- a/tools/analytics/github_analyze.py +++ b/tools/analytics/github_analyze.py @@ -406,7 +406,7 @@ def get_commits_dict(x, y): print(f"issue_num: {issue_num}, len(issue_comments)={len(current_issue_comments)}") print("URL;Title;Status") - # Iterate over the previous release branch to find potentially missing cherry picks in the current issue. + # Iterate over the previous release branch to find potentially missing cherry picks in the current issue. 
for commit in prev_release_commits.values(): not_cherry_picked_in_current_issue = any(commit.pr_url not in issue_comment['body'] for issue_comment in current_issue_comments) for main_commit in main_commits.values(): @@ -475,7 +475,7 @@ def main(): if args.analyze_stacks: analyze_stacks(repo) return - + # Use milestone idx or search it along milestone titles try: milestone_idx = int(args.milestone_id) @@ -491,11 +491,11 @@ def main(): if args.missing_in_branch: commits_missing_in_branch(repo, - args.branch, + args.branch, f'orig/{args.branch}', milestone_idx) return - + if args.missing_in_release: commits_missing_in_release(repo, args.branch, diff --git a/tools/analytics/s3_test_stats_analyze.py b/tools/analytics/s3_test_stats_analyze.py index 74b4f6de8d..d8512c5032 100644 --- a/tools/analytics/s3_test_stats_analyze.py +++ b/tools/analytics/s3_test_stats_analyze.py @@ -33,7 +33,7 @@ def _get_latests_git_commit_sha_list(lookback: int): def _json_to_df(data: Dict[str, Any], granularity: str) -> pd.DataFrame: reformed_data = list() for fname, fdata in data['files'].items(): - if granularity == 'file': + if granularity == 'file': reformed_data.append({ "job": data['job'], "sha": data['sha'], @@ -42,7 +42,7 @@ def _json_to_df(data: Dict[str, Any], granularity: str) -> pd.DataFrame: }) else: for sname, sdata in fdata['suites'].items(): - if granularity == 'suite': + if granularity == 'suite': reformed_data.append({ "job": data['job'], "sha": data['sha'], @@ -140,8 +140,8 @@ def main(): dataframe = parse_and_export_stats(f'{cache_folder}/test_time/', granularity) dataframe.to_pickle(output) - + if __name__ == "__main__": main() - + diff --git a/tools/analytics/validate_pypi_staging.py b/tools/analytics/validate_pypi_staging.py index 5321313dfc..1be8b0f852 100644 --- a/tools/analytics/validate_pypi_staging.py +++ b/tools/analytics/validate_pypi_staging.py @@ -16,10 +16,10 @@ "macosx_11_0_arm64", ] PYTHON_VERSIONS = [ - "cp38", - "cp39", - "cp310", - "cp311", + "cp38", + "cp39", + "cp310", + "cp311", "cp312" ] S3_PYPI_STAGING = "pytorch-backup" diff --git a/tools/binary_size_validation/README.md b/tools/binary_size_validation/README.md index c8c0653902..559419f5da 100644 --- a/tools/binary_size_validation/README.md +++ b/tools/binary_size_validation/README.md @@ -1,6 +1,6 @@ # PyTorch Wheel Binary Size Validation -A script to fetch and validate the binary size of PyTorch wheels +A script to fetch and validate the binary size of PyTorch wheels in the given channel (test, nightly) against the given threshold. @@ -11,7 +11,7 @@ pip install -r requirements.txt ``` ### Usage - + ```bash # print help python binary_size_validation.py --help diff --git a/tools/scripts/generate_binary_build_matrix.py b/tools/scripts/generate_binary_build_matrix.py index 559f3ffb39..7001f28ae9 100755 --- a/tools/scripts/generate_binary_build_matrix.py +++ b/tools/scripts/generate_binary_build_matrix.py @@ -2,7 +2,7 @@ """Generates a matrix to be utilized through github actions -Important. After making changes to this file please run following command: +Important. 
After making changes to this file please run following command: python -m tools.tests.test_generate_binary_build_matrix --update-reference-files Will output a condensed version of the matrix if on a pull request that only @@ -375,8 +375,8 @@ def generate_libtorch_matrix( gpu_arch_type = arch_type(arch_version) gpu_arch_version = "" if arch_version == CPU else arch_version - # Rocm builds where removed for pre-cxx11 abi - if gpu_arch_type == "rocm" and abi_version == PRE_CXX11_ABI: + # Rocm builds where removed for pre-cxx11 abi + if gpu_arch_type == "rocm" and abi_version == PRE_CXX11_ABI: continue desired_cuda = translate_desired_cuda(gpu_arch_type, gpu_arch_version) diff --git a/tools/tests/README.md b/tools/tests/README.md index fae2240c3d..db202303f1 100644 --- a/tools/tests/README.md +++ b/tools/tests/README.md @@ -1,5 +1,5 @@ # Testing during CI -The tests in this folder are automatically executed during CI by `.github/workflows/tests.yml`. +The tests in this folder are automatically executed during CI by `.github/workflows/tests.yml`. If you add a new test that requires installing additional modules, please update the `pip install` command in that workflow. From b69ee2dcd5e5940cd9cf4ff5990a0b842c4e2c68 Mon Sep 17 00:00:00 2001 From: Catherine Lee Date: Fri, 10 Jan 2025 15:28:43 -0800 Subject: [PATCH 2/2] tc --- tools/analytics/s3_test_stats_analyze.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/analytics/s3_test_stats_analyze.py b/tools/analytics/s3_test_stats_analyze.py index d8512c5032..78ea3a7fd8 100644 --- a/tools/analytics/s3_test_stats_analyze.py +++ b/tools/analytics/s3_test_stats_analyze.py @@ -144,4 +144,3 @@ def main(): if __name__ == "__main__": main() -
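
---

Postscript on local verification (a sketch, not part of either commit): the lint.yml hunk in patch 1/2 adds SPACES to the set of linters CI takes, and .lintrunner.toml gains one more exclude. Assuming the lintrunner CLI from PyPI, initialized against this repo's .lintrunner.toml, CI's check can be approximated locally:

    # install the CLI and the linter dependencies declared in .lintrunner.toml
    pip install lintrunner
    lintrunner init
    # run the same linter set CI takes; -m main restricts the run to files
    # changed relative to main, as the workflow's failure message suggests
    lintrunner -m main --take ACTIONLINT,MYPY,RUSTFMT,COPYRIGHT,LINTRUNNER_VERSION,UFMT,NEWLINE,TABS,SPACES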