Skip to content

Commit 29d71c8

Browse files
committed
Update on "Properly type CachedFunction & rename to CachedMethod"
Previously, I was unsure how to properly type the parameters of a decorated method. Then I found python/mypy#13222 (comment) which explains how to use `Concatenate` to hackily achieve it. Not entirely sure why we can't write a user-defined version of `Callable` that works seamlessly for both functions and methods... cc voznesenskym penguinwu EikanWang jgong5 Guobing-Chen XiaobingSuper zhuhaozhe blzheng wenzhe-nrv jiayisunx peterbell10 ipiszy yf225 chenyang78 kadeng muchulee8 aakhundov ColinPeppler [ghstack-poisoned]
2 parents d76e69c + d9b1f88 commit 29d71c8

File tree

480 files changed

+15044
-5291
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

480 files changed

+15044
-5291
lines changed
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
9682172576d5d9a10f3162ad91e0a32b384a3b7c
1+
f4578fc150f1690be27fd1ba3258b35a20d9c39d

.ci/docker/requirements-ci.txt

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -292,3 +292,9 @@ tensorboard==2.13.0
292292
#Description: Also included in .ci/docker/requirements-docs.txt
293293
#Pinned versions:
294294
#test that import: test_tensorboard
295+
296+
pywavelets==1.4.1
297+
#Description: This is a requirement of scikit-image, we need to pin
298+
# it here because 1.5.0 conflicts with numpy 1.21.2 used in CI
299+
#Pinned versions: 1.4.1
300+
#test that import:

.ci/pytorch/test.sh

Lines changed: 10 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -332,7 +332,7 @@ if [[ "${TEST_CONFIG}" == *dynamic* ]]; then
332332
DYNAMO_BENCHMARK_FLAGS+=(--dynamic-shapes --dynamic-batch-only)
333333
fi
334334

335-
if [[ "${TEST_CONFIG}" == *cpu_accuracy* ]]; then
335+
if [[ "${TEST_CONFIG}" == *cpu_inductor* ]]; then
336336
DYNAMO_BENCHMARK_FLAGS+=(--device cpu)
337337
else
338338
DYNAMO_BENCHMARK_FLAGS+=(--device cuda)
@@ -451,19 +451,12 @@ test_single_dynamo_benchmark() {
451451
"${DYNAMO_BENCHMARK_FLAGS[@]}" \
452452
"$@" "${partition_flags[@]}" \
453453
--output "$TEST_REPORTS_DIR/${name}_${suite}.csv"
454-
455-
if [[ "${TEST_CONFIG}" == *inductor* ]] && [[ "${TEST_CONFIG}" != *cpu_accuracy* ]]; then
456-
# other jobs (e.g. periodic, cpu-accuracy) may have different set of expected models.
457-
python benchmarks/dynamo/check_accuracy.py \
458-
--actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
459-
--expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"
460-
python benchmarks/dynamo/check_graph_breaks.py \
461-
--actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
462-
--expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"
463-
else
464-
python benchmarks/dynamo/check_csv.py \
465-
-f "$TEST_REPORTS_DIR/${name}_${suite}.csv"
466-
fi
454+
python benchmarks/dynamo/check_accuracy.py \
455+
--actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
456+
--expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"
457+
python benchmarks/dynamo/check_graph_breaks.py \
458+
--actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
459+
--expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"
467460
fi
468461
}
469462

@@ -481,7 +474,7 @@ test_dynamo_benchmark() {
481474
elif [[ "${TEST_CONFIG}" == *perf* ]]; then
482475
test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@"
483476
else
484-
if [[ "${TEST_CONFIG}" == *cpu_accuracy* ]]; then
477+
if [[ "${TEST_CONFIG}" == *cpu_inductor* ]]; then
485478
test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --float32 "$@"
486479
elif [[ "${TEST_CONFIG}" == *aot_inductor* ]]; then
487480
test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --bfloat16 "$@"
@@ -1063,7 +1056,7 @@ elif [[ "${TEST_CONFIG}" == *timm* ]]; then
10631056
id=$((SHARD_NUMBER-1))
10641057
test_dynamo_benchmark timm_models "$id"
10651058
elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
1066-
if [[ "${TEST_CONFIG}" == *cpu_accuracy* ]]; then
1059+
if [[ "${TEST_CONFIG}" == *cpu_inductor* ]]; then
10671060
install_torchaudio cpu
10681061
else
10691062
install_torchaudio cuda
@@ -1080,7 +1073,7 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
10801073
checkout_install_torchbench
10811074
# Do this after checkout_install_torchbench to ensure we clobber any
10821075
# nightlies that torchbench may pull in
1083-
if [[ "${TEST_CONFIG}" != *cpu_accuracy* ]]; then
1076+
if [[ "${TEST_CONFIG}" != *cpu_inductor* ]]; then
10841077
install_torchrec_and_fbgemm
10851078
fi
10861079
PYTHONPATH=$(pwd)/torchbench test_dynamo_benchmark torchbench "$id"

.circleci/scripts/binary_populate_env.sh

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -77,15 +77,8 @@ else
7777
export PYTORCH_BUILD_VERSION="${BASE_BUILD_VERSION}+$DESIRED_CUDA"
7878
fi
7979

80-
# The build with with-pypi-cudnn suffix is only applicable to
81-
# pypi small wheel Linux x86 build
82-
if [[ -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]] && [[ "$(uname)" == 'Linux' && "$(uname -m)" == "x86_64" ]]; then
83-
export PYTORCH_BUILD_VERSION="${PYTORCH_BUILD_VERSION}-with-pypi-cudnn"
84-
fi
85-
8680
export PYTORCH_BUILD_NUMBER=1
8781

88-
8982
JAVA_HOME=
9083
BUILD_JNI=OFF
9184
if [[ "$PACKAGE_TYPE" == libtorch ]]; then

.circleci/scripts/binary_upload.sh

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -16,11 +16,6 @@ UPLOAD_BUCKET="s3://pytorch"
1616
BACKUP_BUCKET="s3://pytorch-backup"
1717
BUILD_NAME=${BUILD_NAME:-}
1818

19-
# this is a temporary change to upload pypi-cudnn builds to a separate folder
20-
if [[ ${BUILD_NAME} == *with-pypi-cudnn* ]]; then
21-
UPLOAD_SUBFOLDER="${UPLOAD_SUBFOLDER}_pypi_cudnn"
22-
fi
23-
2419
DRY_RUN=${DRY_RUN:-enabled}
2520
# Don't actually do work unless explicit
2621
ANACONDA="true anaconda"

.flake8

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -26,6 +26,9 @@ ignore =
2626
# TorchFix codes that don't make sense for PyTorch itself:
2727
# removed and deprecated PyTorch functions.
2828
TOR001,TOR101,
29+
# TODO(kit1980): fix all TOR102 issues
30+
# `torch.load` without `weights_only` parameter is unsafe
31+
TOR102,
2932
per-file-ignores =
3033
__init__.py: F401
3134
torch/utils/cpp_extension.py: B950

.github/ci_commit_pins/audio.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
a8f4e97bd5356a7a77510cdf6a3a62e25a5dc602
1+
db624844f5c95bb7618fe5a5f532bf9b68efeb45

.github/ci_commit_pins/vision.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
4433680aa57439ed684f9854fac3443b76e03c03
1+
893b4abdc0c9df36c241c58769810f69e35dab48

.github/ci_commit_pins/xla.txt

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
e60428d1b1ecfa1f751af5511db0d2a38c6533f3
1+
5e50379413e607df4d8dd1dda026736c12bb4162

.github/labeler.yml

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -69,8 +69,8 @@
6969
- .ci/docker/ci_commit_pins/triton.txt
7070

7171
"module: distributed":
72-
- /torch/csrc/distributed/**
73-
- /torch/distributed/**
74-
- /torch/nn/parallel/**
75-
- /test/distributed/**
76-
- /torch/testing/_internal/distributed/**
72+
- torch/csrc/distributed/**
73+
- torch/distributed/**
74+
- torch/nn/parallel/**
75+
- test/distributed/**
76+
- torch/testing/_internal/distributed/**

.github/merge_rules.yaml

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -74,6 +74,7 @@
7474

7575
- name: OSS CI / pytorchbot
7676
patterns:
77+
- .github/ci_commit_pins/audio.txt
7778
- .github/ci_commit_pins/vision.txt
7879
- .github/ci_commit_pins/torchdynamo.txt
7980
- .ci/docker/ci_commit_pins/triton.txt
@@ -84,6 +85,19 @@
8485
- EasyCLA
8586
- Lint
8687
- pull
88+
- inductor
89+
90+
- name: OSS CI /pytorchbot / Executorch
91+
patterns:
92+
- .ci/docker/ci_commit_pins/executorch.txt
93+
approved_by:
94+
- pytorchbot
95+
ignore_flaky_failures: false
96+
mandatory_checks_name:
97+
- EasyCLA
98+
- Lint
99+
- pull / linux-jammy-py3-clang12-executorch / build
100+
- pull / linux-jammy-py3-clang12-executorch / test (executorch, 1, 1, linux.2xlarge)
87101

88102
- name: OSS CI / pytorchbot / XLA
89103
patterns:

.github/scripts/generate_binary_build_matrix.py

Lines changed: 26 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,9 @@
1010
* Latest ROCM
1111
"""
1212

13+
import os
1314
from typing import Dict, List, Optional, Tuple
1415

15-
1616
CUDA_ARCHES = ["11.8", "12.1"]
1717

1818

@@ -95,7 +95,7 @@ def arch_type(arch_version: str) -> str:
9595

9696

9797
# This can be updated to the release version when cutting release branch, i.e. 2.1
98-
DEFAULT_TAG = "main"
98+
DEFAULT_TAG = os.getenv("RELEASE_VERSION_TAG", "main")
9999

100100
WHEEL_CONTAINER_IMAGES = {
101101
**{
@@ -264,7 +264,6 @@ def generate_wheels_matrix(
264264
os: str,
265265
arches: Optional[List[str]] = None,
266266
python_versions: Optional[List[str]] = None,
267-
gen_special_an_non_special_wheel: bool = True,
268267
) -> List[Dict[str, str]]:
269268
package_type = "wheel"
270269
if os == "linux" or os == "linux-aarch64":
@@ -298,8 +297,7 @@ def generate_wheels_matrix(
298297
else arch_version
299298
)
300299

301-
# special 12.1 wheels package without dependencies
302-
# dependency downloaded via pip install
300+
# 12.1 linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install
303301
if arch_version == "12.1" and os == "linux":
304302
ret.append(
305303
{
@@ -313,35 +311,33 @@ def generate_wheels_matrix(
313311
"container_image": WHEEL_CONTAINER_IMAGES[arch_version],
314312
"package_type": package_type,
315313
"pytorch_extra_install_requirements": PYTORCH_EXTRA_INSTALL_REQUIREMENTS,
316-
"build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}-with-pypi-cudnn".replace( # noqa: B950
314+
"build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}".replace( # noqa: B950
317315
".", "_"
318316
),
319317
}
320318
)
321-
if not gen_special_an_non_special_wheel:
322-
continue
323-
324-
ret.append(
325-
{
326-
"python_version": python_version,
327-
"gpu_arch_type": gpu_arch_type,
328-
"gpu_arch_version": gpu_arch_version,
329-
"desired_cuda": translate_desired_cuda(
330-
gpu_arch_type, gpu_arch_version
331-
),
332-
"devtoolset": "cxx11-abi"
333-
if arch_version == "cpu-cxx11-abi"
334-
else "",
335-
"container_image": WHEEL_CONTAINER_IMAGES[arch_version],
336-
"package_type": package_type,
337-
"build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}".replace(
338-
".", "_"
339-
),
340-
"pytorch_extra_install_requirements": PYTORCH_EXTRA_INSTALL_REQUIREMENTS
341-
if os != "linux"
342-
else "",
343-
}
344-
)
319+
else:
320+
ret.append(
321+
{
322+
"python_version": python_version,
323+
"gpu_arch_type": gpu_arch_type,
324+
"gpu_arch_version": gpu_arch_version,
325+
"desired_cuda": translate_desired_cuda(
326+
gpu_arch_type, gpu_arch_version
327+
),
328+
"devtoolset": "cxx11-abi"
329+
if arch_version == "cpu-cxx11-abi"
330+
else "",
331+
"container_image": WHEEL_CONTAINER_IMAGES[arch_version],
332+
"package_type": package_type,
333+
"build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}".replace(
334+
".", "_"
335+
),
336+
"pytorch_extra_install_requirements": PYTORCH_EXTRA_INSTALL_REQUIREMENTS
337+
if os != "linux"
338+
else "",
339+
}
340+
)
345341
return ret
346342

347343

.github/scripts/generate_ci_workflows.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -158,7 +158,6 @@ class OperatingSystem:
158158
OperatingSystem.LINUX,
159159
arches=["11.8", "12.1"],
160160
python_versions=["3.8"],
161-
gen_special_an_non_special_wheel=False,
162161
),
163162
branches="main",
164163
),

.github/scripts/github_utils.py

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -178,4 +178,14 @@ def gh_fetch_merge_base(org: str, repo: str, base: str, head: str) -> str:
178178

179179
def gh_update_pr_state(org: str, repo: str, pr_num: int, state: str = "open") -> None:
180180
url = f"{GITHUB_API_URL}/repos/{org}/{repo}/pulls/{pr_num}"
181-
gh_fetch_url(url, method="PATCH", data={"state": state})
181+
try:
182+
gh_fetch_url(url, method="PATCH", data={"state": state})
183+
except HTTPError as err:
184+
# When trying to open the pull request, error 422 means that the branch
185+
# has been deleted and the API couldn't re-open it
186+
if err.code == 422 and state == "open":
187+
warnings.warn(
188+
f"Failed to open {pr_num} because its head branch has been deleted: {err}"
189+
)
190+
else:
191+
raise
Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
import argparse
2+
import subprocess
3+
from typing import Dict
4+
5+
import generate_binary_build_matrix
6+
7+
8+
def tag_image(
    image: str,
    default_tag: str,
    release_version: str,
    dry_run: str,
    tagged_images: Dict[str, bool],
) -> None:
    """Retag *image* from its default tag to *release_version* and push it.

    Images already recorded in *tagged_images* are skipped so the same image
    is never pulled or pushed twice.  Docker commands only run when *dry_run*
    is exactly ``"disabled"``; otherwise the action is just printed.
    """
    # Skip images handled by a previous call (shared cache across platforms).
    if image in tagged_images:
        return

    release_image = image.replace(f"-{default_tag}", f"-{release_version}")
    print(f"Tagging {image} to {release_image} , dry_run: {dry_run}")

    if dry_run == "disabled":
        for cmd in (
            ["docker", "pull", image],
            ["docker", "tag", image, release_image],
            ["docker", "push", release_image],
        ):
            subprocess.check_call(cmd)
    tagged_images[image] = True
25+
26+
27+
def main() -> None:
    """Retag every CI docker container image from the default tag to a release tag."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--version",
        help="Version to tag",
        type=str,
        default="2.2",
    )
    parser.add_argument(
        "--dry-run",
        help="No Runtime Error check",
        type=str,
        choices=["enabled", "disabled"],
        default="enabled",
    )
    args = parser.parse_args()

    # Shared cache so an image listed under several platforms is tagged once.
    already_tagged: Dict[str, bool] = {}
    default_tag = generate_binary_build_matrix.DEFAULT_TAG

    # Walk every container-image mapping (wheel, libtorch and conda builds).
    for images_by_arch in (
        generate_binary_build_matrix.WHEEL_CONTAINER_IMAGES,
        generate_binary_build_matrix.LIBTORCH_CONTAINER_IMAGES,
        generate_binary_build_matrix.CONDA_CONTAINER_IMAGES,
    ):
        for image in images_by_arch.values():  # type: ignore[attr-defined]
            tag_image(image, default_tag, args.version, args.dry_run, already_tagged)


if __name__ == "__main__":
    main()

.github/workflows/build-triton-wheel.yml

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,7 @@ jobs:
3434
strategy:
3535
fail-fast: false
3636
matrix:
37-
py_vers: [ "3.8", "3.9", "3.10", "3.11" ]
37+
py_vers: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
3838
device: ["cuda", "rocm"]
3939
include:
4040
- device: "rocm"
@@ -94,6 +94,9 @@ jobs:
9494
3.11)
9595
PYTHON_EXECUTABLE=/opt/python/cp311-cp311/bin/python
9696
;;
97+
3.12)
98+
PYTHON_EXECUTABLE=/opt/python/cp312-cp312/bin/python
99+
;;
97100
*)
98101
echo "Unsupported python version ${PY_VERS}"
99102
exit 1

.github/workflows/docker-builds.yml

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ on:
66
paths:
77
- .ci/docker/**
88
- .github/workflows/docker-builds.yml
9+
- .lintrunner.toml
910
push:
1011
branches:
1112
- main
@@ -14,6 +15,7 @@ on:
1415
paths:
1516
- .ci/docker/**
1617
- .github/workflows/docker-builds.yml
18+
- .lintrunner.toml
1719
schedule:
1820
- cron: 1 3 * * 3
1921

0 commit comments

Comments
 (0)