From c4b14d40450b4dddf92661e4ef837f83e8b0bbf9 Mon Sep 17 00:00:00 2001 From: Naren Dasan Date: Fri, 3 May 2024 22:57:07 -0700 Subject: [PATCH] docs: Example on how to use custom kernels in Torch-TensorRT Signed-off-by: Naren Dasan Signed-off-by: Naren Dasan --- docsrc/conf.py | 2 + docsrc/index.rst | 1 + docsrc/requirements.txt | 8 +- docsrc/sg_execution_times.rst | 46 ++ docsrc/tutorials/images/circ_pad_example.png | Bin 0 -> 17593 bytes examples/dynamo/README.rst | 1 + examples/dynamo/custom_kernel_plugins.py | 666 +++++++++++++++++++ examples/dynamo/requirements.txt | 5 + 8 files changed, 725 insertions(+), 4 deletions(-) create mode 100644 docsrc/sg_execution_times.rst create mode 100644 docsrc/tutorials/images/circ_pad_example.png create mode 100644 examples/dynamo/custom_kernel_plugins.py create mode 100644 examples/dynamo/requirements.txt diff --git a/docsrc/conf.py b/docsrc/conf.py index bffa8635ab..d1cae714cc 100644 --- a/docsrc/conf.py +++ b/docsrc/conf.py @@ -184,6 +184,8 @@ "ConverterImplSignature": "ConverterImplSignature", } +nbsphinx_execute = "never" + # -- A patch that prevents Sphinx from cross-referencing ivar tags ------- # See http://stackoverflow.com/a/41184353/3343043 diff --git a/docsrc/index.rst b/docsrc/index.rst index 455aeab8b3..175ab7e8ab 100644 --- a/docsrc/index.rst +++ b/docsrc/index.rst @@ -111,6 +111,7 @@ Tutorials tutorials/_rendered_examples/dynamo/torch_compile_transformers_example tutorials/_rendered_examples/dynamo/torch_compile_advanced_usage tutorials/_rendered_examples/dynamo/torch_compile_stable_diffusion + tutorials/_rendered_examples/dynamo/custom_kernel_plugins Python API Documenation ------------------------ diff --git a/docsrc/requirements.txt b/docsrc/requirements.txt index dda5333a77..3f9fb34857 100644 --- a/docsrc/requirements.txt +++ b/docsrc/requirements.txt @@ -1,8 +1,8 @@ -sphinx==4.5.0 +sphinx==5.0.1 sphinx-gallery==0.13.0 -breathe==4.33.1 -exhale==0.3.1 +breathe==4.34.0 +exhale==0.3.7 -e git+https://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme -nbsphinx==0.8.8 +nbsphinx==0.9.3 docutils==0.17.1 pillow diff --git a/docsrc/sg_execution_times.rst b/docsrc/sg_execution_times.rst new file mode 100644 index 0000000000..6d90c48660 --- /dev/null +++ b/docsrc/sg_execution_times.rst @@ -0,0 +1,46 @@ + +:orphan: + +.. _sphx_glr_sg_execution_times: + + +Computation times +================= +**00:00.000** total execution time for 4 files **from all galleries**: + +.. container:: + + .. raw:: html + + + + + + + + .. 
list-table::
+   :header-rows: 1
+   :class: table table-striped sg-datatable
+
+   * - Example
+     - Time
+     - Mem (MB)
+   * - :ref:`sphx_glr_tutorials__rendered_examples_dynamo_torch_compile_advanced_usage.py` (``../examples/dynamo/torch_compile_advanced_usage.py``)
+     - 00:00.000
+     - 0.0
+   * - :ref:`sphx_glr_tutorials__rendered_examples_dynamo_torch_compile_resnet_example.py` (``../examples/dynamo/torch_compile_resnet_example.py``)
+     - 00:00.000
+     - 0.0
+   * - :ref:`sphx_glr_tutorials__rendered_examples_dynamo_torch_compile_stable_diffusion.py` (``../examples/dynamo/torch_compile_stable_diffusion.py``)
+     - 00:00.000
+     - 0.0
+   * - :ref:`sphx_glr_tutorials__rendered_examples_dynamo_torch_compile_transformers_example.py` (``../examples/dynamo/torch_compile_transformers_example.py``)
+     - 00:00.000
+     - 0.0
diff --git a/docsrc/tutorials/images/circ_pad_example.png b/docsrc/tutorials/images/circ_pad_example.png
new file mode 100644
index 0000000000000000000000000000000000000000..76a8e9fd16cb91b46d41d08827b667fc526e4000
GIT binary patch
literal 17593
[base85-encoded binary image data for circ_pad_example.png omitted]

diff --git a/examples/dynamo/README.rst b/examples/dynamo/README.rst
index d895cc0113..7191c02fa0 100644
--- a/examples/dynamo/README.rst
+++ b/examples/dynamo/README.rst
@@ -10,3 +10,4 @@ a number of ways you can leverage this backend to accelerate inference.
* :ref:`torch_compile_transformer`: Compiling a Transformer model using ``torch.compile``
* :ref:`torch_compile_advanced_usage`: Advanced usage including making a custom backend to use directly with the ``torch.compile`` API
* :ref:`torch_compile_stable_diffusion`: Compiling a Stable Diffusion model using ``torch.compile``
+* :ref:`custom_kernel_plugins`: Creating a plugin to use a custom kernel inside TensorRT engines
diff --git a/examples/dynamo/custom_kernel_plugins.py b/examples/dynamo/custom_kernel_plugins.py
new file mode 100644
index 0000000000..4165c54105
--- /dev/null
+++ b/examples/dynamo/custom_kernel_plugins.py
@@ -0,0 +1,666 @@
+"""
+.. _custom_kernel_plugins:
+
+Using Custom Kernels within TensorRT Engines with Torch-TensorRT
+===================================================================
+
+We are going to demonstrate how a developer could include a custom kernel in a TensorRT engine using Torch-TensorRT.
+
+Torch-TensorRT supports falling back to PyTorch implementations of operations in the case that Torch-TensorRT
+does not know how to compile them in TensorRT. However, this comes at the cost of a graph break and will reduce the performance of the model.
+The easiest way to fix a lack of support for an op is to add a decomposition (see:
+`Writing lowering passes for the Dynamo frontend `_) - which defines the operator
+in terms of PyTorch ops that are supported in Torch-TensorRT - or a converter (see:
+`Writing converters for the Dynamo frontend `_) - which defines the operator in terms of TensorRT operators.
+
+In some cases there isn't a great way to do either of these, perhaps because the operator is a custom kernel that is not part of standard PyTorch,
+or because TensorRT cannot support it natively.
+
+For these cases, it is possible to use a TensorRT plugin to replace the operator **inside** the TensorRT engine, thereby avoiding
+the performance and resource overhead of a graph break.
+For the sake of demonstration, consider circular padding. Circular padding is useful for ops like circular convolution in deep learning.
+The following image shows how the original image (red) is circularly padded once (green) and twice (blue):
+
+.. image:: /tutorials/images/circ_pad_example.png
+   :width: 512px
+   :height: 512px
+   :scale: 50 %
+   :align: right
+
+
+"""
+
+# %%
+# Writing Custom Operators in PyTorch
+# -----------------------------------------
+#
+# Assume that, for whatever reason, we would like to use a custom implementation of circular padding, in this case implemented using a kernel written in `OpenAI Triton `_.
+#
+# When using custom kernels with PyTorch, it is recommended to take the additional step of registering them as formal operators in PyTorch. This will both make it easier to handle
+# the operation in Torch-TensorRT and simplify its use in PyTorch. This could either be done as part of a C++ library or in Python.
(see: `Custom ops in C++ `_ and `Python custom ops `_ for more details ) + +from typing import Any, Sequence + +import numpy as np +import torch +import triton +import triton.language as tl +from torch.library import custom_op + + +# Defining the kernel to be run on the GPU +@triton.jit # type: ignore +def circ_pad_kernel( + X: torch.Tensor, + all_pads_0: tl.int32, + all_pads_2: tl.int32, + all_pads_4: tl.int32, + all_pads_6: tl.int32, + orig_dims_0: tl.int32, + orig_dims_1: tl.int32, + orig_dims_2: tl.int32, + orig_dims_3: tl.int32, + Y: torch.Tensor, + Y_shape_1: tl.int32, + Y_shape_2: tl.int32, + Y_shape_3: tl.int32, + X_len: tl.int32, + Y_len: tl.int32, + BLOCK_SIZE: tl.constexpr, +) -> None: + pid = tl.program_id(0) + i = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE) + + mask_y = i < Y_len + + i3 = i % Y_shape_3 + i2 = (i // Y_shape_3) % Y_shape_2 + i1 = (i // Y_shape_3 // Y_shape_2) % Y_shape_1 + i0 = i // Y_shape_3 // Y_shape_2 // Y_shape_1 + + j0 = (i0 - all_pads_0 + orig_dims_0) % orig_dims_0 + j1 = (i1 - all_pads_2 + orig_dims_1) % orig_dims_1 + j2 = (i2 - all_pads_4 + orig_dims_2) % orig_dims_2 + j3 = (i3 - all_pads_6 + orig_dims_3) % orig_dims_3 + + load_idx = ( + orig_dims_3 * orig_dims_2 * orig_dims_1 * j0 + + orig_dims_3 * orig_dims_2 * j1 + + orig_dims_3 * j2 + + j3 + ) + mask_x = load_idx < X_len + + x = tl.load(X + load_idx, mask=mask_x) + + tl.store(Y + i, x, mask=mask_y) + + +# The launch code wrapped to expose it as a custom operator in our namespace +@custom_op("torchtrt_ex::triton_circular_pad", mutates_args=()) # type: ignore[misc] +def triton_circular_pad(x: torch.Tensor, padding: Sequence[int]) -> torch.Tensor: + out_dims = np.ones(len(x.shape), dtype=np.int32) + for i in range(np.size(padding) // 2): + out_dims[len(out_dims) - i - 1] = ( + x.shape[len(out_dims) - i - 1] + padding[i * 2] + padding[i * 2 + 1] + ) + + y = torch.empty(tuple(out_dims.tolist()), device=x.device) + + N = len(x.shape) + all_pads = np.zeros((N * 2,), dtype=np.int32) + orig_dims = np.array(x.shape, dtype=np.int32) + out_dims = np.array(x.shape, dtype=np.int32) + + for i in range(len(padding) // 2): + out_dims[N - i - 1] += padding[i * 2] + padding[i * 2 + 1] + all_pads[N * 2 - 2 * i - 2] = padding[i * 2] + all_pads[N * 2 - 2 * i - 1] = padding[i * 2 + 1] + + blockSize = 256 + numBlocks = (int((np.prod(out_dims) + blockSize - 1) // blockSize),) + + circ_pad_kernel[numBlocks]( + x, + all_pads[0], + all_pads[2], + all_pads[4], + all_pads[6], + orig_dims[0], + orig_dims[1], + orig_dims[2], + orig_dims[3], + y, + out_dims[1], + out_dims[2], + out_dims[3], + int(np.prod(orig_dims)), + int(np.prod(out_dims)), + BLOCK_SIZE=256, + ) + + return y + + +# %% +# Above is all that is required to create a custom operator for PyTorch. We can now call it directly as ``torch.ops.torchtrt_ex.triton_circular_pad`` + +# %% +# Testing our custom op +# ^^^^^^^^^^^^^^^^^^^^^^^ + +# %% +# The native PyTorch implementation + +ex_input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3).to("cuda") +padding = (1, 1, 2, 0) +torch.nn.functional.pad(ex_input, padding, "circular") + +############################################################################## +# .. 
code-block:: none +# +# tensor([[[[5., 3., 4., 5., 3.], +# [8., 6., 7., 8., 6.], +# [2., 0., 1., 2., 0.], +# [5., 3., 4., 5., 3.], +# [8., 6., 7., 8., 6.]]]], device='cuda:0') + +# %% +# Our custom implementation +torch.ops.torchtrt_ex.triton_circular_pad(ex_input, padding) + +############################################################################## +# .. code-block:: none +# +# tensor([[[[5., 3., 4., 5., 3.], +# [8., 6., 7., 8., 6.], +# [2., 0., 1., 2., 0.], +# [5., 3., 4., 5., 3.], +# [8., 6., 7., 8., 6.]]]], device='cuda:0') + +# %% +# We have defined the minimum to start using our custom op in PyTorch, but to take the extra step of making this operator tracable by Dynamo (a prerequisite for being supported in Torch-TensorRT), +# we need to define a "Fake Tensor" implementation of the op. This function defines the effect that our kernel would have on input tensors in terms of native PyTorch ops. +# It allows Dynamo to calculate tensor properties like sizes, stride, device etc. without needing to use real data (More information `here `_). +# In our case we can just use the native circular pad operation as our FakeTensor implementation. + + +@torch.library.register_fake("torchtrt_ex::triton_circular_pad") # type: ignore[misc] +def _(x: torch.Tensor, padding: Sequence[int]) -> torch.Tensor: + return torch.nn.functional.pad(x, padding, "circular") + + +# Additionally one may want to define an autograd implementation for the backwards pass to round out the custom op implmentation but that is beyond the scope of this tutorial (see https://pytorch.org/docs/main/library.html#torch.library.register_autograd for more) + + +# %% +# Using the Custom Operator in a Model +# ----------------------------------------- +# We can now create models using our custom op. Here is a small example one that uses both natively supported operators (Convolution) and our custom op. + +from typing import Sequence + +from torch import nn + + +class MyModel(nn.Module): # type: ignore[misc] + def __init__(self, padding: Sequence[int]): + super().__init__() + + self.padding = padding + self.conv = nn.Conv2d(1, 5, kernel_size=3) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + padded_x = torch.ops.torchtrt_ex.triton_circular_pad(x, self.padding) + y = self.conv(padded_x) + + return y + + +my_model = MyModel((1, 1, 2, 0)).to("cuda") +my_model(ex_input) + +############################################################################## +# .. code-block:: none +# +# tensor([[[[-0.2604, -0.4232, -0.3041], +# [-3.0833, -3.2461, -3.1270], +# [-0.2450, -0.4079, -0.2887]], +# +# [[ 0.2828, -0.0373, 1.0332], +# [-2.3143, -2.6344, -1.5638], +# [-1.1867, -1.5068, -0.4363]], +# +# [[ 1.7937, 1.3488, 2.1350], +# [ 0.7966, 0.3517, 1.1379], +# [ 3.5537, 3.1088, 3.8950]], +# +# [[-1.0550, -0.6163, -1.0109], +# [ 0.5245, 0.9632, 0.5686], +# [ 0.3775, 0.8162, 0.4216]], +# +# [[-0.4311, -0.1649, -1.2091], +# [-4.3668, -4.1006, -5.1447], +# [-5.0352, -4.7689, -5.8131]]]], device='cuda:0') + +# %% +# If we try to compile this model with Torch-TensorRT, we can see that (as of Torch-TensorRT 2.4.0) a number of subgraphs are created to run the custom op in PyTorch and the convolution in TensorRT + +import torch_tensorrt as torchtrt + +torchtrt.compile( + my_model, + inputs=[ex_input], + dryrun=True, # Check the support of the model without having to build the engines + min_block_size=1, +) + +############################################################################### +# .. 
code-block:: none +# +# GraphModule( +# (_run_on_gpu_0): GraphModule() +# (_run_on_acc_1): GraphModule( +# (conv): Module() +# ) +# ) +# +# ++++++++++++++ Dry-Run Results for Graph +++++++++++++++++ +# +# The graph consists of 2 Total Operators, of which 1 operators are supported, 50.0% coverage +# +# The following ops are currently unsupported or excluded from conversion, and are listed with their op-count in the graph: +# torch.ops.torchtrt_ex.triton_circular_pad.default: 1 +# +# The following nodes are currently set to run in Torch: +# Node: torch.ops.torchtrt_ex.triton_circular_pad.default, with layer location: __/triton_circular_pad +# Note: Some of the above nodes may be supported, but were not included in a TRT graph by the partitioner +# +# Compiled with: CompilationSettings(enabled_precisions={}, debug=False, workspace_size=0, min_block_size=1, torch_executed_ops=set(), pass_through_build_failures=False, max_aux_streams=None, version_compatible=False, optimization_level=None, use_python_runtime=False, truncate_double=False, use_fast_partitioner=True, enable_experimental_decompositions=False, device=Device(type=DeviceType.GPU, gpu_id=0), require_full_compilation=False, disable_tf32=False, sparse_weights=False, refit=False, engine_capability=, num_avg_timing_iters=1, dla_sram_size=1048576, dla_local_dram_size=1073741824, dla_global_dram_size=536870912, dryrun=True, hardware_compatible=False) +# +# Graph Structure: +# +# Inputs: List[Tensor: (1, 1, 3, 3)@float32] +# ... +# TRT Engine #1 - Submodule name: _run_on_acc_1 +# Engine Inputs: List[Tensor: (1, 1, 5, 5)@float32] +# Number of Operators in Engine: 1 +# Engine Outputs: Tensor: (1, 5, 3, 3)@float32 +# ... +# Outputs: List[Tensor: (1, 5, 3, 3)@float32] +# +# --------- Aggregate Stats --------- +# +# Average Number of Operators per TRT Engine: 1.0 +# Most Operators in a TRT Engine: 1 +# +# ********** Recommendations ********** +# +# - For minimal graph segmentation, select min_block_size=1 which would generate 1 TRT engine(s) +# - The current level of graph segmentation is equivalent to selecting min_block_size=1 which generates 1 TRT engine(s) +# +# We see that there is going to be 2 subgraphs, one that will run through PyTorch for our custom op and one through TensorRT for the convolution. This graph break is going to be a significant portion of the latency of this model. + + +# %% +# Wrapping Custom Kernels to use in TensorRT +# ============================================= +# +# To address this graph break, the first step is to make our kernel implementation available in TensorRT. Again this can be done in either C++ or Python. For the actual details on how to implement +# TensorRT plugins refer `here `_. From a high level, similar to PyTorch you will need to +# define systems to handle setting up the operator, calculating the effect of the operation abstractly, serializing the op and the actual mechanics of calling the implementation of the op in the engine. 
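+# As a rough roadmap for the implementation below: ``get_output_datatype`` and ``get_output_dimensions`` describe the op abstractly (output dtype and padded output shape),
+# ``supports_format_combination`` declares which formats and precisions are accepted, ``configure_plugin`` caches the input shape, ``serialize`` captures the padding settings
+# so an engine can be saved and reloaded, and ``enqueue`` launches the same Triton kernel we registered with PyTorch. A matching plugin creator registered with the
+# TensorRT plugin registry then handles construction and deserialization of the plugin.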
+# + +import pickle as pkl +from typing import Any, List, Optional, Self + +import cupy as cp # Needed to work around API gaps in PyTorch to build torch.Tensors around preallocated CUDA memory +import numpy as np + +import tensorrt as trt + + +class CircularPaddingPlugin(trt.IPluginV2DynamicExt): # type: ignore[misc] + def __init__( + self, field_collection: Optional[List[trt.PluginFieldCollection]] = None + ): + super().__init__() + self.pads = [] + self.X_shape: List[int] = [] + + self.num_outputs = 1 + self.plugin_namespace = "" + self.plugin_type = "CircularPaddingPlugin" + self.plugin_version = "1" + + if field_collection is not None: + assert field_collection[0].name == "pads" + self.pads = field_collection[0].data + + def get_output_datatype( + self, index: int, input_types: List[trt.DataType] + ) -> trt.DataType: + return input_types[0] + + def get_output_dimensions( + self, + output_index: int, + inputs: List[trt.DimsExprs], + exprBuilder: trt.IExprBuilder, + ) -> trt.DimsExprs: + + output_dims = trt.DimsExprs(inputs[0]) + + for i in range(np.size(self.pads) // 2): + output_dims[len(output_dims) - i - 1] = exprBuilder.operation( + trt.DimensionOperation.SUM, + inputs[0][len(output_dims) - i - 1], + exprBuilder.constant(self.pads[i * 2] + self.pads[i * 2 + 1]), + ) + + return output_dims + + def configure_plugin( + self, + inp: List[trt.DynamicPluginTensorDesc], + out: List[trt.DynamicPluginTensorDesc], + ) -> None: + X_dims = inp[0].desc.dims + self.X_shape = np.zeros((len(X_dims),)) + for i in range(len(X_dims)): + self.X_shape[i] = X_dims[i] + + def serialize(self) -> bytes: + return pkl.dumps({"pads": self.pads}) + + def supports_format_combination( + self, pos: int, in_out: List[trt.PluginTensorDesc], num_inputs: int + ) -> bool: + assert num_inputs == 1 + assert pos < len(in_out) + + desc = in_out[pos] + if desc.format != trt.TensorFormat.LINEAR: + return False + + # first input should be float16 or float32 + if pos == 0: + return bool( + (desc.type == trt.DataType.FLOAT) or desc.type == (trt.DataType.HALF) + ) + + # output should have the same type as the input + if pos == 1: + return bool((in_out[0].type == desc.type)) + + return False + + def enqueue( + self, + input_desc: List[trt.PluginTensorDesc], + output_desc: List[trt.PluginTensorDesc], + inputs: List[int], + outputs: List[int], + workspace: int, + stream: int, + ) -> None: + + # Host code is slightly different as this will be run as part of the TRT execution + in_dtype = torchtrt.dtype.try_from(input_desc[0].type).to(np.dtype) + + a_mem = cp.cuda.UnownedMemory( + inputs[0], np.prod(input_desc[0].dims) * cp.dtype(in_dtype).itemsize, self + ) + c_mem = cp.cuda.UnownedMemory( + outputs[0], + np.prod(output_desc[0].dims) * cp.dtype(in_dtype).itemsize, + self, + ) + + a_ptr = cp.cuda.MemoryPointer(a_mem, 0) + c_ptr = cp.cuda.MemoryPointer(c_mem, 0) + + a_d = cp.ndarray((np.prod(input_desc[0].dims)), dtype=in_dtype, memptr=a_ptr) + c_d = cp.ndarray((np.prod(output_desc[0].dims)), dtype=in_dtype, memptr=c_ptr) + + a_t = torch.as_tensor(a_d, device="cuda") + c_t = torch.as_tensor(c_d, device="cuda") + + N = len(self.X_shape) + all_pads = np.zeros((N * 2,), dtype=np.int32) + orig_dims = np.array(self.X_shape, dtype=np.int32) + out_dims = np.array(self.X_shape, dtype=np.int32) + + for i in range(np.size(self.pads) // 2): + out_dims[N - i - 1] += self.pads[i * 2] + self.pads[i * 2 + 1] + all_pads[N * 2 - 2 * i - 2] = self.pads[i * 2] + all_pads[N * 2 - 2 * i - 1] = self.pads[i * 2 + 1] + + all_pads = all_pads.tolist() + 
orig_dims = orig_dims.tolist() + out_dims = out_dims.tolist() + + blockSize = 256 + numBlocks = (int((np.prod(out_dims) + blockSize - 1) // blockSize),) + + # Call the same kernel implementation we use in PyTorch + circ_pad_kernel[numBlocks]( + a_t, + all_pads[0], + all_pads[2], + all_pads[4], + all_pads[6], + orig_dims[0], + orig_dims[1], + orig_dims[2], + orig_dims[3], + c_t, + out_dims[1], + out_dims[2], + out_dims[3], + int(np.prod(orig_dims)), + int(np.prod(out_dims)), + BLOCK_SIZE=256, + ) + + def clone(self) -> Self: + cloned_plugin = CircularPaddingPlugin() + cloned_plugin.__dict__.update(self.__dict__) + return cloned_plugin + + +class CircularPaddingPluginCreator(trt.IPluginCreator): # type: ignore[misc] + def __init__(self): + super().__init__() + + self.name = "CircularPaddingPlugin" + self.plugin_namespace = "" + self.plugin_version = "1" + self.field_names = trt.PluginFieldCollection( + [trt.PluginField("pads", np.array([]), trt.PluginFieldType.INT32)] + ) + + def create_plugin( + self, name: str, field_collection: trt.PluginFieldCollection_ + ) -> CircularPaddingPlugin: + return CircularPaddingPlugin(field_collection) + + def deserialize_plugin(self, name: str, data: bytes) -> CircularPaddingPlugin: + pads_dict = pkl.loads(data) + print(pads_dict) + deserialized = CircularPaddingPlugin() + deserialized.__dict__.update(pads_dict) + print(deserialized.pads) + return deserialized + + +# Register the plugin creator in the TensorRT Plugin Registry +TRT_PLUGIN_REGISTRY = trt.get_plugin_registry() +TRT_PLUGIN_REGISTRY.register_creator(CircularPaddingPluginCreator(), "") # type: ignore[no-untyped-call] + + +# %% +# Using Torch-TensorRT to Insert the Kernel +# ============================================= +# Now with our TensorRT plugin, we can create a converter so that Torch-TensorRT knows to insert our plugin in place of our custom circular padding operator. +# More information on writing converters can be found `here `_ + +from typing import Dict, Tuple + +from torch.fx.node import Argument, Target +from torch_tensorrt.dynamo.conversion import ( + ConversionContext, + dynamo_tensorrt_converter, +) +from torch_tensorrt.dynamo.conversion.converter_utils import get_trt_tensor +from torch_tensorrt.fx.converters.converter_utils import set_layer_name + + +@dynamo_tensorrt_converter( + torch.ops.torchtrt_ex.triton_circular_pad.default +) # type: ignore +# Recall the schema defined above: +# torch.ops.torchtrt_ex.triton_circular_pad.default(Tensor x, IntList padding) -> Tensor +def circular_padding_converter( + ctx: ConversionContext, + target: Target, + args: Tuple[Argument, ...], + kwargs: Dict[str, Argument], + name: str, +): + + # How to retrieve a plugin if it is defined elsewhere (e.g. linked library) + plugin_registry = trt.get_plugin_registry() + plugin_creator = plugin_registry.get_plugin_creator( + type="CircularPaddingPlugin", version="1", plugin_namespace="" + ) + assert plugin_creator, f"Unabled to find CircularPaddingPlugin creator" + + # Pass configurations to the plugin implementation + field_configs = trt.PluginFieldCollection( + [ + trt.PluginField( + "pads", + np.array( + args[1], dtype=np.int32 + ), # Arg 1 of `torch.ops.torchtrt_ex.triton_circular_pad` is the int list containing the padding settings. 
Note: the dtype matters as you are eventually passing this as a c-like buffer + trt.PluginFieldType.INT32, + ), + ] + ) + + plugin = plugin_creator.create_plugin(name=name, field_collection=field_configs) + assert plugin, "Unable to create CircularPaddingPlugin" + + input_tensor = args[ + 0 + ] # Arg 0 `torch.ops.torchtrt_ex.triton_circular_pad` is the input tensor + if not isinstance(input_tensor, trt.ITensor): + # Freeze input tensor if not TensorRT Tensor already + input_tensor = get_trt_tensor(ctx, input_tensor, f"{name}_input") + + layer = ctx.net.add_plugin_v2( + [input_tensor], plugin + ) # Add the plugin to the network being constructed + layer.name = f"circular_padding_plugin-{name}" + return layer.get_output(0) + + +# %% +# Finally, we are now able to fully compile our model + +trt_model = torchtrt.compile( + my_model, + inputs=[ex_input], + min_block_size=1, +) +############################################################################### +# .. code-block:: none +# +# GraphModule( +# (_run_on_acc_0): TorchTensorRTModule() +# ) +# +# +++++++++++++++ Dry-Run Results for Graph ++++++++++++++++ +# +# The graph consists of 2 Total Operators, of which 2 operators are supported, 100.0% coverage +# +# Compiled with: CompilationSettings(enabled_precisions={}, debug=True, workspace_size=0, min_block_size=1, torch_executed_ops=set(), pass_through_build_failures=False, max_aux_streams=None, version_compatible=False, optimization_level=None, use_python_runtime=False, truncate_double=False, use_fast_partitioner=True, enable_experimental_decompositions=False, device=Device(type=DeviceType.GPU, gpu_id=0), require_full_compilation=False, disable_tf32=False, sparse_weights=False, refit=False, engine_capability=, num_avg_timing_iters=1, dla_sram_size=1048576, dla_local_dram_size=1073741824, dla_global_dram_size=536870912, dryrun=False, hardware_compatible=False) +# +# Graph Structure: +# +# Inputs: List[Tensor: (1, 1, 3, 3)@float32] +# ... +# TRT Engine #1 - Submodule name: _run_on_acc_0 +# Engine Inputs: List[Tensor: (1, 1, 3, 3)@float32] +# Number of Operators in Engine: 2 +# Engine Outputs: Tensor: (1, 5, 3, 3)@float32 +# ... +# Outputs: List[Tensor: (1, 5, 3, 3)@float32] +# +# ---------- Aggregate Stats ------------- +# +# Average Number of Operators per TRT Engine: 2.0 +# Most Operators in a TRT Engine: 2 +# +# ********** Recommendations ********** +# +# - For minimal graph segmentation, select min_block_size=2 which would generate 1 TRT engine(s) +# - The current level of graph segmentation is equivalent to selecting min_block_size=2 which generates 1 TRT engine(s) + +############################################## +# As you can see, now there is only one subgraph created for the TensorRT engine that contains both our custom kernel and the native convolution operator. + +print(trt_model(ex_input)) + +############################################################################## +# .. 
code-block:: none +# +# tensor([[[[-0.2604, -0.4232, -0.3041], +# [-3.0833, -3.2461, -3.1270], +# [-0.2450, -0.4079, -0.2887]], +# +# [[ 0.2828, -0.0373, 1.0332], +# [-2.3143, -2.6344, -1.5638], +# [-1.1867, -1.5068, -0.4363]], +# +# [[ 1.7937, 1.3488, 2.1350], +# [ 0.7966, 0.3517, 1.1379], +# [ 3.5537, 3.1088, 3.8950]], +# +# [[-1.0550, -0.6163, -1.0109], +# [ 0.5245, 0.9632, 0.5686], +# [ 0.3775, 0.8162, 0.4216]], +# +# [[-0.4311, -0.1649, -1.2091], +# [-4.3668, -4.1006, -5.1447], +# [-5.0352, -4.7689, -5.8131]]]], device='cuda:0') + + +# %% +# We can verify our implementation is run correctly by both TensorRT and PyTorch + +print(my_model(ex_input) - trt_model(ex_input)) + +############################################################################## +# .. code-block:: none +# +# tensor([[[[0., 0., 0.], +# [0., 0., 0.], +# [0., 0., 0.]], +# +# [[0., 0., 0.], +# [0., 0., 0.], +# [0., 0., 0.]], +# +# [[0., 0., 0.], +# [0., 0., 0.], +# [0., 0., 0.]], +# +# [[0., 0., 0.], +# [0., 0., 0.], +# [0., 0., 0.]], +# +# [[0., 0., 0.], +# [0., 0., 0.], +# [0., 0., 0.]]]], device='cuda:0', grad_fn=) diff --git a/examples/dynamo/requirements.txt b/examples/dynamo/requirements.txt new file mode 100644 index 0000000000..a66d12bd1f --- /dev/null +++ b/examples/dynamo/requirements.txt @@ -0,0 +1,5 @@ +cupy==13.1.0 +tensorrt==10.0.1 +torch>=2.4.0.dev20240503+cu121 +torch-tensorrt>=2.4.0.dev20240503+cu121 +triton==2.3.0