From f722539d07ad2ac67faa8d348d9458521478ff73 Mon Sep 17 00:00:00 2001
From: zilto
Date: Thu, 9 Nov 2023 15:43:09 -0500
Subject: [PATCH] added webscraper contrib

---
 .../contrib/user/zilto/webscraper/README.md   |  20 ++
 .../contrib/user/zilto/webscraper/__init__.py |  96 ++++++
 .../contrib/user/zilto/webscraper/dag.png     | Bin 0 -> 46857 bytes
 .../user/zilto/webscraper/requirements.txt    |   4 +
 .../contrib/user/zilto/webscraper/run.ipynb   | 273 ++++++++++++++++++
 .../contrib/user/zilto/webscraper/tags.json   |   7 +
 .../user/zilto/webscraper/valid_configs.jsonl |   0
 7 files changed, 400 insertions(+)
 create mode 100644 contrib/hamilton/contrib/user/zilto/webscraper/README.md
 create mode 100644 contrib/hamilton/contrib/user/zilto/webscraper/__init__.py
 create mode 100644 contrib/hamilton/contrib/user/zilto/webscraper/dag.png
 create mode 100644 contrib/hamilton/contrib/user/zilto/webscraper/requirements.txt
 create mode 100644 contrib/hamilton/contrib/user/zilto/webscraper/run.ipynb
 create mode 100644 contrib/hamilton/contrib/user/zilto/webscraper/tags.json
 create mode 100644 contrib/hamilton/contrib/user/zilto/webscraper/valid_configs.jsonl

diff --git a/contrib/hamilton/contrib/user/zilto/webscraper/README.md b/contrib/hamilton/contrib/user/zilto/webscraper/README.md
new file mode 100644
index 000000000..fa3a0b505
--- /dev/null
+++ b/contrib/hamilton/contrib/user/zilto/webscraper/README.md
@@ -0,0 +1,20 @@
+# Purpose of this module
+
+This module implements a simple webscraper that collects the specified HTML tags and removes undesirable ones. Simply give it a list of URLs.
+
+Retry and exponential backoff logic for HTTP requests is implemented using the `tenacity` package.
+
+# Configuration Options
+## Config.when
+This module doesn't receive any configuration.
+
+## Inputs
+ - `urls` (Required): a list of valid URLs to scrape
+ - `tags_to_extract`: a list of HTML tags to extract
+ - `tags_to_remove`: a list of HTML tags to remove
+
+## Overrides
+ - `parsed_html`: if this function doesn't provide enough flexibility, another parser can be provided as long as it has the parameters `url` and `html_page` and returns a `ParsingResult` object.
+
+# Limitations
+- The retry and backoff values need to be edited via the decorator of `html_page()`; no per-request timeout is currently set.
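For context, here is a minimal driver sketch showing how the inputs described above are wired together. It mirrors the `run.ipynb` notebook included in this patch; the import name `webscraper` is an assumption (the notebook imports the module as `import __init__ as webscraper`).

```python
# Minimal usage sketch; the module import name is assumed, not prescribed.
from hamilton import driver

import webscraper  # hypothetical import name for this contrib module

dr = (
    driver.Builder()
    .enable_dynamic_execution(allow_experimental_mode=True)  # needed for Parallelizable/Collect nodes
    .with_modules(webscraper)
    .build()
)

results = dr.execute(
    final_vars=["parsed_html_collection"],
    inputs={
        "urls": ["https://blog.dagworks.io/p/llmops-production-prompt-engineering"],
        "tags_to_extract": ["p", "li", "div"],  # optional; mirrors the defaults
        "tags_to_remove": ["script", "style"],  # optional; mirrors the defaults
    },
)
for result in results["parsed_html_collection"]:
    print(result.url, len(result.parsed))
```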
diff --git a/contrib/hamilton/contrib/user/zilto/webscraper/__init__.py b/contrib/hamilton/contrib/user/zilto/webscraper/__init__.py
new file mode 100644
index 000000000..762bf7cd0
--- /dev/null
+++ b/contrib/hamilton/contrib/user/zilto/webscraper/__init__.py
@@ -0,0 +1,96 @@
+import logging
+from typing import Any, List
+
+logger = logging.getLogger(__name__)
+
+from hamilton import contrib
+
+with contrib.catch_import_errors(__name__, __file__, logger):
+    from bs4 import BeautifulSoup
+    import lxml  # noqa: F401
+    import requests
+    from tenacity import retry, stop_after_attempt, wait_random_exponential
+
+import dataclasses
+
+from hamilton.htypes import Collect, Parallelizable
+
+
+@dataclasses.dataclass
+class ParsingResult:
+    """Result from the parsing function
+
+    :param url: url of the HTML page
+    :param parsed: the result of the parsing function
+    """
+
+    url: str
+    parsed: Any
+
+
+def url(urls: List[str]) -> Parallelizable[str]:
+    """Iterate over the list of urls and create one branch per url
+
+    :param urls: list of urls to scrape and parse
+    :return: a single url to scrape and parse
+    """
+    for url_ in urls:
+        yield url_
+
+
+@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
+def html_page(url: str) -> str:
+    """Get the HTML page as a string.
+    The tenacity decorator sets the retry and exponential backoff logic.
+
+    :param url: a single url to request
+    :return: the HTML page as a string
+    """
+    response = requests.get(url)
+    response.raise_for_status()
+    return response.text
+
+
+def parsed_html(
+    url: str,
+    html_page: str,
+    tags_to_extract: List[str] = ["p", "li", "div"],
+    tags_to_remove: List[str] = ["script", "style"],
+) -> ParsingResult:
+    """Parse an HTML string using BeautifulSoup
+
+    :param url: the url of the requested page
+    :param html_page: the HTML page associated with the url
+    :param tags_to_extract: HTML tags to extract and gather
+    :param tags_to_remove: HTML tags to remove before extraction
+    :return: a ParsingResult containing the url and the parsed content
+    """
+    soup = BeautifulSoup(html_page, features="lxml")
+
+    for tag in tags_to_remove:
+        for element in soup.find_all(tag):
+            element.decompose()
+
+    content = []
+    for tag in tags_to_extract:
+        for element in soup.find_all(tag):
+            if tag == "a":
+                href = element.get("href")
+                if href:
+                    content.append(f"{element.get_text()} ({href})")
+                else:
+                    content.append(element.get_text(strip=True))
+            else:
+                content.append(element.get_text(strip=True))
+    content = " ".join(content)
+
+    return ParsingResult(url=url, parsed=content)
+
+
+def parsed_html_collection(parsed_html: Collect[ParsingResult]) -> List[ParsingResult]:
+    """Collect parallel branches of `parsed_html`
+
+    :param parsed_html: receive the ParsingResult associated with each url
+    :return: list of ParsingResult
+    """
+    return list(parsed_html)
diff --git a/contrib/hamilton/contrib/user/zilto/webscraper/dag.png b/contrib/hamilton/contrib/user/zilto/webscraper/dag.png
new file mode 100644
index 0000000000000000000000000000000000000000..1b3768e5d922bad0881c5607525e384c679268f6
GIT binary patch
literal 46857
[46857 bytes of encoded binary PNG data for dag.png (the dataflow visualization) not reproduced here]
R%oCqIp`vvxPucw1{{pSwidg^v literal 0 HcmV?d00001 diff --git a/contrib/hamilton/contrib/user/zilto/webscraper/requirements.txt b/contrib/hamilton/contrib/user/zilto/webscraper/requirements.txt new file mode 100644 index 000000000..355b16d8c --- /dev/null +++ b/contrib/hamilton/contrib/user/zilto/webscraper/requirements.txt @@ -0,0 +1,4 @@ +beautifulsoup4 +lxml +requests +sf-hamilton[visualization] diff --git a/contrib/hamilton/contrib/user/zilto/webscraper/run.ipynb b/contrib/hamilton/contrib/user/zilto/webscraper/run.ipynb new file mode 100644 index 000000000..6ab56dd1e --- /dev/null +++ b/contrib/hamilton/contrib/user/zilto/webscraper/run.ipynb @@ -0,0 +1,273 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Imports" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from pprint import pprint\n", + "from IPython.display import display\n", + "from hamilton import driver\n", + "\n", + "import __init__ as webscraper" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Note: Hamilton collects completely anonymous data about usage. This will help us improve Hamilton over time. See https://github.com/dagworks-inc/hamilton#usage-analytics--data-privacy for details.\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "%3\n", + "\n", + "\n", + "cluster__legend\n", + "\n", + "Legend\n", + "\n", + "\n", + "\n", + "parsed_html\n", + "\n", + "parsed_html\n", + "ParsingResult\n", + "\n", + "\n", + "\n", + "parsed_html_collection\n", + "\n", + "\n", + "parsed_html_collection\n", + "List\n", + "\n", + "\n", + "\n", + "parsed_html->parsed_html_collection\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "url\n", + "\n", + "\n", + "url\n", + "Parallelizable\n", + "\n", + "\n", + "\n", + "url->parsed_html\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "html_page\n", + "\n", + "html_page\n", + "str\n", + "\n", + "\n", + "\n", + "url->html_page\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "html_page->parsed_html\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "_parsed_html_inputs\n", + "\n", + "remove_lines\n", + "bool\n", + "tags_to_extract\n", + "List\n", + "tags_to_remove\n", + "List\n", + "\n", + "\n", + "\n", + "_parsed_html_inputs->parsed_html\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "_url_inputs\n", + "\n", + "urls\n", + "list\n", + "\n", + "\n", + "\n", + "_url_inputs->url\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "input\n", + "\n", + "input\n", + "\n", + "\n", + "\n", + "function\n", + "\n", + "function\n", + "\n", + "\n", + "\n", + "expand\n", + "\n", + "\n", + "expand\n", + "\n", + "\n", + "\n", + "collect\n", + "\n", + "\n", + "collect\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "dr = (\n", + " driver.Builder()\n", + " .enable_dynamic_execution(allow_experimental_mode=True) # this allows parallel/collect nodes\n", + " .with_modules(webscraper)\n", + " .build()\n", + ")\n", + "\n", + "display(dr.display_all_functions(None))" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/home/tjean/projects/dagworks/hamilton/contrib/hamilton/contrib/user/zilto/webscraper/__init__.py:52: GuessedAtParserWarning: No parser was explicitly specified, so I'm 
using the best available HTML parser for this system (\"lxml\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n", + "\n", + "The code that caused this warning is on line 52 of the file /home/tjean/projects/dagworks/hamilton/contrib/hamilton/contrib/user/zilto/webscraper/__init__.py. To get rid of this warning, pass the additional argument 'features=\"lxml\"' to the BeautifulSoup constructor.\n", + "\n", + " soup = BeautifulSoup(html_page)\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['parsed_html_collection']\n" + ] + } + ], + "source": [ + "final_vars = [\"parsed_html_collection\"]\n", + "\n", + "inputs = dict(\n", + " urls=[\n", + " \"https://blog.dagworks.io/p/llmops-production-prompt-engineering\",\n", + " ]\n", + ")\n", + "\n", + "overrides = dict()\n", + "\n", + "res = dr.execute(\n", + " final_vars=final_vars,\n", + " inputs=inputs,\n", + " overrides=overrides\n", + ")\n", + "\n", + "pprint(list(res.keys()), width=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'What you send to your LLM is quite important. Small variations and changes can have large impacts on outputs, so as your product evolves, the need to evolve your prompts will too. LLMs are also constantly being developed and released, and so as LLMs change, your prompts will also need to change. Therefore it’s important to set up an iteration pattern to operationalize how you “deploy” your prompts so you and your team can move efficiently, but also ensure that production issues are minimized, if not avoided. In this post, we’ll guide you through the best practices of managing prompts with Hamilton, making analogies to MLOps patterns, and discussing trade-offs along the way. Notes:(1): if you’re looking for a post that talks about “context management” this isn’t that post. But it is the post that will help you with the nuts and bolts on how to iterate and create that production grade “prompt context management” iteration story. (2): we’ll use prompt & prompt template interchangeably. (3): we’ll assume an “online” web-service setting is where these prompts are being used. (4): we’ll be using ourHamilton’s PDF summarizer exampleto project our patterns onto. (5): not familiar withHamilton? You can either learn about Hamilton viaTry Hamiltonand come back, or get the high level LLMOps approach from this post and then dig into Hamilton via thePDF Summarizer example. (6): what’s our credibility here? We’ve spent our careers building self-service data/MLOps tooling, most famously for Stitch Fix’s 100+ Data Scientists. So we’ve seen our share of outages and approaches play out over time. Thanks for reading DAGWorks’s Substack! Subscribe for free to receive updates and posts like this! Point:Prompts + LLM APIs are analogous to hyper-parameters + machine learning models. In terms of “Ops” practices, LLMOps is still in its infancy. MLOps is a little older, but still neither are widely adopted if you’re comparing it to how widespread knowledge is around DevOps practices. DevOps practices largely concern themselves with how you ship code to production, and MLOps practices how to ship code& data artifacts(e.g., statistical models)to production. So what about LLMOps? Personally, I think it’s closer to MLOps since you have: your LLM workflow is simply code. 
and an LLM API is a data artifact that can be “tweaked” using prompts, similar to a machine learning (ML) model and its hyper-parameters. Therefore, you most likely care about versioning the LLM API + prompts together tightly for good production practices. For instance, in MLOps practice, you’d want a process in place to validate your ML model still behaves correctly whenever its hyper-parameters are changed. To be clear, the two parts to control for are theLLMand theprompts. Much like MLOps, when the code or the model artifact changes, you want to be able to determine which did. For LLMOps, we’ll want the same discernment, separating the LLM workflow from the LLM API + prompts. Importantly, we should consider LLMs (self-hosted or APIs) to be mostly static since we less frequently update (or even control) their internals. So, changing thepromptspart of LLM API + prompts is effectively like creating a new model artifact. There are two main ways to treat prompts: Prompts as dynamic runtime variables. The template used isn’t static to a deployment. Prompts as code.The prompt template is static/ predetermined given a deployment. The main difference is the amount of moving parts you need to manage to ensure a great production story. Below, we dig into how to use Hamilton in the context of these two approaches. Prompts are just strings. Since strings are a primitive type in most languages, this means that they are quite easy to pass around.\\xa0 The idea is to abstract your code so that at runtime you pass in the prompts required.\\xa0 More concretely, you’d “load/reload” prompt templates whenever there’s an “updated” one. The MLOps analogy here, would be to auto-reload the ML model artifact (e.g., a pkl file) whenever a new model is available. The benefit here is that you can very quickly roll out new prompts because you do not need to redeploy your application! The downside to this iteration speed is increased operational burden: To someone monitoring your application, it’ll be unclear when the change occurred and whether it’s propagated itself through your systems. For example, you just pushed a new prompt, and the LLM now returns more tokens per request, causing latency to spike; whoever is monitoring will likely be puzzled, unless you have a great change log culture. Rollback semantics involve having to know aboutanothersystem. You can’t just rollback a prior deployment to fix things. You’ll need great monitoring to understand what was run and when; e.g., when customer service gives you a ticket to investigate, how do you know what prompt was in use? You’ll need to manage and monitor whatever system you’re using to manage and store your prompts. This will be an extra system you’ll need to maintain outside of whatever is serving your code. You’ll need to manage two processes, one for updating and pushing the service, and one for updating and pushing prompts. Synchronizing these changes will be on you. For example, you need to make a code change to your service to handle a new prompt. You will need to coordinate changing two systems to make it work, which is extra operational overhead to manage. Our PDF summarizer flow would look something like this if you removesummarize_text_from_summaries_promptandsummarize_chunk_of_text_promptfunction definitions: To operate things, you’ll want to either inject the prompts at request time: Or\\xa0you change your code to dynamically load prompts, i.e., add functions to retrieve prompts from an external system as part of the Hamilton dataflow. 
At each invocation, they will query for the prompt to use (you can of course cache this for performance): Driver code: Here we outline a few ways to monitor what went on. Log results of execution. That is run Hamilton, then emit information to wherever you want it to go. Note. In the above, Hamilton allows you to requestanyintermediateoutputs simply by requesting “functions” (i.e. nodes in the diagram) by name. If we really want to get all the intermediate outputs of the entire dataflow, we can do so and log it wherever we want to! Use loggers inside Hamilton functions (to see the power of this approach,see my old talk on structured logs): Extend Hamilton to emit this information. You use Hamilton to capture information from executed functions, i.e. nodes, without needing to insert logging statement inside the function’s body. This promotes reusability since you can toggle logging between development and production settings at the Driver level. SeeGraphAdapters, or write your ownPython decoratorto wrap functions for monitoring. In any of the above code, you could easily pull in a 3rd party tool to help track & monitor the code, as well as the external API call, e.g. data dog. Note, with a one-line code change, you can plug in the DAGWorks’s Driver and get all that monitoring you’d want and more. (Try the free tierhere)! Since prompts are simply strings, they’re also very amenable to being stored along with your source code. The idea is to store as many prompt versions as you like, within your code so that at runtime, the set of prompts available is fixed and deterministic. The MLOps analogy here is, instead of dynamically reloading models, you instead bake the ML model into the container/hard code the reference. Once deployed, your app has everything that it needs. The deployment is immutable; nothing changes once it’s up. This makes debugging & determining what’s going on, much simpler. This approach has many operational benefits: Whenever a new prompt is pushed, it forces a new deployment. Rollback semantics are clear if there’s an issue with a new prompt. You can submit a pull request (PR) for the source code and prompts at the same time. It becomes simpler to review what the change is, and the downstream dependencies of what these prompts will touch/interact with. You can add checks to your CI/CD system to ensure bad prompts don’t make it to production. It’s simpler to debug an issue. You just pull the (Docker) container that was created and you’ll be able to exactly replicate any customer issue quickly and easily. There is no other “prompt system” to maintain or manage. Simplifying operations. It doesn’t preclude adding extra monitoring and visibility. The prompts would be encoded into functions into the dataflow/directed acyclic graph (DAG): Pairing this code withgit, we have a lightweight versioning system for your entire dataflow (i.e. “chain”), so you can always discern what state the world was in, given a git commit SHA. If you want to manage and have access to multiple prompts at any given point in time, Hamilton has two powerful abstractions to enable you to do so:@config.whenandPython modules. This allows you to store and keep available all older prompt versions together and specify which one to use via code. Hamilton has a concept of decorators, which are just annotations on functions. The@config.whendecorator allows to specify alternative implementations for a functions, i.e. “node”, in your dataflow. In this case, we specify alternative prompts. 
LLMOps: Production prompt engineering patterns with Hamilton
An overview of the production grade ways to iterate on prompts with Hamilton.
DAGWorks Inc., Stefan Krawczyk, and Thierry Jean
Sep 6, 2023

What you send to your LLM is quite important. Small variations and changes can have large impacts on outputs, so as your product evolves, the need to evolve your prompts will too. LLMs are also constantly being developed and released, and as LLMs change, your prompts will need to change as well. Therefore it’s important to set up an iteration pattern to operationalize how you “deploy” your prompts, so you and your team can move efficiently while also ensuring that production issues are minimized, if not avoided. In this post, we’ll guide you through the best practices of managing prompts with Hamilton, making analogies to MLOps patterns, and discussing trade-offs along the way.

Notes:
(1) If you’re looking for a post that talks about “context management”, this isn’t that post. But it is the post that will help you with the nuts and bolts of how to iterate and create that production grade “prompt context management” iteration story.
(2) We’ll use prompt & prompt template interchangeably.
(3) We’ll assume an “online” web-service setting is where these prompts are being used.
(4) We’ll be using our Hamilton PDF summarizer example to project our patterns onto.
(5) Not familiar with Hamilton? You can either learn about Hamilton via Try Hamilton and come back, or get the high level LLMOps approach from this post and then dig into Hamilton via the PDF Summarizer example.
(6) What’s our credibility here? We’ve spent our careers building self-service data/MLOps tooling, most famously for Stitch Fix’s 100+ Data Scientists. So we’ve seen our share of outages and approaches play out over time.

Prompts are to LLMs what hyper-parameters are to ML models

Point: Prompts + LLM APIs are analogous to hyper-parameters + machine learning models.

In terms of “Ops” practices, LLMOps is still in its infancy. MLOps is a little older, but neither is widely adopted compared to how widespread knowledge of DevOps practices is. DevOps practices largely concern themselves with how you ship code to production, and MLOps practices with how to ship code & data artifacts (e.g., statistical models) to production. So what about LLMOps? Personally, I think it’s closer to MLOps, since:
- your LLM workflow is simply code, and
- an LLM API is a data artifact that can be “tweaked” using prompts, similar to a machine learning (ML) model and its hyper-parameters.
Therefore, you most likely care about versioning the LLM API + prompts together tightly for good production practices. For instance, in MLOps practice, you’d want a process in place to validate that your ML model still behaves correctly whenever its hyper-parameters are changed.

How should you think about operationalizing a prompt?

To be clear, the two parts to control for are the LLM and the prompts. Much like MLOps, when the code or the model artifact changes, you want to be able to determine which did. For LLMOps, we’ll want the same discernment, separating the LLM workflow from the LLM API + prompts. Importantly, we should consider LLMs (self-hosted or APIs) to be mostly static, since we less frequently update (or even control) their internals. So, changing the prompts part of LLM API + prompts is effectively like creating a new model artifact.

There are two main ways to treat prompts:
- Prompts as dynamic runtime variables. The template used isn’t static to a deployment.
- Prompts as code. The prompt template is static/predetermined given a deployment.
The main difference is the number of moving parts you need to manage to ensure a great production story. Below, we dig into how to use Hamilton in the context of these two approaches.

Prompts as dynamic runtime variables

Dynamically Pass/Load Prompts

Prompts are just strings. Since strings are a primitive type in most languages, they are quite easy to pass around. The idea is to abstract your code so that at runtime you pass in the prompts required. More concretely, you’d “load/reload” prompt templates whenever there’s an “updated” one. The MLOps analogy here would be to auto-reload the ML model artifact (e.g., a pkl file) whenever a new model is available.

MLOps analogy: diagram showing how ML model auto-reloading would look.
Diagram showing what dynamically reloading/querying prompts would look like.

The benefit here is that you can very quickly roll out new prompts because you do not need to redeploy your application! The downside to this iteration speed is increased operational burden:
- To someone monitoring your application, it’ll be unclear when the change occurred and whether it has propagated through your systems. For example, you just pushed a new prompt, and the LLM now returns more tokens per request, causing latency to spike; whoever is monitoring will likely be puzzled, unless you have a great change log culture.
- Rollback semantics involve having to know about another system. You can’t just roll back a prior deployment to fix things.
- You’ll need great monitoring to understand what was run and when; e.g., when customer service gives you a ticket to investigate, how do you know what prompt was in use?
- You’ll need to manage and monitor whatever system you’re using to manage and store your prompts. This will be an extra system you’ll need to maintain outside of whatever is serving your code.
- You’ll need to manage two processes, one for updating and pushing the service, and one for updating and pushing prompts. Synchronizing these changes will be on you. For example, if you need to make a code change to your service to handle a new prompt, you will need to coordinate changing two systems to make it work, which is extra operational overhead to manage.

How it would work with Hamilton

Our PDF summarizer flow would look something like this if you remove the summarize_text_from_summaries_prompt and summarize_chunk_of_text_prompt function definitions:

summarization_shortened.py. Note the two “*_prompt” inputs, which denote prompts that are now required as inputs for the dataflow to function. With Hamilton you’ll be able to determine what inputs are required for your prompt template just by looking at a diagram like this. Diagram created via Hamilton.
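To make the “prompts as required inputs” idea concrete, here is a minimal sketch of what a function inside such a shortened module could look like. This is illustrative only, not the actual summarization_shortened.py: the function name and the chunked_text parameter are assumptions for the example, while the prompt input name matches the one above.

# illustrative sketch -- not the real summarization_shortened.py
def prompt_and_text_content(
    summarize_chunk_of_text_prompt: str,  # <-- injected at execution time
    chunked_text: str,
) -> str:
    """Fills the injected prompt template with a chunk of text."""
    return summarize_chunk_of_text_prompt.format(chunked_text=chunked_text)

Because the prompt is declared as a regular parameter, Hamilton treats it as a required input to the dataflow, which is why it shows up as an input node in the diagram.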
To operate things, you’ll want to either inject the prompts at request time:

from hamilton import driver

import summarization_shortened

# create driver
dr = (
    driver.Builder()
    .with_modules(summarization_shortened)
    .build()
)

# pull prompts from somewhere
summarize_chunk_of_text_prompt = """SOME PROMPT FOR {chunked_text}"""
summarize_text_from_summaries_prompt = """SOME PROMPT {summarized_chunks} ... {user_query}"""

# execute, and pass in the prompts
result = dr.execute(
    ["summarized_text"],
    inputs={
        "summarize_chunk_of_text_prompt": summarize_chunk_of_text_prompt,
        ...
    }
)

Or you change your code to dynamically load prompts, i.e., add functions to retrieve prompts from an external system as part of the Hamilton dataflow. At each invocation, they will query for the prompt to use (you can of course cache this for performance, as sketched below):

# prompt_template_loaders.py

def summarize_chunk_of_text_prompt(
    db_client: Client, other_args: str
) -> str:
    # pseudo code here, but you get the idea:
    _prompt = db_client.query(
        "get latest prompt X from DB", other_args)
    return _prompt


def summarize_text_from_summaries_prompt(
    db_client: Client, another_arg: str
) -> str:
    # pseudo code here, but you get the idea:
    _prompt = db_client.query(
        "get latest prompt Y from DB", another_arg)
    return _prompt
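A quick sketch of what that caching could look like. This isn’t part of the original example; it assumes the same pseudo-code db_client as above and adds a simple time-to-live (TTL) so “latest prompt” lookups refresh periodically instead of hitting the DB on every request:

# illustrative caching sketch -- assumes the pseudo-code db_client from above
import time

_PROMPT_CACHE: dict = {}
_TTL_SECONDS = 60  # hypothetical refresh interval

def _cached_prompt(db_client, query: str, arg: str) -> str:
    """Returns a cached prompt, re-querying the DB once the TTL expires."""
    key = (query, arg)
    cached = _PROMPT_CACHE.get(key)
    if cached is not None and time.time() - cached[0] < _TTL_SECONDS:
        return cached[1]
    prompt = db_client.query(query, arg)
    _PROMPT_CACHE[key] = (time.time(), prompt)
    return prompt

def summarize_chunk_of_text_prompt(db_client: Client, other_args: str) -> str:
    return _cached_prompt(db_client, "get latest prompt X from DB", other_args)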
Driver code:

from hamilton import driver

import prompt_template_loaders  # <-- load this to provide prompt input
import summarization_shortened

# create driver
dr = (
    driver.Builder()
    .with_modules(
        prompt_template_loaders,  # <-- Hamilton will call the functions above
        summarization_shortened,
    )
    .build()
)

# execute -- no need to pass prompts in this version
result = dr.execute(
    ["summarized_text"],
    inputs={
        # don't need to pass prompts in this version
    }
)

How would I log prompts used and monitor flows?

Here we outline a few ways to monitor what went on.

Log results of execution. That is, run Hamilton, then emit information to wherever you want it to go.

result = dr.execute(
    ["summarized_text",
     "summarize_chunk_of_text_prompt",
     ...  # and anything else you want to pull out
     "summarize_text_from_summaries_prompt"],
    inputs={
        # don't need to pass prompts in this version
    }
)

my_log_system(result)  # send what you want for safe keeping to some
                       # system that you own.

Note: in the above, Hamilton allows you to request any intermediate outputs simply by requesting “functions” (i.e. nodes in the diagram) by name. If we really want to get all the intermediate outputs of the entire dataflow, we can do so and log them wherever we want to!

Use loggers inside Hamilton functions (to see the power of this approach, see my old talk on structured logs):

import logging

logger = logging.getLogger(__name__)

def summarize_text_from_summaries_prompt(
    db_client: Client, another_arg: str
) -> str:
    # pseudo code here, but you get the idea:
    _prompt = db_client.query(
        "get latest prompt Y from DB", another_arg)
    logger.info(f"Prompt used is [{_prompt}]")
    return _prompt

Extend Hamilton to emit this information. You can use Hamilton to capture information from executed functions, i.e. nodes, without needing to insert logging statements inside the function’s body. This promotes reusability, since you can toggle logging between development and production settings at the Driver level. See GraphAdapters, or write your own Python decorator to wrap functions for monitoring.
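For the decorator route, here is a minimal sketch of the general idea. This is plain Python rather than Hamilton’s GraphAdapter API, and the wrapped function mirrors the pseudo-code loader above; treat it as a starting point, not the framework-level mechanism.

import functools
import logging

logger = logging.getLogger(__name__)

def log_output(fn):
    """Wraps a function and logs its return value, e.g. the prompt it produced."""
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        result = fn(*args, **kwargs)
        logger.info("%s returned: %s", fn.__name__, result)
        return result
    return wrapper

@log_output
def summarize_text_from_summaries_prompt(
    db_client: Client, another_arg: str
) -> str:
    # pseudo code here, same as above -- the decorator handles the logging
    return db_client.query("get latest prompt Y from DB", another_arg)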
In any of the above code, you could easily pull in a 3rd party tool to help track & monitor the code, as well as the external API call, e.g., Datadog. Note, with a one-line code change, you can plug in the DAGWorks Driver and get all the monitoring you’d want and more. (Try the free tier here!)

Prompts as code

Prompts as static strings

Since prompts are simply strings, they’re also very amenable to being stored along with your source code. The idea is to store as many prompt versions as you like within your code, so that at runtime the set of prompts available is fixed and deterministic. The MLOps analogy here is that, instead of dynamically reloading models, you bake the ML model into the container/hard-code the reference. Once deployed, your app has everything that it needs. The deployment is immutable; nothing changes once it’s up. This makes debugging & determining what’s going on much simpler.

MLOps analogy: make an immutable deployment by fixing the model for your app’s deployment.
Diagram showing how treating prompts as code enables you to leverage your CI/CD and build an immutable deployment for talking to your LLM API.

This approach has many operational benefits:
- Whenever a new prompt is pushed, it forces a new deployment. Rollback semantics are clear if there’s an issue with a new prompt.
- You can submit a pull request (PR) for the source code and prompts at the same time. It becomes simpler to review what the change is, and the downstream dependencies of what these prompts will touch/interact with.
- You can add checks to your CI/CD system to ensure bad prompts don’t make it to production (see the sketch after this list).
- It’s simpler to debug an issue. You just pull the (Docker) container that was created and you’ll be able to exactly replicate any customer issue quickly and easily.
- There is no other “prompt system” to maintain or manage, simplifying operations.
- It doesn’t preclude adding extra monitoring and visibility.
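As one example of such a check, here is a sketch of a pytest-style test that asserts every versioned prompt template is well formed before it ships. It assumes the versioned prompt functions shown in the next section live in a module like summarization.py and remain directly callable; adapt the module name and assertions to your own project.

# test_prompts.py -- illustrative CI check, assuming the versioned prompt
# functions shown below live in summarization.py and can be called directly
import summarization

def test_prompt_templates_are_well_formed():
    prompt_fns = [
        summarization.summarize_chunk_of_text_prompt__v1,
        summarization.summarize_chunk_of_text_prompt__v2,
    ]
    for prompt_fn in prompt_fns:
        prompt = prompt_fn()
        assert isinstance(prompt, str) and prompt.strip(), prompt_fn.__name__
        # each chunk-summarization prompt should end by introducing the content
        assert prompt.endswith("Content:"), prompt_fn.__name__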
How it would work with Hamilton

The prompts would be encoded as functions in the dataflow/directed acyclic graph (DAG):

What summarization.py in the PDF summarizer example looks like. The prompt templates are part of the code. Diagram created via Hamilton.

Pairing this code with git, we have a lightweight versioning system for your entire dataflow (i.e. “chain”), so you can always discern what state the world was in, given a git commit SHA. If you want to manage and have access to multiple prompts at any given point in time, Hamilton has two powerful abstractions to enable you to do so: @config.when and Python modules. This allows you to keep all older prompt versions available together and specify which one to use via code.

@config.when (docs)

Hamilton has a concept of decorators, which are just annotations on functions. The @config.when decorator allows you to specify alternative implementations for a function, i.e. a “node”, in your dataflow. In this case, we specify alternative prompts.

from hamilton.function_modifiers import config

@config.when(version="v1")
def summarize_chunk_of_text_prompt__v1() -> str:
    """V1 prompt for summarizing chunks of text."""
    return "Summarize this text. Extract any key points with reasoning.\n\nContent:"

@config.when(version="v2")
def summarize_chunk_of_text_prompt__v2(content_type: str = "an academic paper") -> str:
    """V2 prompt for summarizing chunks of text."""
    return f"Summarize this text from {content_type}. Extract the key points with reasoning. \n\nContent:"

You can keep adding functions annotated with @config.when, allowing you to swap between them using configuration passed to the Hamilton Driver. When instantiating the Driver, it will construct the dataflow using the prompt implementation associated with the configuration value.

from hamilton import driver
import summarization

# create driver
dr = (
    driver.Builder()
    .with_modules(summarization)
    .with_config({"version": "v1"})  # V1 is chosen. Use "v2" for V2.
    .build()
)

Module switching

Alternatively to using @config.when, you can instead place your different prompt implementations into different Python modules. Then, at Driver construction time, pass the correct module for the context you want to use.

So here we have one module housing V1 of our prompt:

# prompts_v1.py
def summarize_chunk_of_text_prompt() -> str:
    """V1 prompt for summarizing chunks of text."""
    return "Summarize this text. Extract any key points with reasoning.\n\nContent:"

Here we have one module housing V2 (see how they differ slightly):

# prompts_v2.py
def summarize_chunk_of_text_prompt(content_type: str = "an academic paper") -> str:
    """V2 prompt for summarizing chunks of text."""
    return f"Summarize this text from {content_type}. Extract the key points with reasoning. \n\nContent:"

In the driver code below, we choose the right module to use based on some context (one way to do that is sketched just after).

# run.py
from hamilton import driver

import summarization
import prompts_v1
import prompts_v2

# create driver -- passing in the right module we want
dr = (
    driver.Builder()
    .with_modules(
        prompts_v1,  # or prompts_v2
        summarization,
    )
    .build()
)
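What “based on some context” means is up to you. As a minimal sketch (the PROMPT_VERSION environment variable is a hypothetical name, not part of the example), you could pick the module from the environment at startup:

# continues run.py from above -- choose the prompt module from the environment
import os

prompt_module = prompts_v2 if os.getenv("PROMPT_VERSION") == "v2" else prompts_v1

dr = (
    driver.Builder()
    .with_modules(prompt_module, summarization)
    .build()
)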
Using the module approach allows us to encapsulate and version whole sets of prompts together. If you want to go back in time (via git), or see what a blessed prompt version was, you just need to navigate to the correct commit, and then look in the right module.

How would I log prompts used and monitor flows?

Assuming you’re using git to track your code, you wouldn’t need to record what prompts were being used. Instead, you’d just need to know what git commit SHA is deployed, and you’ll be able to track the version of your code and prompts simultaneously. To monitor flows, just like the above approach, you have the same monitoring hooks at your disposal, and I won’t repeat them here, but they are:
- Request any intermediate outputs and log them yourself outside of Hamilton.
- Log them from within the function yourself, or build a Python decorator/GraphAdapter to do it at the framework level.
- Integrate 3rd party tooling for monitoring your code and LLM API calls, or use the DAGWorks Platform offering to monitor it all. (Try the free tier here!)
- Or all of the above!

What about A/B testing my prompts?

With any ML initiative, it’s important to measure the business impact of changes. Likewise, with LLMs + prompts, it’ll be important to test and measure changes against important business metrics. In the MLOps world, you’d be A/B testing ML models to evaluate their business value by dividing traffic between them. To ensure the randomness necessary for A/B tests, you wouldn’t know at runtime which model to use until a coin is flipped. However, to get those models out, they both would have to follow a process to qualify them. So for prompts, we should think similarly.

The above two prompt engineering patterns don’t preclude you from being able to A/B test prompts, but it means you need to manage a process to enable however many prompt templates you’re testing in parallel. If you’re also adjusting code paths, having them in code will make it simpler to discern and debug what is going on, and you can make use of the @config.when decorator / Python module swapping for this purpose. Compare that with having to critically rely on your logging/monitoring/observability stack to tell you what prompt was used if you’re dynamically loading/passing them in, and then having to mentally map which prompts go with which code paths.

Note, this all gets harder if you start needing to change multiple prompts for an A/B test because you have several of them in a flow. For example, if you have two prompts in your workflow and you’re changing LLMs, you’ll want to A/B test the change holistically, rather than individually per prompt. Our advice: by putting the prompts into code, your operational life will be simpler, since you’ll know which prompts belong to which code paths without having to do any mental mapping.
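If you do go down the A/B route with prompts as code, one way to divide traffic is to build one Driver per prompt module at startup and bucket each request deterministically. The sketch below is an assumption about how you might wire this up (the user_id argument, the 50/50 split, and the summarize() wrapper are all hypothetical), reusing the prompts_v1/prompts_v2/summarization modules from above:

# illustrative A/B routing sketch -- one Driver per prompt variant
import hashlib

from hamilton import driver

import prompts_v1
import prompts_v2
import summarization

DRIVERS = {
    "A": driver.Builder().with_modules(prompts_v1, summarization).build(),
    "B": driver.Builder().with_modules(prompts_v2, summarization).build(),
}

def variant_for(user_id: str) -> str:
    """Deterministically buckets a user into variant A or B (50/50)."""
    bucket = int(hashlib.sha256(user_id.encode()).hexdigest(), 16) % 100
    return "A" if bucket < 50 else "B"

def summarize(user_id: str, request_inputs: dict) -> dict:
    """Routes a request to the variant's Driver and records which variant served it."""
    variant = variant_for(user_id)
    result = DRIVERS[variant].execute(["summarized_text"], inputs=request_inputs)
    return {"variant": variant, "result": result}  # log the variant with your metrics

Because both variants are baked into the deployment, the coin flip only decides which already-qualified prompt set handles a request; what was used is fully determined by the variant plus the git commit SHA.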
Summary

In this post, we explained two patterns for managing prompts in a production environment with Hamilton. The first approach treats prompts as dynamic runtime variables, while the second treats prompts as code for production settings. If you value reducing operational burden, then our advice is to encode prompts as code, as it is operationally simpler, unless the speed of changing them really matters for you.

To recap:
- Prompts as dynamic runtime variables. Use an external system to pass the prompts to your Hamilton dataflows, or use Hamilton to pull them from a DB. For debugging & monitoring, it’s important to be able to determine what prompt was used for a given invocation. You can integrate open source tools, or use something like the DAGWorks Platform to help ensure you know what was used for any invocation of your code.
- Prompts as code. Encoding the prompts as code allows easy versioning with git. Change management can be done via pull requests and CI/CD checks. It works well with Hamilton’s features like @config.when and module switching at the Driver level, because they determine clearly what version of the prompt is used. This approach strengthens the use of any tooling you might use to monitor or track, like the DAGWorks Platform, as prompts for a deployment are immutable.

We want to hear from you!

If you’re excited by any of this, or have strong opinions, leave a comment, or drop by our Slack channel! Some links to do praise/complain/chat:
- 📣 Join our community on Slack; we’re more than happy to help answer questions you might have or get you started.
- ⭐️ us on GitHub.
- 📝 Leave us an issue if you find something.
- 📚 Read our documentation.
- ⌨️ Interactively learn about Hamilton in your browser.

Other Hamilton posts you might be interested in:

We have a growing collection of posts & content. Here are some we think you might be interested in.
- Containerized PDF Summarizer with FastAPI and Hamilton (Thierry Jean, DAGWorks Inc., and Stefan Krawczyk, Aug 18): Skip learning convoluted LLM-specific frameworks and write your first LLM application using regular Python functions and Hamilton! In this post, we’ll present a containerized PDF summarizer powered by the OpenAI API.
- Building a maintainable and modular LLM application stack with Hamilton (Thierry Jean and DAGWorks Inc., Jul 11): In this post, we’re going to share how Hamilton can help you write modular and maintainable code for your large language model (LLM) application stack. Hamilton is great for describing any type of dataflow, which is exactly what you’re doing when building an LLM powered application.
- tryhamilton.dev, an interactive tutorial in your browser!
- Hamilton + Airflow (GitHub repo)
- Hamilton + Feast (GitHub repo)
- Pandas data transformations in Hamilton in 5 minutes
- Lineage + Hamilton in 10 minutes
So we’ve seen our share of outages and approaches play out over time.Thanks for reading DAGWorks’s Substack! Subscribe for free to receive updates and posts like this!SubscribePrompts are to LLMs what hyper-parameters are to ML modelsPoint:Prompts + LLM APIs are analogous to hyper-parameters + machine learning models.In terms of “Ops” practices, LLMOps is still in its infancy. MLOps is a little older, but still neither are widely adopted if you’re comparing it to how widespread knowledge is around DevOps practices.DevOps practices largely concern themselves with how you ship code to production, and MLOps practices how to ship code& data artifacts(e.g., statistical models)to production. So what about LLMOps? Personally, I think it’s closer to MLOps since you have:your LLM workflow is simply code.and an LLM API is a data artifact that can be “tweaked” using prompts, similar to a machine learning (ML) model and its hyper-parameters.Therefore, you most likely care about versioning the LLM API + prompts together tightly for good production practices. For instance, in MLOps practice, you’d want a process in place to validate your ML model still behaves correctly whenever its hyper-parameters are changed.How should you think about operationalizing a prompt?To be clear, the two parts to control for are theLLMand theprompts. Much like MLOps, when the code or the model artifact changes, you want to be able to determine which did. For LLMOps, we’ll want the same discernment, separating the LLM workflow from the LLM API + prompts. Importantly, we should consider LLMs (self-hosted or APIs) to be mostly static since we less frequently update (or even control) their internals. So, changing thepromptspart of LLM API + prompts is effectively like creating a new model artifact.There are two main ways to treat prompts:Prompts as dynamic runtime variables. The template used isn’t static to a deployment.Prompts as code.The prompt template is static/ predetermined given a deployment.The main difference is the amount of moving parts you need to manage to ensure a great production story. Below, we dig into how to use Hamilton in the context of these two approaches.Prompts as dynamic runtime variablesDynamically Pass/Load PromptsPrompts are just strings. Since strings are a primitive type in most languages, this means that they are quite easy to pass around.\\xa0 The idea is to abstract your code so that at runtime you pass in the prompts required.\\xa0 More concretely, you’d “load/reload” prompt templates whenever there’s an “updated” one.The MLOps analogy here, would be to auto-reload the ML model artifact (e.g., a pkl file) whenever a new model is available.MLOps Analogy: diagram showing how ML model auto reloading would look.Diagram showing what dynamically reloading/querying prompts would look like.The benefit here is that you can very quickly roll out new prompts because you do not need to redeploy your application!The downside to this iteration speed is increased operational burden:To someone monitoring your application, it’ll be unclear when the change occurred and whether it’s propagated itself through your systems. For example, you just pushed a new prompt, and the LLM now returns more tokens per request, causing latency to spike; whoever is monitoring will likely be puzzled, unless you have a great change log culture.Rollback semantics involve having to know aboutanothersystem. 
You can’t just rollback a prior deployment to fix things.You’ll need great monitoring to understand what was run and when; e.g., when customer service gives you a ticket to investigate, how do you know what prompt was in use?You’ll need to manage and monitor whatever system you’re using to manage and store your prompts. This will be an extra system you’ll need to maintain outside of whatever is serving your code.You’ll need to manage two processes, one for updating and pushing the service, and one for updating and pushing prompts. Synchronizing these changes will be on you. For example, you need to make a code change to your service to handle a new prompt. You will need to coordinate changing two systems to make it work, which is extra operational overhead to manage.How it would work with HamiltonOur PDF summarizer flow would look something like this if you removesummarize_text_from_summaries_promptandsummarize_chunk_of_text_promptfunction definitions:summarization_shortened.py. Note the two inputs “*_prompt” that denote prompts that are now required as input to the dataflow to function. With Hamilton you’ll be able to determine what inputs should be required for your prompt template by just looking at a diagram like this. Diagram created via Hamilton.To operate things, you’ll want to either inject the prompts at request time:from hamilton import base, driver\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(summarization_sortened)\\n .build()\\n)\\n\\n# pull prompts from somewhere\\nsummarize_chunk_of_text_prompt = \"\"\"SOME PROMPT FOR {chunked_text}\"\"\"\\nsummarize_text_from_summaries_prompt = \"\"\"SOME PROMPT {summarized_chunks} ... {user_query}\"\"\"\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n \"summarize_chunk_of_text_prompt\": summarize_chunk_of_text_prompt,\\n ...\\n }\\n)Or\\xa0you change your code to dynamically load prompts, i.e., add functions to retrieve prompts from an external system as part of the Hamilton dataflow. At each invocation, they will query for the prompt to use (you can of course cache this for performance):# prompt_template_loaders.py\\n\\ndef summarize_chunk_of_text_prompt(\\n db_client: Client, other_args: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query( \\n \"get latest prompt X from DB\", other_args)\\n return _prompt\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n return _promptDriver code:from hamilton import base, driver\\nimport prompt_template_loaders # <-- load this to provide prompt input\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(\\n prompt_template_loaders,# <-- Hamilton will call above functions\\n summarization_sortened, \\n )\\n .build()\\n)\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)How would I log prompts used and monitor flows?Here we outline a few ways to monitor what went on.Log results of execution. That is run Hamilton, then emit information to wherever you want it to go.result = dr.execute(\\n [\"summarized_text\", \\n \"summarize_chunk_of_text_prompt\", \\n ... 
# and anything else you want to pull out\\n \"summarize_text_from_summaries_prompt\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)\\n\\nmy_log_system(result) # send what you want for safe keeping to some\\n # system that you own.Note. In the above, Hamilton allows you to requestanyintermediateoutputs simply by requesting “functions” (i.e. nodes in the diagram) by name. If we really want to get all the intermediate outputs of the entire dataflow, we can do so and log it wherever we want to!Use loggers inside Hamilton functions (to see the power of this approach,see my old talk on structured logs):import logging\\n\\nlogger = logging.getLogger(__name__)\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n logger.info(f\"Prompt used is [{_prompt}]\")\\n return _promptExtend Hamilton to emit this information. You use Hamilton to capture information from executed functions, i.e. nodes, without needing to insert logging statement inside the function’s body. This promotes reusability since you can toggle logging between development and production settings at the Driver level. SeeGraphAdapters, or write your ownPython decoratorto wrap functions for monitoring.In any of the above code, you could easily pull in a 3rd party tool to help track & monitor the code, as well as the external API call, e.g. data dog. Note, with a one-line code change, you can plug in the DAGWorks’s Driver and get all that monitoring you’d want and more. (Try the free tierhere)!Prompts as codePrompts as static stringsSince prompts are simply strings, they’re also very amenable to being stored along with your source code. The idea is to store as many prompt versions as you like, within your code so that at runtime, the set of prompts available is fixed and deterministic.The MLOps analogy here is, instead of dynamically reloading models, you instead bake the ML model into the container/hard code the reference. Once deployed, your app has everything that it needs. The deployment is immutable; nothing changes once it’s up. This makes debugging & determining what’s going on, much simpler.MLOps Analogy: make an immutable deployment by making the model fixed for your app’s deployment.Diagram showing how treating prompts as code enables you to leverage your CI/CD and build an immutable deployment for talking to your LLM API.This approach has many operational benefits:Whenever a new prompt is pushed, it forces a new deployment. Rollback semantics are clear if there’s an issue with a new prompt.You can submit a pull request (PR) for the source code and prompts at the same time. It becomes simpler to review what the change is, and the downstream dependencies of what these prompts will touch/interact with.You can add checks to your CI/CD system to ensure bad prompts don’t make it to production.It’s simpler to debug an issue. You just pull the (Docker) container that was created and you’ll be able to exactly replicate any customer issue quickly and easily.There is no other “prompt system” to maintain or manage. Simplifying operations.It doesn’t preclude adding extra monitoring and visibility.How it would work with HamiltonThe prompts would be encoded into functions into the dataflow/directed acyclic graph (DAG):What summarization.py in the PDF summarizer example looks like. The prompt templates are part of the code. 
Diagram created via Hamilton.Pairing this code withgit, we have a lightweight versioning system for your entire dataflow (i.e. “chain”), so you can always discern what state the world was in, given a git commit SHA. If you want to manage and have access to multiple prompts at any given point in time, Hamilton has two powerful abstractions to enable you to do so:@config.whenandPython modules. This allows you to store and keep available all older prompt versions together and specify which one to use via code.@config.when (docs)Hamilton has a concept of decorators, which are just annotations on functions. The@config.whendecorator allows to specify alternative implementations for a functions, i.e. “node”, in your dataflow. In this case, we specify alternative prompts.from hamilton.function_modifiers import config\\n\\n@config.when(version=\"v1\")\\ndef summarize_chunk_of_text_prompt__v1() -> str:\\n \"\"\"V1 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text. Extract any key points with reasoning.\\\\n\\\\nContent:\"\\n\\n@config.when(version=\"v2\")\\ndef summarize_chunk_of_text_prompt__v2(content_type: str = \"an academic paper\") -> str:\\n \"\"\"V2 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text from {content_type}. Extract the key points with reasoning. \\\\n\\\\nContent:\"You can keep adding functions annotated with@config.when, allowing you to swap between them using configuration passed to the HamiltonDriver. When instantiating theDriver, it will construct the dataflow using the prompt implementation associated with the configuration value.from hamilton import base, driver\\nimport summarization\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(summarization)\\n .with_config({\"version\": \"v1\"}) # V1 is chosen. Use \"v2\\' for V2.\\n .build()\\n)Module switchingAlternatively to using@config.when, you can instead place your different prompt implementations into different Python modules. Then, atDriverconstruction time, pass the correct module for the context you want to use.So here we have one module housing V1 of our prompt:# prompts_v1.py\\ndef summarize_chunk_of_text_prompt() -> str:\\n \"\"\"V1 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text. Extract any key points with reasoning.\\\\n\\\\nContent:\"Here we have one module housing V2 (see how they differ slightly):# prompts_v2.py\\ndef summarize_chunk_of_text_prompt(content_type: str = \"an academic paper\") -> str:\\n \"\"\"V2 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text from {content_type}. Extract the key points with reasoning. \\\\n\\\\nContent:\"In the driver code below, we choose the right module to use based on some context.# run.py\\nfrom hamilton import driver\\nimport summarization\\nimport prompts_v1\\nimport prompts_v2\\n\\n# create driver -- passing in the right module we want\\ndr = (\\n driver.Builder()\\n .with_modules(\\n prompts_v1, # or prompts_v2\\n summarization,\\n )\\n .build()\\n)Using the module approach allows us to encapsulate and version whole sets of prompts together. If you want to go back in time (via git), or see what a blessed prompt version was, you just need to navigate to the correct commit, and then look in the right module.How would I log prompts used and monitor flows?Assuming you’re using git to track your code, you wouldn’t need to record what prompts were being used. 
Instead, you’d just need to know what git commit SHA is deployed and you’ll be able to track the version of your code and prompts simultaneously.To monitor flows, just like the above approach, you have the same monitoring hooks available at your disposal, and I wont repeat them here, but they are:Request any intermediate outputs and log them yourself outside of Hamilton.Log them from within the function yourself, or build aPython decorator/GraphAdapterto do it at the framework level.Integrate 3rd party tooling for monitoring your code and LLM API calls, or use the DAGWorks Platform offering to monitor it all. (Try the free tierhere)!or all the above!What about A/B testing my prompts?With any ML initiative, it’s important to measure business impacts of changes. Likewise, with LLMs + prompts, it’ll be important to test and measure changes against important business metrics. In the MLOps world, you’d be A/B testing ML models to evaluate their business value by dividing traffic between them. To ensure the randomness necessary to A/B tests, you wouldn’t know at runtime which model to use until a coin is flipped. However, to get those models out, they both would have follow a process to qualify them. So for prompts, we should think similarly.The above two prompt engineering patterns don’t preclude you from being able to A/B test prompts, but it means you need to manage a process to enable however many prompt templates you’re testing in parallel. If you’re also adjusting code paths, having them in code will be simpler to discern and debug what is going on, and you can make use of the `@config.when` decorator / python module swapping for this purpose. Versus, having to critically rely on your logging/monitoring/observability stack to tell you what prompt was used if you’re dynamically loading/passing them in and then having to mentally map which prompts go with which code paths.Note, this all gets harder if you start needing to change multiple prompts for an A/B test because you have several of them in a flow. For example you have two prompts in your workflow and you’re changing LLMs, you’ll want to A/B test the change holistically, rather than individually per prompt. Our advice, by putting the prompts into code your operational life will be simpler, since you’ll know what two prompts belong to what code paths without having to do any mental mapping.Thank you for reading DAGWorks’s Substack. This post is public so feel free to share it.ShareSummaryIn this post, we explained two patterns for managing prompts in a production environment with Hamilton. The first approach treatsprompts asdynamic runtime variables,while the second, treatsprompts as codefor production settings. If you value reducing operational burden, then our advice is to encode prompts as code, as it is operationally simpler, unless the speed to change them really matters for you.To recap:Prompts as dynamic runtime variables. Use an external system to pass the prompts to your Hamilton dataflows, or use Hamilton to pull them from a DB. For debugging & monitoring, it’s important to be able to determine what prompt was used for a given invocation. You can integrate open source tools, or use something like the DAGWorks Platform to help ensure you know what was used for any invocation of your code.Prompts as code.Encoding the prompts as code allows easy versioning with git. Change management can be done via pull requests and CI/CD checks. 
It works well with Hamilton’s features like@config.whenand module switching at the Driver level because it determines clearly what version of the prompt is used. This approach strengthens the use of any tooling you might use to monitor or track, like the DAGWorks Platform, as prompts for a deployment are immutable.We want to hear from you!If you’re excited by any of this, or have strong opinions, leave a comment, or drop by our Slack channel! Some links to do praise/complain/chat:📣join our community on Slack—\\u200awe’re more than happy to help answer questions you might have or get you started.⭐️ us onGitHub.📝 leave us anissueif you find something.📚 read ourdocumentation.⌨️ interactivelylearn about Hamilton in your browser.Other Hamilton posts you might be interested in:We have a growing collection of posts & content. Here are some we think you might be interested in.Containerized PDF Summarizer with FastAPI and HamiltonThierry Jean,DAGWorks Inc., andStefan Krawczyk·Aug 18Skip learning convoluted LLM-specific frameworks and write your first LLM application using regular Python functions and Hamilton! In this post, we’ll present a containerized PDF summarizer powered by the OpenAI API. Its flow is encoded in Hamilton, which theRead full storyBuilding a maintainable and modular LLM application stack with HamiltonThierry JeanandDAGWorks Inc.·Jul 11In this post, we’re going to share how Hamilton can help you write modular and maintainable code for your large language model (LLM) application stack. Hamilton is great for describing any type of dataflow, which is exactly what you’re doing when building an LLM powered application. With Hamilton you get strongRead full storytryhamilton.dev– an interactive tutorial in your browser!Hamilton + Airflow(GitHub repo)Hamilton + Feast(GitHub repo)Pandas data transformations in Hamilton in 5 minutesLineage + Hamilton in 10 minutes4Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShareCommentsTopNewNo postsReady for more?Subscribe© 2023 DAGWorks Inc.Privacy∙Terms∙Collection noticeStart WritingGet the appSubstackis the home for great writing DAGWorks’s SubstackSubscribeSign in DAGWorks’s SubstackSubscribeSign in DAGWorks’s SubstackSubscribeSign in DAGWorks’s SubstackSubscribeSign in SubscribeSign in SubscribeSign in Subscribe Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherDiscover more from DAGWorks’s SubstackThought posts, and updates on Hamilton and the DAGWorks Platform.SubscribeContinue readingSign inLLMOps: Production prompt engineering patterns with HamiltonAn overview of the production grade ways to iterate on prompts with Hamilton.DAGWorks Inc.,Stefan Krawczyk, andThierry JeanSep 6, 20234Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShareWhat you send to your LLM is quite important. Small variations and changes can have large impacts on outputs, so as your product evolves, the need to evolve your prompts will too. LLMs are also constantly being developed and released, and so as LLMs change, your prompts will also need to change. Therefore it’s important to set up an iteration pattern to operationalize how you “deploy” your prompts so you and your team can move efficiently, but also ensure that production issues are minimized, if not avoided. 
In this post, we’ll guide you through the best practices of managing prompts with Hamilton, making analogies to MLOps patterns, and discussing trade-offs along the way.Notes:(1): if you’re looking for a post that talks about “context management” this isn’t that post. But it is the post that will help you with the nuts and bolts on how to iterate and create that production grade “prompt context management” iteration story.(2): we’ll use prompt & prompt template interchangeably.(3): we’ll assume an “online” web-service setting is where these prompts are being used.(4): we’ll be using ourHamilton’s PDF summarizer exampleto project our patterns onto.(5): not familiar withHamilton? You can either learn about Hamilton viaTry Hamiltonand come back, or get the high level LLMOps approach from this post and then dig into Hamilton via thePDF Summarizer example.(6): what’s our credibility here? We’ve spent our careers building self-service data/MLOps tooling, most famously for Stitch Fix’s 100+ Data Scientists. So we’ve seen our share of outages and approaches play out over time.Thanks for reading DAGWorks’s Substack! Subscribe for free to receive updates and posts like this!SubscribePrompts are to LLMs what hyper-parameters are to ML modelsPoint:Prompts + LLM APIs are analogous to hyper-parameters + machine learning models.In terms of “Ops” practices, LLMOps is still in its infancy. MLOps is a little older, but still neither are widely adopted if you’re comparing it to how widespread knowledge is around DevOps practices.DevOps practices largely concern themselves with how you ship code to production, and MLOps practices how to ship code& data artifacts(e.g., statistical models)to production. So what about LLMOps? Personally, I think it’s closer to MLOps since you have:your LLM workflow is simply code.and an LLM API is a data artifact that can be “tweaked” using prompts, similar to a machine learning (ML) model and its hyper-parameters.Therefore, you most likely care about versioning the LLM API + prompts together tightly for good production practices. For instance, in MLOps practice, you’d want a process in place to validate your ML model still behaves correctly whenever its hyper-parameters are changed.How should you think about operationalizing a prompt?To be clear, the two parts to control for are theLLMand theprompts. Much like MLOps, when the code or the model artifact changes, you want to be able to determine which did. For LLMOps, we’ll want the same discernment, separating the LLM workflow from the LLM API + prompts. Importantly, we should consider LLMs (self-hosted or APIs) to be mostly static since we less frequently update (or even control) their internals. So, changing thepromptspart of LLM API + prompts is effectively like creating a new model artifact.There are two main ways to treat prompts:Prompts as dynamic runtime variables. The template used isn’t static to a deployment.Prompts as code.The prompt template is static/ predetermined given a deployment.The main difference is the amount of moving parts you need to manage to ensure a great production story. Below, we dig into how to use Hamilton in the context of these two approaches.Prompts as dynamic runtime variablesDynamically Pass/Load PromptsPrompts are just strings. 
Since strings are a primitive type in most languages, this means that they are quite easy to pass around.\\xa0 The idea is to abstract your code so that at runtime you pass in the prompts required.\\xa0 More concretely, you’d “load/reload” prompt templates whenever there’s an “updated” one.The MLOps analogy here, would be to auto-reload the ML model artifact (e.g., a pkl file) whenever a new model is available.MLOps Analogy: diagram showing how ML model auto reloading would look.Diagram showing what dynamically reloading/querying prompts would look like.The benefit here is that you can very quickly roll out new prompts because you do not need to redeploy your application!The downside to this iteration speed is increased operational burden:To someone monitoring your application, it’ll be unclear when the change occurred and whether it’s propagated itself through your systems. For example, you just pushed a new prompt, and the LLM now returns more tokens per request, causing latency to spike; whoever is monitoring will likely be puzzled, unless you have a great change log culture.Rollback semantics involve having to know aboutanothersystem. You can’t just rollback a prior deployment to fix things.You’ll need great monitoring to understand what was run and when; e.g., when customer service gives you a ticket to investigate, how do you know what prompt was in use?You’ll need to manage and monitor whatever system you’re using to manage and store your prompts. This will be an extra system you’ll need to maintain outside of whatever is serving your code.You’ll need to manage two processes, one for updating and pushing the service, and one for updating and pushing prompts. Synchronizing these changes will be on you. For example, you need to make a code change to your service to handle a new prompt. You will need to coordinate changing two systems to make it work, which is extra operational overhead to manage.How it would work with HamiltonOur PDF summarizer flow would look something like this if you removesummarize_text_from_summaries_promptandsummarize_chunk_of_text_promptfunction definitions:summarization_shortened.py. Note the two inputs “*_prompt” that denote prompts that are now required as input to the dataflow to function. With Hamilton you’ll be able to determine what inputs should be required for your prompt template by just looking at a diagram like this. Diagram created via Hamilton.To operate things, you’ll want to either inject the prompts at request time:from hamilton import base, driver\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(summarization_sortened)\\n .build()\\n)\\n\\n# pull prompts from somewhere\\nsummarize_chunk_of_text_prompt = \"\"\"SOME PROMPT FOR {chunked_text}\"\"\"\\nsummarize_text_from_summaries_prompt = \"\"\"SOME PROMPT {summarized_chunks} ... {user_query}\"\"\"\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n \"summarize_chunk_of_text_prompt\": summarize_chunk_of_text_prompt,\\n ...\\n }\\n)Or\\xa0you change your code to dynamically load prompts, i.e., add functions to retrieve prompts from an external system as part of the Hamilton dataflow. 
At each invocation, they will query for the prompt to use (you can of course cache this for performance):# prompt_template_loaders.py\\n\\ndef summarize_chunk_of_text_prompt(\\n db_client: Client, other_args: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query( \\n \"get latest prompt X from DB\", other_args)\\n return _prompt\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n return _promptDriver code:from hamilton import base, driver\\nimport prompt_template_loaders # <-- load this to provide prompt input\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(\\n prompt_template_loaders,# <-- Hamilton will call above functions\\n summarization_sortened, \\n )\\n .build()\\n)\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)How would I log prompts used and monitor flows?Here we outline a few ways to monitor what went on.Log results of execution. That is run Hamilton, then emit information to wherever you want it to go.result = dr.execute(\\n [\"summarized_text\", \\n \"summarize_chunk_of_text_prompt\", \\n ... # and anything else you want to pull out\\n \"summarize_text_from_summaries_prompt\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)\\n\\nmy_log_system(result) # send what you want for safe keeping to some\\n # system that you own.Note. In the above, Hamilton allows you to requestanyintermediateoutputs simply by requesting “functions” (i.e. nodes in the diagram) by name. If we really want to get all the intermediate outputs of the entire dataflow, we can do so and log it wherever we want to!Use loggers inside Hamilton functions (to see the power of this approach,see my old talk on structured logs):import logging\\n\\nlogger = logging.getLogger(__name__)\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n logger.info(f\"Prompt used is [{_prompt}]\")\\n return _promptExtend Hamilton to emit this information. You use Hamilton to capture information from executed functions, i.e. nodes, without needing to insert logging statement inside the function’s body. This promotes reusability since you can toggle logging between development and production settings at the Driver level. SeeGraphAdapters, or write your ownPython decoratorto wrap functions for monitoring.In any of the above code, you could easily pull in a 3rd party tool to help track & monitor the code, as well as the external API call, e.g. data dog. Note, with a one-line code change, you can plug in the DAGWorks’s Driver and get all that monitoring you’d want and more. (Try the free tierhere)!Prompts as codePrompts as static stringsSince prompts are simply strings, they’re also very amenable to being stored along with your source code. The idea is to store as many prompt versions as you like, within your code so that at runtime, the set of prompts available is fixed and deterministic.The MLOps analogy here is, instead of dynamically reloading models, you instead bake the ML model into the container/hard code the reference. Once deployed, your app has everything that it needs. 
In any of the above code, you could easily pull in a third-party tool to help track and monitor the code, as well as the external API call, e.g. Datadog. Note that with a one-line code change, you can plug in the DAGWorks Driver and get all the monitoring you'd want and more. (Try the free tier here!)

Prompts as code

Prompts as static strings

Since prompts are simply strings, they're also very amenable to being stored along with your source code. The idea is to store as many prompt versions as you like within your code, so that at runtime the set of prompts available is fixed and deterministic.

The MLOps analogy here is that, instead of dynamically reloading models, you bake the ML model into the container / hard-code the reference. Once deployed, your app has everything it needs. The deployment is immutable; nothing changes once it's up. This makes debugging and determining what's going on much simpler.

MLOps analogy: make an immutable deployment by fixing the model for your app's deployment.

Diagram showing how treating prompts as code lets you leverage your CI/CD and build an immutable deployment for talking to your LLM API.

This approach has many operational benefits:

- Whenever a new prompt is pushed, it forces a new deployment. Rollback semantics are clear if there's an issue with a new prompt.
- You can submit a pull request (PR) for the source code and prompts at the same time. It becomes simpler to review what the change is, and the downstream dependencies these prompts will touch/interact with.
- You can add checks to your CI/CD system to ensure bad prompts don't make it to production.
- It's simpler to debug an issue. You just pull the (Docker) container that was created and you'll be able to replicate any customer issue quickly and exactly.
- There is no other "prompt system" to maintain or manage, which simplifies operations.
- It doesn't preclude adding extra monitoring and visibility.

How it would work with Hamilton

The prompts would be encoded as functions in the dataflow / directed acyclic graph (DAG):

What summarization.py in the PDF summarizer example looks like. The prompt templates are part of the code. Diagram created via Hamilton.

Pairing this code with git, we have a lightweight versioning system for your entire dataflow (i.e., "chain"), so you can always discern what state the world was in, given a git commit SHA. If you want to manage and have access to multiple prompts at any given point in time, Hamilton has two powerful abstractions that enable you to do so: @config.when and Python modules. This allows you to keep all older prompt versions together and specify which one to use via code.

@config.when (docs)

Hamilton has a concept of decorators, which are just annotations on functions. The @config.when decorator allows you to specify alternative implementations for a function, i.e., "node", in your dataflow. In this case, we specify alternative prompts.

```python
from hamilton.function_modifiers import config

@config.when(version="v1")
def summarize_chunk_of_text_prompt__v1() -> str:
    """V1 prompt for summarizing chunks of text."""
    return "Summarize this text. Extract any key points with reasoning.\n\nContent:"

@config.when(version="v2")
def summarize_chunk_of_text_prompt__v2(content_type: str = "an academic paper") -> str:
    """V2 prompt for summarizing chunks of text."""
    return f"Summarize this text from {content_type}. Extract the key points with reasoning.\n\nContent:"
```

You can keep adding functions annotated with @config.when, allowing you to swap between them using configuration passed to the Hamilton Driver. When instantiating the Driver, it will construct the dataflow using the prompt implementation associated with the configuration value.

```python
from hamilton import driver

import summarization

# create driver
dr = (
    driver.Builder()
    .with_modules(summarization)
    .with_config({"version": "v1"})  # V1 is chosen. Use "v2" for V2.
    .build()
)
```
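Because the configuration is fixed when the Driver is built, one simple way to keep several prompt versions live in the same process is to build one Driver per version and route between them at request time. A minimal sketch, assuming you reuse the summarization module from the example:

```python
from hamilton import driver

import summarization  # the module containing the @config.when-decorated prompts

# One Driver per prompt version; each resolves to a different prompt implementation.
drivers = {
    version: (
        driver.Builder()
        .with_modules(summarization)
        .with_config({"version": version})
        .build()
    )
    for version in ("v1", "v2")
}

# Then call, e.g., drivers["v2"].execute(...) with whatever inputs your dataflow needs.
```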
Module switching

As an alternative to @config.when, you can place your different prompt implementations in different Python modules. Then, at Driver construction time, pass in the correct module for the context you want to use.

Here we have one module housing V1 of our prompt:

```python
# prompts_v1.py
def summarize_chunk_of_text_prompt() -> str:
    """V1 prompt for summarizing chunks of text."""
    return "Summarize this text. Extract any key points with reasoning.\n\nContent:"
```

Here we have one module housing V2 (see how they differ slightly):

```python
# prompts_v2.py
def summarize_chunk_of_text_prompt(content_type: str = "an academic paper") -> str:
    """V2 prompt for summarizing chunks of text."""
    return f"Summarize this text from {content_type}. Extract the key points with reasoning.\n\nContent:"
```

In the driver code below, we choose the right module to use based on some context.

```python
# run.py
from hamilton import driver

import summarization
import prompts_v1
import prompts_v2

# create driver -- passing in the right module we want
dr = (
    driver.Builder()
    .with_modules(
        prompts_v1,  # or prompts_v2
        summarization,
    )
    .build()
)
```

Using the module approach allows us to encapsulate and version whole sets of prompts together. If you want to go back in time (via git), or see what a blessed prompt version was, you just need to navigate to the correct commit and look in the right module.

How would I log prompts used and monitor flows?

Assuming you're using git to track your code, you wouldn't need to record which prompts were being used. Instead, you'd just need to know which git commit SHA is deployed, and you'd be able to track the version of your code and prompts simultaneously.

To monitor flows, you have the same monitoring hooks available as with the previous approach, and I won't repeat them in detail here:

- Request any intermediate outputs and log them yourself outside of Hamilton.
- Log them from within the function yourself, or build a Python decorator / GraphAdapter to do it at the framework level.
- Integrate third-party tooling for monitoring your code and LLM API calls, or use the DAGWorks Platform offering to monitor it all. (Try the free tier here!)
- Or all of the above!

What about A/B testing my prompts?

With any ML initiative, it's important to measure the business impact of changes. Likewise, with LLMs + prompts, it'll be important to test and measure changes against important business metrics. In the MLOps world, you'd A/B test ML models to evaluate their business value by dividing traffic between them. To ensure the randomness necessary for A/B tests, you wouldn't know at runtime which model to use until the coin is flipped. However, to get those models out, they both would have to follow a process to qualify them. So for prompts, we should think similarly.

The above two prompt engineering patterns don't preclude A/B testing prompts, but they do mean you need to manage a process to enable however many prompt templates you're testing in parallel. If you're also adjusting code paths, having the prompts in code makes it simpler to discern and debug what is going on, and you can make use of the @config.when decorator / Python module swapping for this purpose (see the sketch after this section). Otherwise, you have to rely heavily on your logging/monitoring/observability stack to tell you what prompt was used when you dynamically load or pass prompts in, and then mentally map which prompts go with which code paths.

Note, this all gets harder if you need to change multiple prompts for an A/B test because you have several of them in a flow. For example, if you have two prompts in your workflow and you're changing LLMs, you'll want to A/B test the change holistically rather than per individual prompt. Our advice: by putting the prompts into code, your operational life will be simpler, since you'll know which prompts belong to which code paths without having to do any mental mapping.
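For illustration, a per-request A/B split on top of the "one Driver per version" idea sketched earlier could look like the following. The input key and function name here are placeholders, not part of the example project:

```python
import logging
import random

logger = logging.getLogger(__name__)


def summarize_with_ab_test(raw_text: str) -> str:
    """Flip a coin per request, run the matching prompt version, and log the assignment."""
    version = "v1" if random.random() < 0.5 else "v2"
    dr = drivers[version]  # built earlier: one Driver per prompt version
    result = dr.execute(["summarized_text"], inputs={"raw_text": raw_text})  # hypothetical input key
    logger.info("A/B assignment=%s", version)  # needed later to tie metrics back to the variant
    return result["summarized_text"]
```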
Summary

In this post, we explained two patterns for managing prompts in a production environment with Hamilton. The first approach treats prompts as dynamic runtime variables, while the second treats prompts as code. If you value reducing operational burden, then our advice is to encode prompts as code, as it is operationally simpler, unless the speed of changing them really matters to you.

To recap:

- Prompts as dynamic runtime variables. Use an external system to pass the prompts to your Hamilton dataflows, or use Hamilton to pull them from a DB. For debugging and monitoring, it's important to be able to determine what prompt was used for a given invocation. You can integrate open source tools, or use something like the DAGWorks Platform, to help ensure you know what was used for any invocation of your code.
- Prompts as code. Encoding the prompts as code allows easy versioning with git. Change management can be done via pull requests and CI/CD checks. It works well with Hamilton features like @config.when and module switching at the Driver level, because they clearly determine which version of a prompt is used. This approach also strengthens any tooling you might use to monitor or track, like the DAGWorks Platform, since the prompts for a deployment are immutable.

We want to hear from you!

If you're excited by any of this, or have strong opinions, leave a comment or drop by our Slack channel! Some links to praise/complain/chat:

- 📣 Join our community on Slack — we're more than happy to help answer questions you might have or get you started.
- ⭐️ Star us on GitHub.
- 📝 Leave us an issue if you find something.
- 📚 Read our documentation.
- ⌨️ Interactively learn about Hamilton in your browser.

Other Hamilton posts you might be interested in:

We have a growing collection of posts & content. Here are some we think you might be interested in.

- Containerized PDF Summarizer with FastAPI and Hamilton (Thierry Jean, DAGWorks Inc., and Stefan Krawczyk · Aug 18): Skip learning convoluted LLM-specific frameworks and write your first LLM application using regular Python functions and Hamilton! In this post, we'll present a containerized PDF summarizer powered by the OpenAI API.
- Building a maintainable and modular LLM application stack with Hamilton (Thierry Jean and DAGWorks Inc. · Jul 11): In this post, we're going to share how Hamilton can help you write modular and maintainable code for your large language model (LLM) application stack. Hamilton is great for describing any type of dataflow, which is exactly what you're doing when building an LLM-powered application.
With Hamilton you get strongRead full storytryhamilton.dev– an interactive tutorial in your browser!Hamilton + Airflow(GitHub repo)Hamilton + Feast(GitHub repo)Pandas data transformations in Hamilton in 5 minutesLineage + Hamilton in 10 minutes4Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShareCommentsTopNewNo postsReady for more?Subscribe Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherDiscover more from DAGWorks’s SubstackThought posts, and updates on Hamilton and the DAGWorks Platform.SubscribeContinue readingSign inLLMOps: Production prompt engineering patterns with HamiltonAn overview of the production grade ways to iterate on prompts with Hamilton.DAGWorks Inc.,Stefan Krawczyk, andThierry JeanSep 6, 20234Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShareWhat you send to your LLM is quite important. Small variations and changes can have large impacts on outputs, so as your product evolves, the need to evolve your prompts will too. LLMs are also constantly being developed and released, and so as LLMs change, your prompts will also need to change. Therefore it’s important to set up an iteration pattern to operationalize how you “deploy” your prompts so you and your team can move efficiently, but also ensure that production issues are minimized, if not avoided. In this post, we’ll guide you through the best practices of managing prompts with Hamilton, making analogies to MLOps patterns, and discussing trade-offs along the way.Notes:(1): if you’re looking for a post that talks about “context management” this isn’t that post. But it is the post that will help you with the nuts and bolts on how to iterate and create that production grade “prompt context management” iteration story.(2): we’ll use prompt & prompt template interchangeably.(3): we’ll assume an “online” web-service setting is where these prompts are being used.(4): we’ll be using ourHamilton’s PDF summarizer exampleto project our patterns onto.(5): not familiar withHamilton? You can either learn about Hamilton viaTry Hamiltonand come back, or get the high level LLMOps approach from this post and then dig into Hamilton via thePDF Summarizer example.(6): what’s our credibility here? We’ve spent our careers building self-service data/MLOps tooling, most famously for Stitch Fix’s 100+ Data Scientists. So we’ve seen our share of outages and approaches play out over time.Thanks for reading DAGWorks’s Substack! Subscribe for free to receive updates and posts like this!SubscribePrompts are to LLMs what hyper-parameters are to ML modelsPoint:Prompts + LLM APIs are analogous to hyper-parameters + machine learning models.In terms of “Ops” practices, LLMOps is still in its infancy. MLOps is a little older, but still neither are widely adopted if you’re comparing it to how widespread knowledge is around DevOps practices.DevOps practices largely concern themselves with how you ship code to production, and MLOps practices how to ship code& data artifacts(e.g., statistical models)to production. So what about LLMOps? 
Personally, I think it’s closer to MLOps since you have:your LLM workflow is simply code.and an LLM API is a data artifact that can be “tweaked” using prompts, similar to a machine learning (ML) model and its hyper-parameters.Therefore, you most likely care about versioning the LLM API + prompts together tightly for good production practices. For instance, in MLOps practice, you’d want a process in place to validate your ML model still behaves correctly whenever its hyper-parameters are changed.How should you think about operationalizing a prompt?To be clear, the two parts to control for are theLLMand theprompts. Much like MLOps, when the code or the model artifact changes, you want to be able to determine which did. For LLMOps, we’ll want the same discernment, separating the LLM workflow from the LLM API + prompts. Importantly, we should consider LLMs (self-hosted or APIs) to be mostly static since we less frequently update (or even control) their internals. So, changing thepromptspart of LLM API + prompts is effectively like creating a new model artifact.There are two main ways to treat prompts:Prompts as dynamic runtime variables. The template used isn’t static to a deployment.Prompts as code.The prompt template is static/ predetermined given a deployment.The main difference is the amount of moving parts you need to manage to ensure a great production story. Below, we dig into how to use Hamilton in the context of these two approaches.Prompts as dynamic runtime variablesDynamically Pass/Load PromptsPrompts are just strings. Since strings are a primitive type in most languages, this means that they are quite easy to pass around.\\xa0 The idea is to abstract your code so that at runtime you pass in the prompts required.\\xa0 More concretely, you’d “load/reload” prompt templates whenever there’s an “updated” one.The MLOps analogy here, would be to auto-reload the ML model artifact (e.g., a pkl file) whenever a new model is available.MLOps Analogy: diagram showing how ML model auto reloading would look.Diagram showing what dynamically reloading/querying prompts would look like.The benefit here is that you can very quickly roll out new prompts because you do not need to redeploy your application!The downside to this iteration speed is increased operational burden:To someone monitoring your application, it’ll be unclear when the change occurred and whether it’s propagated itself through your systems. For example, you just pushed a new prompt, and the LLM now returns more tokens per request, causing latency to spike; whoever is monitoring will likely be puzzled, unless you have a great change log culture.Rollback semantics involve having to know aboutanothersystem. You can’t just rollback a prior deployment to fix things.You’ll need great monitoring to understand what was run and when; e.g., when customer service gives you a ticket to investigate, how do you know what prompt was in use?You’ll need to manage and monitor whatever system you’re using to manage and store your prompts. This will be an extra system you’ll need to maintain outside of whatever is serving your code.You’ll need to manage two processes, one for updating and pushing the service, and one for updating and pushing prompts. Synchronizing these changes will be on you. For example, you need to make a code change to your service to handle a new prompt. 
You will need to coordinate changing two systems to make it work, which is extra operational overhead to manage.How it would work with HamiltonOur PDF summarizer flow would look something like this if you removesummarize_text_from_summaries_promptandsummarize_chunk_of_text_promptfunction definitions:summarization_shortened.py. Note the two inputs “*_prompt” that denote prompts that are now required as input to the dataflow to function. With Hamilton you’ll be able to determine what inputs should be required for your prompt template by just looking at a diagram like this. Diagram created via Hamilton.To operate things, you’ll want to either inject the prompts at request time:from hamilton import base, driver\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(summarization_sortened)\\n .build()\\n)\\n\\n# pull prompts from somewhere\\nsummarize_chunk_of_text_prompt = \"\"\"SOME PROMPT FOR {chunked_text}\"\"\"\\nsummarize_text_from_summaries_prompt = \"\"\"SOME PROMPT {summarized_chunks} ... {user_query}\"\"\"\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n \"summarize_chunk_of_text_prompt\": summarize_chunk_of_text_prompt,\\n ...\\n }\\n)Or\\xa0you change your code to dynamically load prompts, i.e., add functions to retrieve prompts from an external system as part of the Hamilton dataflow. At each invocation, they will query for the prompt to use (you can of course cache this for performance):# prompt_template_loaders.py\\n\\ndef summarize_chunk_of_text_prompt(\\n db_client: Client, other_args: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query( \\n \"get latest prompt X from DB\", other_args)\\n return _prompt\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n return _promptDriver code:from hamilton import base, driver\\nimport prompt_template_loaders # <-- load this to provide prompt input\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(\\n prompt_template_loaders,# <-- Hamilton will call above functions\\n summarization_sortened, \\n )\\n .build()\\n)\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)How would I log prompts used and monitor flows?Here we outline a few ways to monitor what went on.Log results of execution. That is run Hamilton, then emit information to wherever you want it to go.result = dr.execute(\\n [\"summarized_text\", \\n \"summarize_chunk_of_text_prompt\", \\n ... # and anything else you want to pull out\\n \"summarize_text_from_summaries_prompt\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)\\n\\nmy_log_system(result) # send what you want for safe keeping to some\\n # system that you own.Note. In the above, Hamilton allows you to requestanyintermediateoutputs simply by requesting “functions” (i.e. nodes in the diagram) by name. 
If we really want to get all the intermediate outputs of the entire dataflow, we can do so and log it wherever we want to!Use loggers inside Hamilton functions (to see the power of this approach,see my old talk on structured logs):import logging\\n\\nlogger = logging.getLogger(__name__)\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n logger.info(f\"Prompt used is [{_prompt}]\")\\n return _promptExtend Hamilton to emit this information. You use Hamilton to capture information from executed functions, i.e. nodes, without needing to insert logging statement inside the function’s body. This promotes reusability since you can toggle logging between development and production settings at the Driver level. SeeGraphAdapters, or write your ownPython decoratorto wrap functions for monitoring.In any of the above code, you could easily pull in a 3rd party tool to help track & monitor the code, as well as the external API call, e.g. data dog. Note, with a one-line code change, you can plug in the DAGWorks’s Driver and get all that monitoring you’d want and more. (Try the free tierhere)!Prompts as codePrompts as static stringsSince prompts are simply strings, they’re also very amenable to being stored along with your source code. The idea is to store as many prompt versions as you like, within your code so that at runtime, the set of prompts available is fixed and deterministic.The MLOps analogy here is, instead of dynamically reloading models, you instead bake the ML model into the container/hard code the reference. Once deployed, your app has everything that it needs. The deployment is immutable; nothing changes once it’s up. This makes debugging & determining what’s going on, much simpler.MLOps Analogy: make an immutable deployment by making the model fixed for your app’s deployment.Diagram showing how treating prompts as code enables you to leverage your CI/CD and build an immutable deployment for talking to your LLM API.This approach has many operational benefits:Whenever a new prompt is pushed, it forces a new deployment. Rollback semantics are clear if there’s an issue with a new prompt.You can submit a pull request (PR) for the source code and prompts at the same time. It becomes simpler to review what the change is, and the downstream dependencies of what these prompts will touch/interact with.You can add checks to your CI/CD system to ensure bad prompts don’t make it to production.It’s simpler to debug an issue. You just pull the (Docker) container that was created and you’ll be able to exactly replicate any customer issue quickly and easily.There is no other “prompt system” to maintain or manage. Simplifying operations.It doesn’t preclude adding extra monitoring and visibility.How it would work with HamiltonThe prompts would be encoded into functions into the dataflow/directed acyclic graph (DAG):What summarization.py in the PDF summarizer example looks like. The prompt templates are part of the code. Diagram created via Hamilton.Pairing this code withgit, we have a lightweight versioning system for your entire dataflow (i.e. “chain”), so you can always discern what state the world was in, given a git commit SHA. If you want to manage and have access to multiple prompts at any given point in time, Hamilton has two powerful abstractions to enable you to do so:@config.whenandPython modules. 
This allows you to store and keep available all older prompt versions together and specify which one to use via code.@config.when (docs)Hamilton has a concept of decorators, which are just annotations on functions. The@config.whendecorator allows to specify alternative implementations for a functions, i.e. “node”, in your dataflow. In this case, we specify alternative prompts.from hamilton.function_modifiers import config\\n\\n@config.when(version=\"v1\")\\ndef summarize_chunk_of_text_prompt__v1() -> str:\\n \"\"\"V1 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text. Extract any key points with reasoning.\\\\n\\\\nContent:\"\\n\\n@config.when(version=\"v2\")\\ndef summarize_chunk_of_text_prompt__v2(content_type: str = \"an academic paper\") -> str:\\n \"\"\"V2 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text from {content_type}. Extract the key points with reasoning. \\\\n\\\\nContent:\"You can keep adding functions annotated with@config.when, allowing you to swap between them using configuration passed to the HamiltonDriver. When instantiating theDriver, it will construct the dataflow using the prompt implementation associated with the configuration value.from hamilton import base, driver\\nimport summarization\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(summarization)\\n .with_config({\"version\": \"v1\"}) # V1 is chosen. Use \"v2\\' for V2.\\n .build()\\n)Module switchingAlternatively to using@config.when, you can instead place your different prompt implementations into different Python modules. Then, atDriverconstruction time, pass the correct module for the context you want to use.So here we have one module housing V1 of our prompt:# prompts_v1.py\\ndef summarize_chunk_of_text_prompt() -> str:\\n \"\"\"V1 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text. Extract any key points with reasoning.\\\\n\\\\nContent:\"Here we have one module housing V2 (see how they differ slightly):# prompts_v2.py\\ndef summarize_chunk_of_text_prompt(content_type: str = \"an academic paper\") -> str:\\n \"\"\"V2 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text from {content_type}. Extract the key points with reasoning. \\\\n\\\\nContent:\"In the driver code below, we choose the right module to use based on some context.# run.py\\nfrom hamilton import driver\\nimport summarization\\nimport prompts_v1\\nimport prompts_v2\\n\\n# create driver -- passing in the right module we want\\ndr = (\\n driver.Builder()\\n .with_modules(\\n prompts_v1, # or prompts_v2\\n summarization,\\n )\\n .build()\\n)Using the module approach allows us to encapsulate and version whole sets of prompts together. If you want to go back in time (via git), or see what a blessed prompt version was, you just need to navigate to the correct commit, and then look in the right module.How would I log prompts used and monitor flows?Assuming you’re using git to track your code, you wouldn’t need to record what prompts were being used. 
Instead, you’d just need to know what git commit SHA is deployed and you’ll be able to track the version of your code and prompts simultaneously.To monitor flows, just like the above approach, you have the same monitoring hooks available at your disposal, and I wont repeat them here, but they are:Request any intermediate outputs and log them yourself outside of Hamilton.Log them from within the function yourself, or build aPython decorator/GraphAdapterto do it at the framework level.Integrate 3rd party tooling for monitoring your code and LLM API calls, or use the DAGWorks Platform offering to monitor it all. (Try the free tierhere)!or all the above!What about A/B testing my prompts?With any ML initiative, it’s important to measure business impacts of changes. Likewise, with LLMs + prompts, it’ll be important to test and measure changes against important business metrics. In the MLOps world, you’d be A/B testing ML models to evaluate their business value by dividing traffic between them. To ensure the randomness necessary to A/B tests, you wouldn’t know at runtime which model to use until a coin is flipped. However, to get those models out, they both would have follow a process to qualify them. So for prompts, we should think similarly.The above two prompt engineering patterns don’t preclude you from being able to A/B test prompts, but it means you need to manage a process to enable however many prompt templates you’re testing in parallel. If you’re also adjusting code paths, having them in code will be simpler to discern and debug what is going on, and you can make use of the `@config.when` decorator / python module swapping for this purpose. Versus, having to critically rely on your logging/monitoring/observability stack to tell you what prompt was used if you’re dynamically loading/passing them in and then having to mentally map which prompts go with which code paths.Note, this all gets harder if you start needing to change multiple prompts for an A/B test because you have several of them in a flow. For example you have two prompts in your workflow and you’re changing LLMs, you’ll want to A/B test the change holistically, rather than individually per prompt. Our advice, by putting the prompts into code your operational life will be simpler, since you’ll know what two prompts belong to what code paths without having to do any mental mapping.Thank you for reading DAGWorks’s Substack. This post is public so feel free to share it.ShareSummaryIn this post, we explained two patterns for managing prompts in a production environment with Hamilton. The first approach treatsprompts asdynamic runtime variables,while the second, treatsprompts as codefor production settings. If you value reducing operational burden, then our advice is to encode prompts as code, as it is operationally simpler, unless the speed to change them really matters for you.To recap:Prompts as dynamic runtime variables. Use an external system to pass the prompts to your Hamilton dataflows, or use Hamilton to pull them from a DB. For debugging & monitoring, it’s important to be able to determine what prompt was used for a given invocation. You can integrate open source tools, or use something like the DAGWorks Platform to help ensure you know what was used for any invocation of your code.Prompts as code.Encoding the prompts as code allows easy versioning with git. Change management can be done via pull requests and CI/CD checks. 
It works well with Hamilton’s features like@config.whenand module switching at the Driver level because it determines clearly what version of the prompt is used. This approach strengthens the use of any tooling you might use to monitor or track, like the DAGWorks Platform, as prompts for a deployment are immutable.We want to hear from you!If you’re excited by any of this, or have strong opinions, leave a comment, or drop by our Slack channel! Some links to do praise/complain/chat:📣join our community on Slack—\\u200awe’re more than happy to help answer questions you might have or get you started.⭐️ us onGitHub.📝 leave us anissueif you find something.📚 read ourdocumentation.⌨️ interactivelylearn about Hamilton in your browser.Other Hamilton posts you might be interested in:We have a growing collection of posts & content. Here are some we think you might be interested in.Containerized PDF Summarizer with FastAPI and HamiltonThierry Jean,DAGWorks Inc., andStefan Krawczyk·Aug 18Skip learning convoluted LLM-specific frameworks and write your first LLM application using regular Python functions and Hamilton! In this post, we’ll present a containerized PDF summarizer powered by the OpenAI API. Its flow is encoded in Hamilton, which theRead full storyBuilding a maintainable and modular LLM application stack with HamiltonThierry JeanandDAGWorks Inc.·Jul 11In this post, we’re going to share how Hamilton can help you write modular and maintainable code for your large language model (LLM) application stack. Hamilton is great for describing any type of dataflow, which is exactly what you’re doing when building an LLM powered application. With Hamilton you get strongRead full storytryhamilton.dev– an interactive tutorial in your browser!Hamilton + Airflow(GitHub repo)Hamilton + Feast(GitHub repo)Pandas data transformations in Hamilton in 5 minutesLineage + Hamilton in 10 minutes4Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShareCommentsTopNewNo postsReady for more?Subscribe Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherDiscover more from DAGWorks’s SubstackThought posts, and updates on Hamilton and the DAGWorks Platform.SubscribeContinue readingSign inLLMOps: Production prompt engineering patterns with HamiltonAn overview of the production grade ways to iterate on prompts with Hamilton.DAGWorks Inc.,Stefan Krawczyk, andThierry JeanSep 6, 20234Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShareWhat you send to your LLM is quite important. Small variations and changes can have large impacts on outputs, so as your product evolves, the need to evolve your prompts will too. LLMs are also constantly being developed and released, and so as LLMs change, your prompts will also need to change. Therefore it’s important to set up an iteration pattern to operationalize how you “deploy” your prompts so you and your team can move efficiently, but also ensure that production issues are minimized, if not avoided. In this post, we’ll guide you through the best practices of managing prompts with Hamilton, making analogies to MLOps patterns, and discussing trade-offs along the way.Notes:(1): if you’re looking for a post that talks about “context management” this isn’t that post. 
But it is the post that will help you with the nuts and bolts on how to iterate and create that production grade “prompt context management” iteration story.(2): we’ll use prompt & prompt template interchangeably.(3): we’ll assume an “online” web-service setting is where these prompts are being used.(4): we’ll be using ourHamilton’s PDF summarizer exampleto project our patterns onto.(5): not familiar withHamilton? You can either learn about Hamilton viaTry Hamiltonand come back, or get the high level LLMOps approach from this post and then dig into Hamilton via thePDF Summarizer example.(6): what’s our credibility here? We’ve spent our careers building self-service data/MLOps tooling, most famously for Stitch Fix’s 100+ Data Scientists. So we’ve seen our share of outages and approaches play out over time.Thanks for reading DAGWorks’s Substack! Subscribe for free to receive updates and posts like this!SubscribePrompts are to LLMs what hyper-parameters are to ML modelsPoint:Prompts + LLM APIs are analogous to hyper-parameters + machine learning models.In terms of “Ops” practices, LLMOps is still in its infancy. MLOps is a little older, but still neither are widely adopted if you’re comparing it to how widespread knowledge is around DevOps practices.DevOps practices largely concern themselves with how you ship code to production, and MLOps practices how to ship code& data artifacts(e.g., statistical models)to production. So what about LLMOps? Personally, I think it’s closer to MLOps since you have:your LLM workflow is simply code.and an LLM API is a data artifact that can be “tweaked” using prompts, similar to a machine learning (ML) model and its hyper-parameters.Therefore, you most likely care about versioning the LLM API + prompts together tightly for good production practices. For instance, in MLOps practice, you’d want a process in place to validate your ML model still behaves correctly whenever its hyper-parameters are changed.How should you think about operationalizing a prompt?To be clear, the two parts to control for are theLLMand theprompts. Much like MLOps, when the code or the model artifact changes, you want to be able to determine which did. For LLMOps, we’ll want the same discernment, separating the LLM workflow from the LLM API + prompts. Importantly, we should consider LLMs (self-hosted or APIs) to be mostly static since we less frequently update (or even control) their internals. So, changing thepromptspart of LLM API + prompts is effectively like creating a new model artifact.There are two main ways to treat prompts:Prompts as dynamic runtime variables. The template used isn’t static to a deployment.Prompts as code.The prompt template is static/ predetermined given a deployment.The main difference is the amount of moving parts you need to manage to ensure a great production story. Below, we dig into how to use Hamilton in the context of these two approaches.Prompts as dynamic runtime variablesDynamically Pass/Load PromptsPrompts are just strings. 
Since strings are a primitive type in most languages, this means that they are quite easy to pass around.\\xa0 The idea is to abstract your code so that at runtime you pass in the prompts required.\\xa0 More concretely, you’d “load/reload” prompt templates whenever there’s an “updated” one.The MLOps analogy here, would be to auto-reload the ML model artifact (e.g., a pkl file) whenever a new model is available.MLOps Analogy: diagram showing how ML model auto reloading would look.Diagram showing what dynamically reloading/querying prompts would look like.The benefit here is that you can very quickly roll out new prompts because you do not need to redeploy your application!The downside to this iteration speed is increased operational burden:To someone monitoring your application, it’ll be unclear when the change occurred and whether it’s propagated itself through your systems. For example, you just pushed a new prompt, and the LLM now returns more tokens per request, causing latency to spike; whoever is monitoring will likely be puzzled, unless you have a great change log culture.Rollback semantics involve having to know aboutanothersystem. You can’t just rollback a prior deployment to fix things.You’ll need great monitoring to understand what was run and when; e.g., when customer service gives you a ticket to investigate, how do you know what prompt was in use?You’ll need to manage and monitor whatever system you’re using to manage and store your prompts. This will be an extra system you’ll need to maintain outside of whatever is serving your code.You’ll need to manage two processes, one for updating and pushing the service, and one for updating and pushing prompts. Synchronizing these changes will be on you. For example, you need to make a code change to your service to handle a new prompt. You will need to coordinate changing two systems to make it work, which is extra operational overhead to manage.How it would work with HamiltonOur PDF summarizer flow would look something like this if you removesummarize_text_from_summaries_promptandsummarize_chunk_of_text_promptfunction definitions:summarization_shortened.py. Note the two inputs “*_prompt” that denote prompts that are now required as input to the dataflow to function. With Hamilton you’ll be able to determine what inputs should be required for your prompt template by just looking at a diagram like this. Diagram created via Hamilton.To operate things, you’ll want to either inject the prompts at request time:from hamilton import base, driver\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(summarization_sortened)\\n .build()\\n)\\n\\n# pull prompts from somewhere\\nsummarize_chunk_of_text_prompt = \"\"\"SOME PROMPT FOR {chunked_text}\"\"\"\\nsummarize_text_from_summaries_prompt = \"\"\"SOME PROMPT {summarized_chunks} ... {user_query}\"\"\"\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n \"summarize_chunk_of_text_prompt\": summarize_chunk_of_text_prompt,\\n ...\\n }\\n)Or\\xa0you change your code to dynamically load prompts, i.e., add functions to retrieve prompts from an external system as part of the Hamilton dataflow. 
At each invocation, they will query for the prompt to use (you can of course cache this for performance):# prompt_template_loaders.py\\n\\ndef summarize_chunk_of_text_prompt(\\n db_client: Client, other_args: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query( \\n \"get latest prompt X from DB\", other_args)\\n return _prompt\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n return _promptDriver code:from hamilton import base, driver\\nimport prompt_template_loaders # <-- load this to provide prompt input\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(\\n prompt_template_loaders,# <-- Hamilton will call above functions\\n summarization_sortened, \\n )\\n .build()\\n)\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)How would I log prompts used and monitor flows?Here we outline a few ways to monitor what went on.Log results of execution. That is run Hamilton, then emit information to wherever you want it to go.result = dr.execute(\\n [\"summarized_text\", \\n \"summarize_chunk_of_text_prompt\", \\n ... # and anything else you want to pull out\\n \"summarize_text_from_summaries_prompt\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)\\n\\nmy_log_system(result) # send what you want for safe keeping to some\\n # system that you own.Note. In the above, Hamilton allows you to requestanyintermediateoutputs simply by requesting “functions” (i.e. nodes in the diagram) by name. If we really want to get all the intermediate outputs of the entire dataflow, we can do so and log it wherever we want to!Use loggers inside Hamilton functions (to see the power of this approach,see my old talk on structured logs):import logging\\n\\nlogger = logging.getLogger(__name__)\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n logger.info(f\"Prompt used is [{_prompt}]\")\\n return _promptExtend Hamilton to emit this information. You use Hamilton to capture information from executed functions, i.e. nodes, without needing to insert logging statement inside the function’s body. This promotes reusability since you can toggle logging between development and production settings at the Driver level. SeeGraphAdapters, or write your ownPython decoratorto wrap functions for monitoring.In any of the above code, you could easily pull in a 3rd party tool to help track & monitor the code, as well as the external API call, e.g. data dog. Note, with a one-line code change, you can plug in the DAGWorks’s Driver and get all that monitoring you’d want and more. (Try the free tierhere)!Prompts as codePrompts as static stringsSince prompts are simply strings, they’re also very amenable to being stored along with your source code. The idea is to store as many prompt versions as you like, within your code so that at runtime, the set of prompts available is fixed and deterministic.The MLOps analogy here is, instead of dynamically reloading models, you instead bake the ML model into the container/hard code the reference. Once deployed, your app has everything that it needs. 
The deployment is immutable; nothing changes once it’s up. This makes debugging & determining what’s going on, much simpler.MLOps Analogy: make an immutable deployment by making the model fixed for your app’s deployment.Diagram showing how treating prompts as code enables you to leverage your CI/CD and build an immutable deployment for talking to your LLM API.This approach has many operational benefits:Whenever a new prompt is pushed, it forces a new deployment. Rollback semantics are clear if there’s an issue with a new prompt.You can submit a pull request (PR) for the source code and prompts at the same time. It becomes simpler to review what the change is, and the downstream dependencies of what these prompts will touch/interact with.You can add checks to your CI/CD system to ensure bad prompts don’t make it to production.It’s simpler to debug an issue. You just pull the (Docker) container that was created and you’ll be able to exactly replicate any customer issue quickly and easily.There is no other “prompt system” to maintain or manage. Simplifying operations.It doesn’t preclude adding extra monitoring and visibility.How it would work with HamiltonThe prompts would be encoded into functions into the dataflow/directed acyclic graph (DAG):What summarization.py in the PDF summarizer example looks like. The prompt templates are part of the code. Diagram created via Hamilton.Pairing this code withgit, we have a lightweight versioning system for your entire dataflow (i.e. “chain”), so you can always discern what state the world was in, given a git commit SHA. If you want to manage and have access to multiple prompts at any given point in time, Hamilton has two powerful abstractions to enable you to do so:@config.whenandPython modules. This allows you to store and keep available all older prompt versions together and specify which one to use via code.@config.when (docs)Hamilton has a concept of decorators, which are just annotations on functions. The@config.whendecorator allows to specify alternative implementations for a functions, i.e. “node”, in your dataflow. In this case, we specify alternative prompts.from hamilton.function_modifiers import config\\n\\n@config.when(version=\"v1\")\\ndef summarize_chunk_of_text_prompt__v1() -> str:\\n \"\"\"V1 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text. Extract any key points with reasoning.\\\\n\\\\nContent:\"\\n\\n@config.when(version=\"v2\")\\ndef summarize_chunk_of_text_prompt__v2(content_type: str = \"an academic paper\") -> str:\\n \"\"\"V2 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text from {content_type}. Extract the key points with reasoning. \\\\n\\\\nContent:\"You can keep adding functions annotated with@config.when, allowing you to swap between them using configuration passed to the HamiltonDriver. When instantiating theDriver, it will construct the dataflow using the prompt implementation associated with the configuration value.from hamilton import base, driver\\nimport summarization\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(summarization)\\n .with_config({\"version\": \"v1\"}) # V1 is chosen. Use \"v2\\' for V2.\\n .build()\\n)Module switchingAlternatively to using@config.when, you can instead place your different prompt implementations into different Python modules. 
Then, atDriverconstruction time, pass the correct module for the context you want to use.So here we have one module housing V1 of our prompt:# prompts_v1.py\\ndef summarize_chunk_of_text_prompt() -> str:\\n \"\"\"V1 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text. Extract any key points with reasoning.\\\\n\\\\nContent:\"Here we have one module housing V2 (see how they differ slightly):# prompts_v2.py\\ndef summarize_chunk_of_text_prompt(content_type: str = \"an academic paper\") -> str:\\n \"\"\"V2 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text from {content_type}. Extract the key points with reasoning. \\\\n\\\\nContent:\"In the driver code below, we choose the right module to use based on some context.# run.py\\nfrom hamilton import driver\\nimport summarization\\nimport prompts_v1\\nimport prompts_v2\\n\\n# create driver -- passing in the right module we want\\ndr = (\\n driver.Builder()\\n .with_modules(\\n prompts_v1, # or prompts_v2\\n summarization,\\n )\\n .build()\\n)Using the module approach allows us to encapsulate and version whole sets of prompts together. If you want to go back in time (via git), or see what a blessed prompt version was, you just need to navigate to the correct commit, and then look in the right module.How would I log prompts used and monitor flows?Assuming you’re using git to track your code, you wouldn’t need to record what prompts were being used. Instead, you’d just need to know what git commit SHA is deployed and you’ll be able to track the version of your code and prompts simultaneously.To monitor flows, just like the above approach, you have the same monitoring hooks available at your disposal, and I wont repeat them here, but they are:Request any intermediate outputs and log them yourself outside of Hamilton.Log them from within the function yourself, or build aPython decorator/GraphAdapterto do it at the framework level.Integrate 3rd party tooling for monitoring your code and LLM API calls, or use the DAGWorks Platform offering to monitor it all. (Try the free tierhere)!or all the above!What about A/B testing my prompts?With any ML initiative, it’s important to measure business impacts of changes. Likewise, with LLMs + prompts, it’ll be important to test and measure changes against important business metrics. In the MLOps world, you’d be A/B testing ML models to evaluate their business value by dividing traffic between them. To ensure the randomness necessary to A/B tests, you wouldn’t know at runtime which model to use until a coin is flipped. However, to get those models out, they both would have follow a process to qualify them. So for prompts, we should think similarly.The above two prompt engineering patterns don’t preclude you from being able to A/B test prompts, but it means you need to manage a process to enable however many prompt templates you’re testing in parallel. If you’re also adjusting code paths, having them in code will be simpler to discern and debug what is going on, and you can make use of the `@config.when` decorator / python module swapping for this purpose. Versus, having to critically rely on your logging/monitoring/observability stack to tell you what prompt was used if you’re dynamically loading/passing them in and then having to mentally map which prompts go with which code paths.Note, this all gets harder if you start needing to change multiple prompts for an A/B test because you have several of them in a flow. 
For example you have two prompts in your workflow and you’re changing LLMs, you’ll want to A/B test the change holistically, rather than individually per prompt. Our advice, by putting the prompts into code your operational life will be simpler, since you’ll know what two prompts belong to what code paths without having to do any mental mapping.Thank you for reading DAGWorks’s Substack. This post is public so feel free to share it.ShareSummaryIn this post, we explained two patterns for managing prompts in a production environment with Hamilton. The first approach treatsprompts asdynamic runtime variables,while the second, treatsprompts as codefor production settings. If you value reducing operational burden, then our advice is to encode prompts as code, as it is operationally simpler, unless the speed to change them really matters for you.To recap:Prompts as dynamic runtime variables. Use an external system to pass the prompts to your Hamilton dataflows, or use Hamilton to pull them from a DB. For debugging & monitoring, it’s important to be able to determine what prompt was used for a given invocation. You can integrate open source tools, or use something like the DAGWorks Platform to help ensure you know what was used for any invocation of your code.Prompts as code.Encoding the prompts as code allows easy versioning with git. Change management can be done via pull requests and CI/CD checks. It works well with Hamilton’s features like@config.whenand module switching at the Driver level because it determines clearly what version of the prompt is used. This approach strengthens the use of any tooling you might use to monitor or track, like the DAGWorks Platform, as prompts for a deployment are immutable.We want to hear from you!If you’re excited by any of this, or have strong opinions, leave a comment, or drop by our Slack channel! Some links to do praise/complain/chat:📣join our community on Slack—\\u200awe’re more than happy to help answer questions you might have or get you started.⭐️ us onGitHub.📝 leave us anissueif you find something.📚 read ourdocumentation.⌨️ interactivelylearn about Hamilton in your browser.Other Hamilton posts you might be interested in:We have a growing collection of posts & content. Here are some we think you might be interested in.Containerized PDF Summarizer with FastAPI and HamiltonThierry Jean,DAGWorks Inc., andStefan Krawczyk·Aug 18Skip learning convoluted LLM-specific frameworks and write your first LLM application using regular Python functions and Hamilton! In this post, we’ll present a containerized PDF summarizer powered by the OpenAI API. Its flow is encoded in Hamilton, which theRead full storyBuilding a maintainable and modular LLM application stack with HamiltonThierry JeanandDAGWorks Inc.·Jul 11In this post, we’re going to share how Hamilton can help you write modular and maintainable code for your large language model (LLM) application stack. Hamilton is great for describing any type of dataflow, which is exactly what you’re doing when building an LLM powered application. 
With Hamilton you get strongRead full storytryhamilton.dev– an interactive tutorial in your browser!Hamilton + Airflow(GitHub repo)Hamilton + Feast(GitHub repo)Pandas data transformations in Hamilton in 5 minutesLineage + Hamilton in 10 minutes4Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShare Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherDiscover more from DAGWorks’s SubstackThought posts, and updates on Hamilton and the DAGWorks Platform.SubscribeContinue readingSign inLLMOps: Production prompt engineering patterns with HamiltonAn overview of the production grade ways to iterate on prompts with Hamilton.DAGWorks Inc.,Stefan Krawczyk, andThierry JeanSep 6, 20234Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShareWhat you send to your LLM is quite important. Small variations and changes can have large impacts on outputs, so as your product evolves, the need to evolve your prompts will too. LLMs are also constantly being developed and released, and so as LLMs change, your prompts will also need to change. Therefore it’s important to set up an iteration pattern to operationalize how you “deploy” your prompts so you and your team can move efficiently, but also ensure that production issues are minimized, if not avoided. In this post, we’ll guide you through the best practices of managing prompts with Hamilton, making analogies to MLOps patterns, and discussing trade-offs along the way.Notes:(1): if you’re looking for a post that talks about “context management” this isn’t that post. But it is the post that will help you with the nuts and bolts on how to iterate and create that production grade “prompt context management” iteration story.(2): we’ll use prompt & prompt template interchangeably.(3): we’ll assume an “online” web-service setting is where these prompts are being used.(4): we’ll be using ourHamilton’s PDF summarizer exampleto project our patterns onto.(5): not familiar withHamilton? You can either learn about Hamilton viaTry Hamiltonand come back, or get the high level LLMOps approach from this post and then dig into Hamilton via thePDF Summarizer example.(6): what’s our credibility here? We’ve spent our careers building self-service data/MLOps tooling, most famously for Stitch Fix’s 100+ Data Scientists. So we’ve seen our share of outages and approaches play out over time.Thanks for reading DAGWorks’s Substack! Subscribe for free to receive updates and posts like this!SubscribePrompts are to LLMs what hyper-parameters are to ML modelsPoint:Prompts + LLM APIs are analogous to hyper-parameters + machine learning models.In terms of “Ops” practices, LLMOps is still in its infancy. MLOps is a little older, but still neither are widely adopted if you’re comparing it to how widespread knowledge is around DevOps practices.DevOps practices largely concern themselves with how you ship code to production, and MLOps practices how to ship code& data artifacts(e.g., statistical models)to production. So what about LLMOps? Personally, I think it’s closer to MLOps since you have:your LLM workflow is simply code.and an LLM API is a data artifact that can be “tweaked” using prompts, similar to a machine learning (ML) model and its hyper-parameters.Therefore, you most likely care about versioning the LLM API + prompts together tightly for good production practices. 
For instance, in MLOps practice, you’d want a process in place to validate your ML model still behaves correctly whenever its hyper-parameters are changed.How should you think about operationalizing a prompt?To be clear, the two parts to control for are theLLMand theprompts. Much like MLOps, when the code or the model artifact changes, you want to be able to determine which did. For LLMOps, we’ll want the same discernment, separating the LLM workflow from the LLM API + prompts. Importantly, we should consider LLMs (self-hosted or APIs) to be mostly static since we less frequently update (or even control) their internals. So, changing thepromptspart of LLM API + prompts is effectively like creating a new model artifact.There are two main ways to treat prompts:Prompts as dynamic runtime variables. The template used isn’t static to a deployment.Prompts as code.The prompt template is static/ predetermined given a deployment.The main difference is the amount of moving parts you need to manage to ensure a great production story. Below, we dig into how to use Hamilton in the context of these two approaches.Prompts as dynamic runtime variablesDynamically Pass/Load PromptsPrompts are just strings. Since strings are a primitive type in most languages, this means that they are quite easy to pass around.\\xa0 The idea is to abstract your code so that at runtime you pass in the prompts required.\\xa0 More concretely, you’d “load/reload” prompt templates whenever there’s an “updated” one.The MLOps analogy here, would be to auto-reload the ML model artifact (e.g., a pkl file) whenever a new model is available.MLOps Analogy: diagram showing how ML model auto reloading would look.Diagram showing what dynamically reloading/querying prompts would look like.The benefit here is that you can very quickly roll out new prompts because you do not need to redeploy your application!The downside to this iteration speed is increased operational burden:To someone monitoring your application, it’ll be unclear when the change occurred and whether it’s propagated itself through your systems. For example, you just pushed a new prompt, and the LLM now returns more tokens per request, causing latency to spike; whoever is monitoring will likely be puzzled, unless you have a great change log culture.Rollback semantics involve having to know aboutanothersystem. You can’t just rollback a prior deployment to fix things.You’ll need great monitoring to understand what was run and when; e.g., when customer service gives you a ticket to investigate, how do you know what prompt was in use?You’ll need to manage and monitor whatever system you’re using to manage and store your prompts. This will be an extra system you’ll need to maintain outside of whatever is serving your code.You’ll need to manage two processes, one for updating and pushing the service, and one for updating and pushing prompts. Synchronizing these changes will be on you. For example, you need to make a code change to your service to handle a new prompt. You will need to coordinate changing two systems to make it work, which is extra operational overhead to manage.How it would work with HamiltonOur PDF summarizer flow would look something like this if you removesummarize_text_from_summaries_promptandsummarize_chunk_of_text_promptfunction definitions:summarization_shortened.py. Note the two inputs “*_prompt” that denote prompts that are now required as input to the dataflow to function. 
To operate things, you'll want to either inject the prompts at request time:

```python
from hamilton import driver

import summarization_shortened

# create driver
dr = (
    driver.Builder()
    .with_modules(summarization_shortened)
    .build()
)

# pull prompts from somewhere
summarize_chunk_of_text_prompt = """SOME PROMPT FOR {chunked_text}"""
summarize_text_from_summaries_prompt = """SOME PROMPT {summarized_chunks} ... {user_query}"""

# execute, and pass in the prompts
result = dr.execute(
    ["summarized_text"],
    inputs={
        "summarize_chunk_of_text_prompt": summarize_chunk_of_text_prompt,
        "summarize_text_from_summaries_prompt": summarize_text_from_summaries_prompt,
    },
)
```

Or you change your code to dynamically load prompts, i.e., add functions that retrieve prompts from an external system as part of the Hamilton dataflow. At each invocation, they will query for the prompt to use (you can of course cache this for performance):

```python
# prompt_template_loaders.py


def summarize_chunk_of_text_prompt(db_client: Client, other_args: str) -> str:
    # pseudo code here, but you get the idea:
    _prompt = db_client.query("get latest prompt X from DB", other_args)
    return _prompt


def summarize_text_from_summaries_prompt(db_client: Client, another_arg: str) -> str:
    # pseudo code here, but you get the idea:
    _prompt = db_client.query("get latest prompt Y from DB", another_arg)
    return _prompt
```

Driver code:

```python
from hamilton import driver

import prompt_template_loaders  # <-- load this to provide prompt input
import summarization_shortened

# create driver
dr = (
    driver.Builder()
    .with_modules(
        prompt_template_loaders,  # <-- Hamilton will call the functions above
        summarization_shortened,
    )
    .build()
)

# execute
result = dr.execute(
    ["summarized_text"],
    inputs={
        # don't need to pass prompts in this version
    },
)
```

How would I log prompts used and monitor flows?

Here we outline a few ways to monitor what went on.

Log results of execution. That is, run Hamilton, then emit information to wherever you want it to go.

```python
result = dr.execute(
    [
        "summarized_text",
        "summarize_chunk_of_text_prompt",
        # ... and anything else you want to pull out
        "summarize_text_from_summaries_prompt",
    ],
    inputs={
        # don't need to pass prompts in this version
    },
)

my_log_system(result)  # send what you want for safe keeping to some
                       # system that you own
```

Note: in the above, Hamilton allows you to request any intermediate output simply by requesting "functions" (i.e., nodes in the diagram) by name. If we really want to get all the intermediate outputs of the entire dataflow, we can do so and log them wherever we want to!

Use loggers inside Hamilton functions (to see the power of this approach, see my old talk on structured logs):

```python
import logging

logger = logging.getLogger(__name__)


def summarize_text_from_summaries_prompt(db_client: Client, another_arg: str) -> str:
    # pseudo code here, but you get the idea:
    _prompt = db_client.query("get latest prompt Y from DB", another_arg)
    logger.info(f"Prompt used is [{_prompt}]")
    return _prompt
```

Extend Hamilton to emit this information. You can use Hamilton to capture information from executed functions, i.e., nodes, without needing to insert logging statements inside the function's body. This promotes reusability, since you can toggle logging between development and production settings at the Driver level. See GraphAdapters, or write your own Python decorator to wrap functions for monitoring.
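As a rough illustration of the "write your own Python decorator" route, here is a plain-Python sketch that wraps a dataflow function to log its inputs and output. This is not a Hamilton-specific API; log_node and the toy prompt function are hypothetical names used only to show the idea.

```python
import functools
import logging

logger = logging.getLogger(__name__)


def log_node(fn):
    """Illustrative decorator: log a function's inputs and output.

    Apply it to functions in your dataflow module, or adapt the idea into a
    GraphAdapter if you want it handled at the framework level.
    """

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        logger.info("running %s with args=%s kwargs=%s", fn.__name__, args, kwargs)
        result = fn(*args, **kwargs)
        logger.info("%s returned %r", fn.__name__, result)
        return result

    return wrapper


@log_node
def summarize_chunk_of_text_prompt() -> str:
    """Toy prompt node so the decorator has something to wrap."""
    return "Summarize this text. Extract any key points with reasoning.\n\nContent:"
```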
In any of the above code, you could easily pull in a third-party tool to help track & monitor the code, as well as the external API call, e.g., Datadog. Note, with a one-line code change, you can plug in the DAGWorks Driver and get all the monitoring you'd want and more (try the free tier here)!

Prompts as code

Prompts as static strings

Since prompts are simply strings, they're also very amenable to being stored along with your source code. The idea is to store as many prompt versions as you like within your code, so that at runtime the set of prompts available is fixed and deterministic.

The MLOps analogy here is that, instead of dynamically reloading models, you bake the ML model into the container / hard code the reference. Once deployed, your app has everything that it needs. The deployment is immutable; nothing changes once it's up. This makes debugging & determining what's going on much simpler.

MLOps analogy: make an immutable deployment by fixing the model for your app's deployment.
Diagram showing how treating prompts as code enables you to leverage your CI/CD and build an immutable deployment for talking to your LLM API.

This approach has many operational benefits:
- Whenever a new prompt is pushed, it forces a new deployment. Rollback semantics are clear if there's an issue with a new prompt.
- You can submit a pull request (PR) for the source code and prompts at the same time. It becomes simpler to review what the change is and the downstream dependencies of what these prompts will touch/interact with.
- You can add checks to your CI/CD system to ensure bad prompts don't make it to production.
- It's simpler to debug an issue. You just pull the (Docker) container that was created and you'll be able to exactly replicate any customer issue quickly and easily.
- There is no other "prompt system" to maintain or manage, which simplifies operations.
- It doesn't preclude adding extra monitoring and visibility.

How it would work with Hamilton

The prompts would be encoded as functions in the dataflow / directed acyclic graph (DAG):

What summarization.py in the PDF summarizer example looks like. The prompt templates are part of the code. Diagram created via Hamilton.

Pairing this code with git, we have a lightweight versioning system for your entire dataflow (i.e., "chain"), so you can always discern what state the world was in, given a git commit SHA. If you want to manage and have access to multiple prompts at any given point in time, Hamilton has two powerful abstractions to enable you to do so: @config.when and Python modules. These allow you to keep all older prompt versions available together and specify which one to use via code.
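To make "prompts encoded as functions" concrete, here is a minimal sketch under assumed names (the real summarization.py differs): the prompt template is just another Hamilton function, and a downstream node depends on it through its parameter name, so git versions both together.

```python
# Hypothetical sketch of the "prompts as code" idea; names are illustrative.


def summarize_chunk_of_text_prompt() -> str:
    """The prompt template lives in code and is versioned with git."""
    return "Summarize this text. Extract any key points with reasoning.\n\nContent:"


def prompt_for_chunk(summarize_chunk_of_text_prompt: str, chunk: str) -> str:
    """Hamilton wires the prompt node into this node via the parameter name."""
    return summarize_chunk_of_text_prompt + chunk
```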
@config.when (docs)

Hamilton has a concept of decorators, which are just annotations on functions. The @config.when decorator allows you to specify alternative implementations for a function, i.e., a "node", in your dataflow. In this case, we specify alternative prompts.

```python
from hamilton.function_modifiers import config


@config.when(version="v1")
def summarize_chunk_of_text_prompt__v1() -> str:
    """V1 prompt for summarizing chunks of text."""
    return "Summarize this text. Extract any key points with reasoning.\n\nContent:"


@config.when(version="v2")
def summarize_chunk_of_text_prompt__v2(content_type: str = "an academic paper") -> str:
    """V2 prompt for summarizing chunks of text."""
    return f"Summarize this text from {content_type}. Extract the key points with reasoning. \n\nContent:"
```

You can keep adding functions annotated with @config.when, allowing you to swap between them using configuration passed to the Hamilton Driver. When instantiating the Driver, it will construct the dataflow using the prompt implementation associated with the configuration value.

```python
from hamilton import driver

import summarization

# create driver
dr = (
    driver.Builder()
    .with_modules(summarization)
    .with_config({"version": "v1"})  # V1 is chosen. Use "v2" for V2.
    .build()
)
```

Module switching

As an alternative to using @config.when, you can instead place your different prompt implementations into different Python modules. Then, at Driver construction time, pass in the correct module for the context you want to use.

Here we have one module housing V1 of our prompt:

```python
# prompts_v1.py
def summarize_chunk_of_text_prompt() -> str:
    """V1 prompt for summarizing chunks of text."""
    return "Summarize this text. Extract any key points with reasoning.\n\nContent:"
```

And here we have one module housing V2 (see how they differ slightly):

```python
# prompts_v2.py
def summarize_chunk_of_text_prompt(content_type: str = "an academic paper") -> str:
    """V2 prompt for summarizing chunks of text."""
    return f"Summarize this text from {content_type}. Extract the key points with reasoning. \n\nContent:"
```

In the driver code below, we choose the right module to use based on some context:

```python
# run.py
from hamilton import driver

import summarization
import prompts_v1
import prompts_v2

# create driver -- passing in the right module we want
dr = (
    driver.Builder()
    .with_modules(
        prompts_v1,  # or prompts_v2
        summarization,
    )
    .build()
)
```

Using the module approach allows us to encapsulate and version whole sets of prompts together. If you want to go back in time (via git), or see what a blessed prompt version was, you just need to navigate to the correct commit and then look in the right module.

How would I log prompts used and monitor flows?

Assuming you're using git to track your code, you wouldn't need to record what prompts were being used. Instead, you'd just need to know what git commit SHA is deployed, and you'll be able to track the version of your code and prompts simultaneously. To monitor flows, just like the above approach, you have the same monitoring hooks available at your disposal; I won't repeat them here, but they are:
- Request any intermediate outputs and log them yourself outside of Hamilton.
- Log them from within the function yourself, or build a Python decorator / GraphAdapter to do it at the framework level.
- Integrate third-party tooling for monitoring your code and LLM API calls, or use the DAGWorks Platform offering to monitor it all (try the free tier here)!
- Or all of the above!
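As a small, hypothetical sketch of that idea: bake the commit SHA into the deployment (for example via a build argument that ends up as an environment variable; GIT_SHA is a made-up name here) and attach it to whatever you log per invocation, so any result can be traced back to the exact code + prompts.

```python
import logging
import os

logger = logging.getLogger(__name__)

# Hypothetical: the deploy pipeline bakes the commit SHA into the image, e.g.
#   docker build --build-arg GIT_SHA=$(git rev-parse HEAD) ...
# and exposes it to the app as an environment variable.
GIT_SHA = os.environ.get("GIT_SHA", "unknown")


def log_run(outputs: dict) -> None:
    """Attach the deployed commit SHA to whatever you log per invocation."""
    logger.info("git_sha=%s outputs=%s", GIT_SHA, list(outputs))
```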
What about A/B testing my prompts?

With any ML initiative, it's important to measure the business impact of changes. Likewise, with LLMs + prompts, it'll be important to test and measure changes against important business metrics. In the MLOps world, you'd be A/B testing ML models to evaluate their business value by dividing traffic between them. To ensure the randomness necessary for A/B tests, you wouldn't know at runtime which model to use until a coin is flipped. However, to get those models out, they both would have to follow a process to qualify them. So for prompts, we should think similarly.

The above two prompt engineering patterns don't preclude you from A/B testing prompts, but it means you need to manage a process to enable however many prompt templates you're testing in parallel. If you're also adjusting code paths, having the prompts in code will make it simpler to discern and debug what is going on, and you can make use of the @config.when decorator / Python module swapping for this purpose. Contrast that with having to critically rely on your logging/monitoring/observability stack to tell you what prompt was used if you're dynamically loading/passing them in, and then having to mentally map which prompts go with which code paths.

Note, this all gets harder if you start needing to change multiple prompts for an A/B test because you have several of them in a flow. For example, if you have two prompts in your workflow and you're changing LLMs, you'll want to A/B test the change holistically, rather than individually per prompt. Our advice: by putting the prompts into code, your operational life will be simpler, since you'll know which prompts belong to which code paths without having to do any mental mapping.
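To sketch how this could look with the prompts-as-code pattern, here is a hypothetical A/B routing helper: it buckets a stable user id into "v1" or "v2" and builds a Driver with the matching configuration. The helper names and the 50/50 split are assumptions, and in practice you'd likely build both drivers once at startup and pick between them per request rather than rebuilding each time.

```python
import hashlib

from hamilton import driver

import summarization  # the example module whose prompts are versioned via @config.when


def prompt_version_for(user_id: str, v2_fraction: float = 0.5) -> str:
    """Deterministically bucket a user into the A or B arm.

    Hashing a stable id (instead of flipping a coin per request) keeps a given
    user on the same prompt version for the duration of the experiment.
    """
    bucket = int(hashlib.sha256(user_id.encode()).hexdigest(), 16) % 100
    return "v2" if bucket < v2_fraction * 100 else "v1"


def build_driver(user_id: str) -> driver.Driver:
    """Build a Driver wired to the prompt version chosen for this user."""
    return (
        driver.Builder()
        .with_modules(summarization)
        .with_config({"version": prompt_version_for(user_id)})
        .build()
    )
```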
Thank you for reading DAGWorks's Substack. This post is public, so feel free to share it.

Summary

In this post, we explained two patterns for managing prompts in a production environment with Hamilton. The first approach treats prompts as dynamic runtime variables, while the second treats prompts as code for production settings. If you value reducing operational burden, then our advice is to encode prompts as code, as it is operationally simpler, unless the speed of changing them really matters for you.

To recap:
- Prompts as dynamic runtime variables. Use an external system to pass the prompts to your Hamilton dataflows, or use Hamilton to pull them from a DB. For debugging & monitoring, it's important to be able to determine what prompt was used for a given invocation. You can integrate open source tools, or use something like the DAGWorks Platform, to help ensure you know what was used for any invocation of your code.
- Prompts as code. Encoding the prompts as code allows easy versioning with git. Change management can be done via pull requests and CI/CD checks. It works well with Hamilton's features like @config.when and module switching at the Driver level, because they determine clearly what version of the prompt is used. This approach strengthens the use of any tooling you might use to monitor or track, like the DAGWorks Platform, as prompts for a deployment are immutable.

We want to hear from you!

If you're excited by any of this, or have strong opinions, leave a comment, or drop by our Slack channel! Some links to praise/complain/chat:
- 📣 join our community on Slack; we're more than happy to help answer questions you might have or get you started.
- ⭐️ us on GitHub.
- 📝 leave us an issue if you find something.
- 📚 read our documentation.
- ⌨️ interactively learn about Hamilton in your browser.

Other Hamilton posts you might be interested in:

We have a growing collection of posts & content. Here are some we think you might be interested in.

Containerized PDF Summarizer with FastAPI and Hamilton
Thierry Jean, DAGWorks Inc., and Stefan Krawczyk · Aug 18
Skip learning convoluted LLM-specific frameworks and write your first LLM application using regular Python functions and Hamilton! In this post, we'll present a containerized PDF summarizer powered by the OpenAI API. Its flow is encoded in Hamilton, which the… (Read full story)

Building a maintainable and modular LLM application stack with Hamilton
Thierry Jean and DAGWorks Inc. · Jul 11
In this post, we're going to share how Hamilton can help you write modular and maintainable code for your large language model (LLM) application stack. Hamilton is great for describing any type of dataflow, which is exactly what you're doing when building an LLM powered application. With Hamilton you get strong… (Read full story)

tryhamilton.dev – an interactive tutorial in your browser!
Hamilton + Airflow (GitHub repo)
Hamilton + Feast (GitHub repo)
Pandas data transformations in Hamilton in 5 minutes
Lineage + Hamilton in 10 minutes
You can’t just rollback a prior deployment to fix things.You’ll need great monitoring to understand what was run and when; e.g., when customer service gives you a ticket to investigate, how do you know what prompt was in use?You’ll need to manage and monitor whatever system you’re using to manage and store your prompts. This will be an extra system you’ll need to maintain outside of whatever is serving your code.You’ll need to manage two processes, one for updating and pushing the service, and one for updating and pushing prompts. Synchronizing these changes will be on you. For example, you need to make a code change to your service to handle a new prompt. You will need to coordinate changing two systems to make it work, which is extra operational overhead to manage.How it would work with HamiltonOur PDF summarizer flow would look something like this if you removesummarize_text_from_summaries_promptandsummarize_chunk_of_text_promptfunction definitions:summarization_shortened.py. Note the two inputs “*_prompt” that denote prompts that are now required as input to the dataflow to function. With Hamilton you’ll be able to determine what inputs should be required for your prompt template by just looking at a diagram like this. Diagram created via Hamilton.To operate things, you’ll want to either inject the prompts at request time:from hamilton import base, driver\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(summarization_sortened)\\n .build()\\n)\\n\\n# pull prompts from somewhere\\nsummarize_chunk_of_text_prompt = \"\"\"SOME PROMPT FOR {chunked_text}\"\"\"\\nsummarize_text_from_summaries_prompt = \"\"\"SOME PROMPT {summarized_chunks} ... {user_query}\"\"\"\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n \"summarize_chunk_of_text_prompt\": summarize_chunk_of_text_prompt,\\n ...\\n }\\n)Or\\xa0you change your code to dynamically load prompts, i.e., add functions to retrieve prompts from an external system as part of the Hamilton dataflow. At each invocation, they will query for the prompt to use (you can of course cache this for performance):# prompt_template_loaders.py\\n\\ndef summarize_chunk_of_text_prompt(\\n db_client: Client, other_args: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query( \\n \"get latest prompt X from DB\", other_args)\\n return _prompt\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n return _promptDriver code:from hamilton import base, driver\\nimport prompt_template_loaders # <-- load this to provide prompt input\\nimport summarization_shortend\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(\\n prompt_template_loaders,# <-- Hamilton will call above functions\\n summarization_sortened, \\n )\\n .build()\\n)\\n\\n# execute, and pass in the prompt \\nresult = dr.execute(\\n [\"summarized_text\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)How would I log prompts used and monitor flows?Here we outline a few ways to monitor what went on.Log results of execution. That is run Hamilton, then emit information to wherever you want it to go.result = dr.execute(\\n [\"summarized_text\", \\n \"summarize_chunk_of_text_prompt\", \\n ... 
# and anything else you want to pull out\\n \"summarize_text_from_summaries_prompt\"],\\n inputs={\\n # don\\'t need to pass prompts in this version\\n }\\n)\\n\\nmy_log_system(result) # send what you want for safe keeping to some\\n # system that you own.Note. In the above, Hamilton allows you to requestanyintermediateoutputs simply by requesting “functions” (i.e. nodes in the diagram) by name. If we really want to get all the intermediate outputs of the entire dataflow, we can do so and log it wherever we want to!Use loggers inside Hamilton functions (to see the power of this approach,see my old talk on structured logs):import logging\\n\\nlogger = logging.getLogger(__name__)\\n\\ndef summarize_text_from_summaries_prompt(\\n db_client: Client, another_arg: str) -> str:\\n # pseudo code here, but you get the idea:\\n _prompt = db_client.query(\\n \"get latest prompt Y from DB\", another_arg)\\n logger.info(f\"Prompt used is [{_prompt}]\")\\n return _promptExtend Hamilton to emit this information. You use Hamilton to capture information from executed functions, i.e. nodes, without needing to insert logging statement inside the function’s body. This promotes reusability since you can toggle logging between development and production settings at the Driver level. SeeGraphAdapters, or write your ownPython decoratorto wrap functions for monitoring.In any of the above code, you could easily pull in a 3rd party tool to help track & monitor the code, as well as the external API call, e.g. data dog. Note, with a one-line code change, you can plug in the DAGWorks’s Driver and get all that monitoring you’d want and more. (Try the free tierhere)!Prompts as codePrompts as static stringsSince prompts are simply strings, they’re also very amenable to being stored along with your source code. The idea is to store as many prompt versions as you like, within your code so that at runtime, the set of prompts available is fixed and deterministic.The MLOps analogy here is, instead of dynamically reloading models, you instead bake the ML model into the container/hard code the reference. Once deployed, your app has everything that it needs. The deployment is immutable; nothing changes once it’s up. This makes debugging & determining what’s going on, much simpler.MLOps Analogy: make an immutable deployment by making the model fixed for your app’s deployment.Diagram showing how treating prompts as code enables you to leverage your CI/CD and build an immutable deployment for talking to your LLM API.This approach has many operational benefits:Whenever a new prompt is pushed, it forces a new deployment. Rollback semantics are clear if there’s an issue with a new prompt.You can submit a pull request (PR) for the source code and prompts at the same time. It becomes simpler to review what the change is, and the downstream dependencies of what these prompts will touch/interact with.You can add checks to your CI/CD system to ensure bad prompts don’t make it to production.It’s simpler to debug an issue. You just pull the (Docker) container that was created and you’ll be able to exactly replicate any customer issue quickly and easily.There is no other “prompt system” to maintain or manage. Simplifying operations.It doesn’t preclude adding extra monitoring and visibility.How it would work with HamiltonThe prompts would be encoded into functions into the dataflow/directed acyclic graph (DAG):What summarization.py in the PDF summarizer example looks like. The prompt templates are part of the code. 
Diagram created via Hamilton.Pairing this code withgit, we have a lightweight versioning system for your entire dataflow (i.e. “chain”), so you can always discern what state the world was in, given a git commit SHA. If you want to manage and have access to multiple prompts at any given point in time, Hamilton has two powerful abstractions to enable you to do so:@config.whenandPython modules. This allows you to store and keep available all older prompt versions together and specify which one to use via code.@config.when (docs)Hamilton has a concept of decorators, which are just annotations on functions. The@config.whendecorator allows to specify alternative implementations for a functions, i.e. “node”, in your dataflow. In this case, we specify alternative prompts.from hamilton.function_modifiers import config\\n\\n@config.when(version=\"v1\")\\ndef summarize_chunk_of_text_prompt__v1() -> str:\\n \"\"\"V1 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text. Extract any key points with reasoning.\\\\n\\\\nContent:\"\\n\\n@config.when(version=\"v2\")\\ndef summarize_chunk_of_text_prompt__v2(content_type: str = \"an academic paper\") -> str:\\n \"\"\"V2 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text from {content_type}. Extract the key points with reasoning. \\\\n\\\\nContent:\"You can keep adding functions annotated with@config.when, allowing you to swap between them using configuration passed to the HamiltonDriver. When instantiating theDriver, it will construct the dataflow using the prompt implementation associated with the configuration value.from hamilton import base, driver\\nimport summarization\\n\\n# create driver\\ndr = (\\n driver.Builder()\\n .with_modules(summarization)\\n .with_config({\"version\": \"v1\"}) # V1 is chosen. Use \"v2\\' for V2.\\n .build()\\n)Module switchingAlternatively to using@config.when, you can instead place your different prompt implementations into different Python modules. Then, atDriverconstruction time, pass the correct module for the context you want to use.So here we have one module housing V1 of our prompt:# prompts_v1.py\\ndef summarize_chunk_of_text_prompt() -> str:\\n \"\"\"V1 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text. Extract any key points with reasoning.\\\\n\\\\nContent:\"Here we have one module housing V2 (see how they differ slightly):# prompts_v2.py\\ndef summarize_chunk_of_text_prompt(content_type: str = \"an academic paper\") -> str:\\n \"\"\"V2 prompt for summarizing chunks of text.\"\"\"\\n return f\"Summarize this text from {content_type}. Extract the key points with reasoning. \\\\n\\\\nContent:\"In the driver code below, we choose the right module to use based on some context.# run.py\\nfrom hamilton import driver\\nimport summarization\\nimport prompts_v1\\nimport prompts_v2\\n\\n# create driver -- passing in the right module we want\\ndr = (\\n driver.Builder()\\n .with_modules(\\n prompts_v1, # or prompts_v2\\n summarization,\\n )\\n .build()\\n)Using the module approach allows us to encapsulate and version whole sets of prompts together. If you want to go back in time (via git), or see what a blessed prompt version was, you just need to navigate to the correct commit, and then look in the right module.How would I log prompts used and monitor flows?Assuming you’re using git to track your code, you wouldn’t need to record what prompts were being used. 
Instead, you’d just need to know what git commit SHA is deployed and you’ll be able to track the version of your code and prompts simultaneously.To monitor flows, just like the above approach, you have the same monitoring hooks available at your disposal, and I wont repeat them here, but they are:Request any intermediate outputs and log them yourself outside of Hamilton.Log them from within the function yourself, or build aPython decorator/GraphAdapterto do it at the framework level.Integrate 3rd party tooling for monitoring your code and LLM API calls, or use the DAGWorks Platform offering to monitor it all. (Try the free tierhere)!or all the above!What about A/B testing my prompts?With any ML initiative, it’s important to measure business impacts of changes. Likewise, with LLMs + prompts, it’ll be important to test and measure changes against important business metrics. In the MLOps world, you’d be A/B testing ML models to evaluate their business value by dividing traffic between them. To ensure the randomness necessary to A/B tests, you wouldn’t know at runtime which model to use until a coin is flipped. However, to get those models out, they both would have follow a process to qualify them. So for prompts, we should think similarly.The above two prompt engineering patterns don’t preclude you from being able to A/B test prompts, but it means you need to manage a process to enable however many prompt templates you’re testing in parallel. If you’re also adjusting code paths, having them in code will be simpler to discern and debug what is going on, and you can make use of the `@config.when` decorator / python module swapping for this purpose. Versus, having to critically rely on your logging/monitoring/observability stack to tell you what prompt was used if you’re dynamically loading/passing them in and then having to mentally map which prompts go with which code paths.Note, this all gets harder if you start needing to change multiple prompts for an A/B test because you have several of them in a flow. For example you have two prompts in your workflow and you’re changing LLMs, you’ll want to A/B test the change holistically, rather than individually per prompt. Our advice, by putting the prompts into code your operational life will be simpler, since you’ll know what two prompts belong to what code paths without having to do any mental mapping.Thank you for reading DAGWorks’s Substack. This post is public so feel free to share it.ShareSummaryIn this post, we explained two patterns for managing prompts in a production environment with Hamilton. The first approach treatsprompts asdynamic runtime variables,while the second, treatsprompts as codefor production settings. If you value reducing operational burden, then our advice is to encode prompts as code, as it is operationally simpler, unless the speed to change them really matters for you.To recap:Prompts as dynamic runtime variables. Use an external system to pass the prompts to your Hamilton dataflows, or use Hamilton to pull them from a DB. For debugging & monitoring, it’s important to be able to determine what prompt was used for a given invocation. You can integrate open source tools, or use something like the DAGWorks Platform to help ensure you know what was used for any invocation of your code.Prompts as code.Encoding the prompts as code allows easy versioning with git. Change management can be done via pull requests and CI/CD checks. 
It works well with Hamilton’s features like@config.whenand module switching at the Driver level because it determines clearly what version of the prompt is used. This approach strengthens the use of any tooling you might use to monitor or track, like the DAGWorks Platform, as prompts for a deployment are immutable.We want to hear from you!If you’re excited by any of this, or have strong opinions, leave a comment, or drop by our Slack channel! Some links to do praise/complain/chat:📣join our community on Slack—\\u200awe’re more than happy to help answer questions you might have or get you started.⭐️ us onGitHub.📝 leave us anissueif you find something.📚 read ourdocumentation.⌨️ interactivelylearn about Hamilton in your browser.Other Hamilton posts you might be interested in:We have a growing collection of posts & content. Here are some we think you might be interested in.Containerized PDF Summarizer with FastAPI and HamiltonThierry Jean,DAGWorks Inc., andStefan Krawczyk·Aug 18Skip learning convoluted LLM-specific frameworks and write your first LLM application using regular Python functions and Hamilton! In this post, we’ll present a containerized PDF summarizer powered by the OpenAI API. Its flow is encoded in Hamilton, which theRead full storyBuilding a maintainable and modular LLM application stack with HamiltonThierry JeanandDAGWorks Inc.·Jul 11In this post, we’re going to share how Hamilton can help you write modular and maintainable code for your large language model (LLM) application stack. Hamilton is great for describing any type of dataflow, which is exactly what you’re doing when building an LLM powered application. With Hamilton you get strongRead full storytryhamilton.dev– an interactive tutorial in your browser!Hamilton + Airflow(GitHub repo)Hamilton + Feast(GitHub repo)Pandas data transformations in Hamilton in 5 minutesLineage + Hamilton in 10 minutes 
With Hamilton you get strong Read full story Read full story 4Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShare 4Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOtherShare 4Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOther 4Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOther 4 Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOther Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOther Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOther Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOther Share this postLLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOther Share this post LLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.ioCopy linkFacebookEmailNoteOther LLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.io LLMOps: Production prompt engineering patterns with Hamiltonblog.dagworks.io blog.dagworks.io Copy linkFacebookEmailNoteOther Copy link Facebook Email Note Other Share Share Comments Comments Comments TopNewNo posts TopNewNo posts TopNewNo posts TopNewNo posts TopNew TopNew Top New No posts Ready for more?Subscribe Ready for more?Subscribe Subscribe Subscribe Subscribe Subscribe © 2023 DAGWorks Inc.Privacy∙Terms∙Collection noticeStart WritingGet the appSubstackis the home for great writing © 2023 DAGWorks Inc.Privacy∙Terms∙Collection noticeStart WritingGet the appSubstackis the home for great writing © 2023 DAGWorks Inc.Privacy∙Terms∙Collection noticeStart WritingGet the appSubstackis the home for great writing © 2023 DAGWorks Inc.Privacy∙Terms∙Collection notice © 2023 DAGWorks Inc. Privacy∙Terms∙Collection notice Start WritingGet the app Substackis the home for great writing This site requires JavaScript to run correctly. Pleaseturn on JavaScriptor unblock scripts'" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "res[\"parsed_html_collection\"][0].parsed" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.9" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/contrib/hamilton/contrib/user/zilto/webscraper/tags.json b/contrib/hamilton/contrib/user/zilto/webscraper/tags.json new file mode 100644 index 000000000..384491ed4 --- /dev/null +++ b/contrib/hamilton/contrib/user/zilto/webscraper/tags.json @@ -0,0 +1,7 @@ +{ + "schema": "1.0", + "use_case_tags": ["webscraper"], + "secondary_tags": { + "language": "English" + } +} diff --git a/contrib/hamilton/contrib/user/zilto/webscraper/valid_configs.jsonl b/contrib/hamilton/contrib/user/zilto/webscraper/valid_configs.jsonl new file mode 100644 index 000000000..e69de29bb