From b160ec78b5d37813d52973f75f8aecba0ab2583c Mon Sep 17 00:00:00 2001
From: Prachi Pandey <115707069+prachipandey16@users.noreply.github.com>
Date: Thu, 29 Feb 2024 22:30:07 +0530
Subject: [PATCH] Contribution

Contribution
---
 Project_Problem_Statement.xlsx | Bin 0 -> 12591 bytes
 README.md                      | 193 +++++++-------------------------
 final_03.py                    | 159 +++++++++++++++++++++++
 useretrivalcode.py             |  82 ++++++++++++++
 4 files changed, 278 insertions(+), 156 deletions(-)
 create mode 100644 Project_Problem_Statement.xlsx
 create mode 100644 final_03.py
 create mode 100644 useretrivalcode.py

diff --git a/Project_Problem_Statement.xlsx b/Project_Problem_Statement.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..3452fd3853db3001229ad303381ca6f6111f1631
GIT binary patch
literal 12591
[base85-encoded binary payload of the 12591-byte xlsx attachment omitted]
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
-`http://<domain_name>/<use_case>/<provider>/<mode>`
-
-Services are usually provided in either **remote** mode or **local** mode.
-
-For example, the text_translation service provided by Google can be accessed on your local system at this URL:
-`http://localhost:8000/text_translation/google/remote`
-
-### Deploy Single Image Setup
-```
-docker build -t <image_name> .
-docker run -p <host_port>:<container_port> <image_name>
-```
-
-### Multiple Image
-In the multiple image setup, each model has its own Quart application and Docker image. All model APIs are served via NGINX, which proxies incoming requests to the appropriate destinations.
-
-There won't be any difference in accessing the models. The text_translation service provided by Google can still be accessed on your local system at this endpoint:
-`http://domain_name/text_translation/google/remote`
-
-### Deploy Multi Image Setup
-For the multiple image setup, the docker-compose file and NGINX configuration files are generated dynamically based on the config.json file. The following commands might require sudo privileges.
-
-Run this script to generate the configuration files.
-```
-./generate.sh
-```
-Set up the NGINX configuration
-```
-sudo cp <generated>.conf /etc/nginx/conf.d/
-sudo nginx -t   # checks for configuration errors
-sudo nginx -s reload
-```
-- For **Windows**, copy to `C:\nginx\conf\` and reload the server.
-- For **MacOS**, copy to `/usr/local/etc/nginx/` and reload the server.
-
-The .conf file will be present in your project root path after running the `generate.sh` script.
-
-Build and run the images
-```
-sudo docker-compose -f ./docker-compose-generated.yaml build
-sudo docker-compose -f docker-compose-generated.yaml up
-```
-
-## Adding a new model locally
-To add a new model, create a sub-directory in `src`. The sub-directory must follow the `use_case/provider/mode` format.
-
-To deploy your application, the Dockerfile must:
-- copy program files
-- install requirements
-- build and run the application
-
-After you have developed and dockerized the application, add a key-value pair inside the `models` object in the config file with the following properties:
-
-- serviceName: Name of the service. This will also be the Docker image's name.
-- modelBasePath: Dockerfile location of your model.
-- apiBasePath: Endpoint from which you wish to access the model. Should follow this convention: `<use_case>/<provider>/<mode>`.
-- containerPort: Port at which the application will be hosted inside the container.
-- environment: Another JSON object with key-value pairs. Should contain any relevant secrets or API keys required to run the application.
-
-For example, to add a new Text Translation AI model from Google in remote mode, you would add:
-```
-{
-    "serviceName": "text_translation_google",
-    "modelBasePath": "src/text_translation/google/remote/.",
-    "apiBasePath": "text_translation/google/remote",
-    "containerPort": 8000,
-    "environment": {
-        "OPENAI_API_KEY": "${OPENAI_API_KEY}",
-        "GOOGLE_CLOUD_SECRET": "${GCP_SECRET}"
-    }
-}
-```
-## Run Ansible Script
-
-### Requirements
-- [HashiCorp Vault](https://www.hashicorp.com/products/vault)
-- [Docker Swarm](https://docs.docker.com/engine/swarm/)
-- GitHub Personal Access Token
-
-### Steps
-
-1. Create an `inventory.ini` file with the target machine configuration. The target machine is where the Ansible script will run.
-Here is a sample `inventory.ini` file:
-```
-[swarm_manager]
-ee01:ac8:1561::2 ansible_connection=ssh ansible_user=github ansible_ssh_private_key_file=/home/github/.ssh/id_rsa
-```
-For more information, refer to https://www.digitalocean.com/community/tutorials/how-to-set-up-ansible-inventories and https://docs.ansible.com/ansible/latest/inventory_guide/intro_inventory.html.
-You may modify it according to your needs.
-2. Create a HashiCorp Vault to store your secrets. You need to store the following secrets, each under the separate secret path noted below (this is not optional):
-- GitHub Credentials (Path = `secret/github`)
-  - `USERNAME` (GitHub username)
-  - `PAT` (GitHub Personal Access Token. It should have read and write access to the contents of the repository.)
-- Target Machine Credentials (Path = `secret/common`)
-  - `DDIR` (Destination directory, the place where the files will be downloaded and generated)
-  - `REPONAME` (GitHub repository name. This could be Samagra-Development/ai-tools or the name of your fork.)
-  Please note: you don't have to add the entire GitHub URL, only the repository name. For example, if you fork the ai-tools repo to your account, the repo name would be something like `<your_username>/ai-tools`.
-  This setup will only work **if** you have a restructure branch in your fork/repo with `generate.sh` and `config.json` in your root folder. Please be aware that modifying the file structure will break the code.
-- Environment Secrets for the Docker Images (Path = `secret/config`)
-  - `DOCKER_REGISTRY_URL` (ghcr.io if you are pulling from GitHub Packages. Can be Docker Hub as well)
-  - `GITHUB_REPOSITORY` (ai-tools unless told otherwise)
-  - `OPENAI_API_KEY`, and other secrets pertaining to the Docker images. This list might expand as the project grows and supports multiple models. Please take a look at `sample.env` for an exhaustive list of other env keys required by the models.
-  - Refer to the FAQ to see how to add secrets to HashiCorp Vault.
-3. Set the Vault credentials in the environment of the target machine so that it can access the remotely hosted HashiCorp Vault.
-  - `VAULT_ADDR`: (Vault address)
-  - `ANSIBLE_HASHI_VAULT_TOKEN`: (Vault root login token)
-You can add the environment variables as follows:
-```
-export VAULT_ADDR=http://x.x.x.x:8200
-export ANSIBLE_HASHI_VAULT_TOKEN=abc12345
-```
-For the Vault credentials to persist, add them to `.bashrc`
-```
-echo 'export VAULT_ADDR=http://x.x.x.x:8200' >> ~/.bashrc
-echo 'export ANSIBLE_HASHI_VAULT_TOKEN=35r3zxcc' >> ~/.bashrc
-source ~/.bashrc
-```
-
-Alternatively, you can pass the variables at runtime as command-line arguments using the `--extra-vars` field.
-Here is an example:
-```
-ansible-playbook -i inventory.ini swarm.yml --extra-vars "VAULT_ADDR='http://127.0.0.1:8200' VAULT_TOKEN=abc.123" --ask-become-pass
-```
-
-### Additional Steps
-1. Change the hosts in the Ansible playbook to the node acting as the swarm manager. (Make sure Docker Swarm is running on that machine.)
-
-### Run the Ansible Playbook
-```
-ansible-playbook swarm.yml -i inventory.ini
-```
-## Contributing
-Contributions to AI Toolchain are welcome! To contribute, please follow these guidelines:
-
-1. Fork the repository and create a new branch for your feature or bug fix.
-2. Write tests for your changes.
-3. Submit a pull request describing your changes and why they are needed.
-
-Thank you for considering contributing to AI Toolchain!
+# Youtube_transcripts
+
+# Description
+Parse every video on a YouTube channel or playlist, extract audio transcripts, and embed the videos in a vector database so that they can be searched and retrieved.
+
+# Implementation Details
+The pipeline consists of the following steps:
+
+* Get the user's playlist or channel link.
+* Extract the audio from every video in the playlist or channel.
+* Extract the transcripts and timestamps from each video.
+* Chunk the transcripts (any fancy chunking algorithm works, or simply use basic chunks such as four minutes of audio).
+* Use an LLM call to summarise each video, then save each chunk separately.
+* Use ColBERT to embed the chunks in a vector database (RAGatouille / LangChain); use this as a guide.
+* Enable ColBERT retrieval and search over the embedded information (a minimal indexing sketch follows this list).
+* A search for a question returns the relevant content's timestamps, a YouTube link, and similar content.
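+
+A minimal sketch of the indexing/retrieval step, assuming the RAGatouille API (`RAGPretrainedModel.from_pretrained`, `.index`, `.search`) and hypothetical chunk data — not a final implementation:
+
+```python
+from ragatouille import RAGPretrainedModel
+
+# Hypothetical 4-minute transcript chunks produced by the steps above.
+chunks = ["transcript of minutes 0-4 ...", "transcript of minutes 4-8 ..."]
+ids = ["videoA-chunk-0", "videoA-chunk-1"]
+metas = [{"link": "https://youtu.be/videoA", "start_sec": 0},
+         {"link": "https://youtu.be/videoA", "start_sec": 240}]
+
+rag = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
+rag.index(collection=chunks, document_ids=ids, document_metadatas=metas,
+          index_name="youtube_transcripts", split_documents=False)
+
+# Each hit carries the chunk id and stored metadata, so the YouTube link
+# and timestamp of the match can be recovered directly.
+for hit in rag.search(query="What is covered in the first segment?", k=2):
+    print(hit["document_id"], hit["score"], hit["content"][:80])
+```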
+
+# Product Name
+AI Tools
+
+# Organization Name
+SamagraX
+
+# Domain
+NA
+
+# Tech Skills Needed
+PyTorch / Python, ML
+
+# Category
+Feature
+
+# Mentor(s)
+@GautamR-Samagra
+
+# Complexity
+Medium
\ No newline at end of file
diff --git a/final_03.py b/final_03.py
new file mode 100644
index 0000000..2598be4
--- /dev/null
+++ b/final_03.py
@@ -0,0 +1,159 @@
+import os
+from pydub import AudioSegment
+from pytube import Playlist
+from openai import OpenAI
+import sqlite3
+
+# Function to download audio from the playlist URL
+def download_audio_from_playlist(playlist_url, download_directory):
+    # Create a Playlist object
+    playlist = Playlist(playlist_url)
+
+    # Create the directory if it doesn't exist
+    if not os.path.exists(download_directory):
+        os.makedirs(download_directory)
+
+    # Iterate through each video in the playlist
+    for video in playlist.videos:
+        try:
+            # Get the first audio-only stream. YouTube serves audio as
+            # MP4/WebM, not WAV, so filtering on file_extension='wav'
+            # would always return None.
+            audio_stream = video.streams.filter(only_audio=True).first()
+
+            # Download the audio stream (saved as .mp4/.webm; convert it
+            # to WAV, e.g. with pydub, before slicing below).
+            audio_stream.download(output_path=download_directory)
+
+            print(f"Audio downloaded for video: {video.title}")
+
+        except Exception as e:
+            print(f"Error downloading audio for video {video.title}: {str(e)}")
+
+    print("Audio download complete.")
+
+# Function to slice a WAV file into fixed-length (e.g. 4-minute) chunks
+def slice_audio(input_dir, file_name, output_dir, target_length):
+    song = AudioSegment.from_wav(f"{input_dir}/{file_name}.wav")
+    timestamps = []
+    chunks = []
+
+    # Make sure the output directory exists before exporting chunks.
+    os.makedirs(output_dir, exist_ok=True)
+
+    i = 0
+    while i < len(song) / (float(target_length) * 1000):
+        start_ms = float(target_length) * 1000 * i
+        end_ms = float(target_length) * 1000 * (i + 1)
+        cut = song[start_ms:end_ms]
+
+        # The timestamp is the chunk's start offset within the video, in
+        # seconds (datetime.now() would only record when the script ran).
+        timestamp = start_ms / 1000.0
+        chunk_name = f"{file_name}-{float(target_length)}sec-{i}.wav"
+        cut.export(f"{output_dir}/{chunk_name}", format="wav")
+
+        timestamps.append(timestamp)
+        chunks.append(chunk_name)
+        i += 1
+
+    print("Audio slicing complete.")
+    return timestamps, chunks
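+
+# Added sketch (assumption, not in the original PR): main() below slices only
+# a single hard-coded file name; a loop like this would cover every downloaded
+# and converted WAV file in the input directory.
+def slice_all_audio(input_dir, output_dir, target_length):
+    all_timestamps, all_chunks = [], []
+    for name in os.listdir(input_dir):
+        if name.endswith(".wav"):
+            ts, ch = slice_audio(input_dir, os.path.splitext(name)[0],
+                                 output_dir, target_length)
+            all_timestamps.extend(ts)
+            all_chunks.extend(ch)
+    return all_timestamps, all_chunks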
+
+# Function to perform speech-to-text using OpenAI's Whisper model
+def transcribe_audio_with_whisper(audio_directory):
+    client = OpenAI(api_key='your_openai_api_key')  # Replace with your OpenAI API key
+
+    transcripts = []
+
+    # Iterate through each audio file in the directory
+    for audio_file_name in os.listdir(audio_directory):
+        if audio_file_name.endswith('.wav'):
+            audio_file_path = os.path.join(audio_directory, audio_file_name)
+
+            # Open the audio file
+            with open(audio_file_path, "rb") as audio_file:
+                # Perform transcription using the Whisper ASR model; with
+                # response_format="text" the API returns a plain string.
+                transcript = client.audio.transcriptions.create(
+                    model="whisper-1",
+                    file=audio_file,
+                    response_format="text"
+                )
+
+            print(f"Transcription for {audio_file_name}:")
+            print(transcript)
+
+            transcripts.append(transcript)
+
+    return transcripts
+
+# Function to summarize each transcript with an OpenAI completion model
+# (Whisper only transcribes; it does not summarize)
+def text_summarization(transcripts):
+    client = OpenAI(api_key='your_openai_api_key')  # Replace with your OpenAI API key
+
+    summarizations = []
+
+    # Iterate through each transcription
+    for transcript in transcripts:
+        # With response_format="text" each transcript is already a plain
+        # string (indexing transcript['data'][0]['text'] would fail).
+        text = transcript
+
+        # Perform text summarization using a completion model
+        summary = client.completions.create(
+            model="gpt-3.5-turbo-instruct",
+            prompt=f"Summarize the text: {text}",
+            temperature=0.7,
+            max_tokens=300,
+            top_p=1,
+            frequency_penalty=0,
+            presence_penalty=0
+        )
+
+        # The v1 client returns an object, not a dict, so use attribute access.
+        if summary.choices:
+            summarization = summary.choices[0].text
+            print("Summarization for the transcription:")
+            print(summarization)
+
+            summarizations.append(summarization)
+
+    return summarizations
+
+def save_to_database(link, chunks, timestamps, summarizations):
+    connection = sqlite3.connect('TextSummarisation.db')
+    cursor = connection.cursor()
+
+    for i, (chunk, timestamp, summarization) in enumerate(zip(chunks, timestamps, summarizations), start=1):
+        # Assumes a table named 'audio_data' with columns 'link',
+        # 'chunk_number', 'timestamp', 'summarization' (see the sketch below).
+        query = "INSERT INTO audio_data (link, chunk_number, timestamp, summarization) VALUES (?, ?, ?, ?)"
+        cursor.execute(query, (link, i, timestamp, summarization))
+
+    connection.commit()
+    connection.close()
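+
+# Added sketch (assumption, not in the original PR): save_to_database() above
+# assumes the 'audio_data' table already exists; a schema like this would
+# match the columns it inserts.
+def ensure_audio_table():
+    connection = sqlite3.connect('TextSummarisation.db')
+    connection.execute(
+        """CREATE TABLE IF NOT EXISTS audio_data (
+               link TEXT,
+               chunk_number INTEGER,
+               timestamp REAL,
+               summarization TEXT
+           )"""
+    )
+    connection.commit()
+    connection.close()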
+
+def main():
+    # Get user input for playlist URL
+    playlist_url = input("Enter the playlist video link: ")
+
+    # Set parameters (you can modify these as needed)
+    input_dir = 'audio_downloads'
+    file_name = 'output'
+    output_dir = 'audio_chunks'
+    target_length = 240  # chunk length in seconds (4 minutes)
+
+    # Make sure the destination table exists before inserting.
+    ensure_audio_table()
+
+    # Download audio from the user-provided playlist URL
+    download_audio_from_playlist(playlist_url, input_dir)
+
+    # Slice the downloaded audio into 4-minute chunks. NOTE: this slices only
+    # '<input_dir>/output.wav'; see slice_all_audio above to cover every file.
+    timestamps, chunks = slice_audio(input_dir, file_name, output_dir, target_length)
+
+    # Perform speech-to-text using the Whisper model
+    transcripts = transcribe_audio_with_whisper(output_dir)
+
+    # Summarize each transcript with a completion model
+    summarizations = text_summarization(transcripts)
+
+    save_to_database(playlist_url, chunks, timestamps, summarizations)
+
+    print("Text Summarizations:")
+    print(summarizations)
+
+if __name__ == "__main__":
+    main()
diff --git a/useretrivalcode.py b/useretrivalcode.py
new file mode 100644
index 0000000..992caa1
--- /dev/null
+++ b/useretrivalcode.py
@@ -0,0 +1,82 @@
+# Install dependencies first. These are Jupyter magics; in a plain .py
+# script, run the pip commands in a shell instead:
+# !pip install ragatouille sqlite-utils -q
+# !pip uninstall -y faiss-cpu && pip install faiss-gpu -q
+# !pip install llm -q
+
+from ragatouille import RAGPretrainedModel
+import re
+import sqlite_utils
+import llm
+from datetime import datetime
+
+# Function to strip HTML tags from text
+def strip_html_tags(text):
+    return re.compile(r'<[^>]+>').sub('', text)
+
+# Function to retrieve data from the SQL database based on the user query.
+# (The original queried a 'blog_entry' table left over from a blog-search
+# demo; this version reads the 'audio_data' table written by final_03.py
+# and uses a parameterised query to avoid SQL injection.)
+def retrieve_data_from_database(query):
+    db = sqlite_utils.Database("TextSummarisation.db")
+    pattern = f"%{query}%"
+    return list(db["audio_data"].rows_where("summarization LIKE ?", [pattern]))
+
+# Function to perform document retrieval and summarization
+def retrieve_and_summarize(query):
+    entries = retrieve_data_from_database(query)
+
+    # Initialize the RAGatouille model
+    rag = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
+
+    # Extract information and index the documents
+    entry_texts = []
+    entry_ids = []
+    entry_metadatas = []
+
+    for entry in entries:
+        entry_texts.append(strip_html_tags(entry["summarization"]))
+        entry_ids.append(f'{entry["link"]}-{entry["chunk_number"]}')
+        entry_metadatas.append({"link": entry["link"], "timestamp": entry["timestamp"]})
+
+    # Index the documents
+    rag.index(
+        collection=entry_texts,
+        document_ids=entry_ids,
+        document_metadatas=entry_metadatas,
+        index_name="transcripts",
+        max_document_length=180,
+        split_documents=True
+    )
+
+    # Search for documents based on the user query
+    docs = rag.search(query)
+
+    # Use an LLM for summarization
+    model = llm.get_model("gpt-4-turbo")
+    model.key = ''  # Set your OpenAI API key here
+
+    prompt = ' '.join([d['content'] for d in docs])
+
+    # Generate the summary using the LLM (response.text() returns the full
+    # completion; joining streamed chunks with spaces would garble it)
+    response = model.prompt(prompt, system=query)
+    result_summary = response.text()
+
+    # Recover the best match's YouTube link from the indexed metadata,
+    # falling back to a placeholder if none was stored.
+    top_meta = (docs[0].get("document_metadata") or {}) if docs else {}
+    youtube_link = top_meta.get("link", "https://www.youtube.com/example")
+    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")  # query time
+
+    # Store the results in the same database the pipeline writes to
+    db = sqlite_utils.Database("TextSummarisation.db")
+    db["search_results"].insert({
+        "query": query,
+        "summary": result_summary,
+        "youtube_link": youtube_link,
+        "timestamp": timestamp
+    })
+
+    # Display the result
+    print("Summary:", result_summary)
+    print("YouTube Link:", youtube_link)
+    print("Timestamp:", timestamp)
+
+# Example usage:
+user_query = input("Enter your query: ")
+retrieve_and_summarize(user_query)
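+
+# Added note (assumption, not in the original PR): the intended end-to-end
+# flow is to run final_03.py first to populate TextSummarisation.db, then
+# this script to index and query it, e.g.:
+#
+#   python final_03.py         # download, chunk, transcribe, summarise
+#   python useretrivalcode.py  # ColBERT-index the summaries and answer a query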