From 9d962c6b63e524eab66e6067324338a7d5550fe7 Mon Sep 17 00:00:00 2001
From: Vratika
Date: Tue, 24 Dec 2024 19:44:41 +0530
Subject: [PATCH] update the web code

---
 Dashboard/__pycache__/views.cpython-310.pyc | Bin 34421 -> 34599 bytes
 Dashboard/views.py                          | 1311 ++++++++++++++++---
 2 files changed, 1131 insertions(+), 180 deletions(-)

diff --git a/Dashboard/__pycache__/views.cpython-310.pyc b/Dashboard/__pycache__/views.cpython-310.pyc
index 2b96af5ef3bdc0d092cba12f3f552f57fa8ed3fd..a0bfd00f03a43ec4afca43282508975a02df9498 100644
GIT binary patch
delta 14353
[base85-encoded binary delta for the compiled .pyc omitted]
zd40-+D>u$RM*1aBI$-{Zlmw77?N32X>w+wtuVO?utw!BvB{LaN9#4q;-E$MarX-wB zNFDwI`IoQ5dnook;6DM=beMdc*R%_2{1#k4m1>CU!lE|*-;@w2NGb6@$h|Bjeuok= zHabU?cyV6ol7B|h9{^IDx6_n-#bJ&NjZQ$Fe-XReH|Vm&|EBq7D{)em0Gl-b|ADem zQ#fssFK+sU!!PuJj*^++dgJ2Wms`%uYP76g9_-Az&gM)xFlxF33Y>+)Z;}?`OV_`c zkiBj~r^~9lpGvgz!zd@!z&0k}nRUx#Mf`(cKY{9LSX--gz*@aOft=}HV{LTgKBwv7 z4B^|eQu*1%cDuZ#8uea+-8`xF7rpj_w9(!oEot0{*V=MuO?74Rzk}MYJN6bX`Q8i- z$ez5GHvJr8egL+&(TFX7MPwj1?_|fhcan%9FDO_%#>Y3(Y*|j zorlAEKkZMaR+TL5)?ykDn_38kXvbi&Ih^9HeZ|VVXMVRYpeT2Vn*E=y`lw*)h5f4% zpF}^k0*U}~d*PnlyQDik3r@NFJ`D;{VIbQIH{KvRnIX?QX1Z^X@{>7qV;ScmoF8qV zJ+s0824WB-B{MtYF_W$&XN)I(T^e2Hgm7JGwR(wDJa@xJFJb7T1(tH<6{5$3HUjTjy_^3{(I=G%xKXCBNiDD{E7ghttP5OroaM=aF zhiDmA$}UJBg-q^;S#N!;f*(Kx>a;e@?k1~OI^%RxTZ+7irlZx#T+%BQ%F6)wW~X>G zyi>KQ;spxmi#qOTM$nT~nk=E`Afko}p37~E`{|;Y?j!tNbdAg?DwfsafKQz5C`cgs zN#rf$DmBDTKyKv#J>gavc%3Okb0TA!{fejSLD%kG;eOqG2KvA` zRXV0~D8lUY*}2=&=Oq^Fl(p1Q{v=g0r>lZN$R+0_hnPW&{LUV&^HAu5F4D3;bc!*p zRPl(XwSi1IWt~&R+OFCbStcW-IR42N~Z^T$viTbKjJl$34^g^4b z#M!RbY6{c7;UnL`Y~iQn^KgV|jIT!(MdJ1Dvdl+8ZdAk{y8Ve(RGOX4*t&h^mfdoc zN% zHiLf;4Sq;aGox#{`}_1beJ4Oge@>0;sZ)xCFIq0n_qwjPTn=7y#s5VP zColr!`4D}o;6rjP7-Uz@!D}8Mfwbd*6M%^ex)FL!x}xDj8jElP&5OqAr~+qCNoQwI zf9#Nv%s9d$rZ#T1kEjs)m9S7$9jZ~)OtJaULZzM>QqfdQ z1p?V^)(LeDIA?4O)GQ$wTp$iatHqm#T(zZ0D+81RDgZTrsr6Om?7phCoZ7?}$V}9_ z_;P9$34PV2RhHBjc~+)tT6p<2?TfAqTuQUhE%U`&dZ|c6J)UdRRW7>xDw|@9FRgJW zVt!9=x~9c{ROhV_Z^WxaNxbNiTe(mu@r9mK>3Wx3ehXiVueh`pHueQP_oi!Vxcr(P z?OT4y4Vy;|D_(JRn-=Nr>4^7p<6G&Pmy(~#Egf~U{6{%9H~lu#&nQ>K`L6Y1V`O<^ zFx$-Nb&k4+>|+XVH8V!(Mskd9*7w+AB@UaJJ#av=4LcONIWTjWd&qH@?H;;7ME*Xy z3>i*KDD^xY&^vhZ<@)1sF>02ry2R;(A15pS4yx5i)x>|_ zRIMBrKR8@&H=dIocc!0zN?ANx52n@}YI`!XpF*opgbj*Cre7HpJI%Gtdq}!GejD^y zpHcd^x32NaO8E)oK}|aC{R~xZ-MHQ^UNBc3+K4<{Z{TwYT7)`H`XUdqbFQaKq@T!u zPhMjU`ckstJ~BB_HoIfUFdd`Hinfl5cz@NVO1y6b%(^C?L@pM_NzNlLV=R{uZyc%I zLW?6s0(7%IpZF-!t^+%kM!p#O$M>Yv7{hAzgfm%wBvu^tPgy$;9`&nhT;k-> z14@f{`{;;sg%l=w2evJe4z~fdrLK>pC8xTfJR=!%i8lu}CS-&uV*%;&vfYEwQflm& z(35XQB^v&V~3+z%f80G^JSCeY|SB96Zu}(yfmmiSQM+)J-o`HrgI+N8*P6NS~EKJEZ z>C+HBY>I;;S>lZoH;amq`5vDv56BY_pRB3RM*&CQfH`O_F$LH#(G4wq#7ZC59csY0 z=LVlEK08w3OB>lbbtn}to~%;J#4{sRO1XG*q&!gpVcFL7*AS)swQk7oSdz{@I>HGX z)~))ju;C)8&i{$y3674#Os zCfEbMHEAA{!)+mDBwcIC>ixR5o&?woM(Cwe8&~SjqP+Au3{vo%__I<9WZpz7IO(lg zGDMjzX#>X2J;wd1oJTf*fKH{KGNyAbzmE>&GX ppETxDZnn$q$`)_lvY^~mO0Tu_YoXtMZv$0HpZ<7N*OfWS{{x%ouwDQF diff --git a/Dashboard/views.py b/Dashboard/views.py index aa09ab6..566f938 100644 --- a/Dashboard/views.py +++ b/Dashboard/views.py @@ -5,6 +5,7 @@ import io import csv import json from django.http import HttpResponse +from django.http import HttpResponseForbidden import os import botocore from django.http import JsonResponse @@ -12,16 +13,22 @@ from django.conf import settings import re import random from cryptography.fernet import Fernet -from django.template.loader import render_to_string from django.http import JsonResponse from Accounts.models import UserProfile from django.contrib.auth.models import User from django.contrib.auth.decorators import login_required from django.views.decorators.cache import never_cache +from django.contrib.sessions.models import Session +from rest_framework.authentication import SessionAuthentication, BasicAuthentication +from django.views.decorators.csrf import csrf_exempt +from rest_framework.decorators import api_view, authentication_classes, permission_classes +from rest_framework.permissions import IsAuthenticated from .models import* from .serializers import* from rest_framework import generics from django.shortcuts import 
render,redirect,get_object_or_404 +from django.http import HttpResponse +from django.template.loader import render_to_string from django.views.decorators.csrf import csrf_exempt import pytz from datetime import datetime @@ -34,7 +41,6 @@ import csv from rest_framework.decorators import api_view from django.core.files.storage import default_storage from django.core.files.base import ContentFile -from Device .models import Devices import pandas as pd import time @@ -69,15 +75,52 @@ def home(request): def navbar(request): return render(request, 'navbar/nav.html') -#=========================================================================================== +#================================================================ @never_cache +@csrf_exempt @login_required(login_url='login') -# def ddos(request): +# def ddos(request, device_id=None): +# # Get the logged-in user's profile +# user_profile = request.user.userprofile +# # Fetch the list of devices associated with the logged-in user +# logged_in_devices = Devices.objects.filter(used_by=user_profile).order_by('-created_at') + +# # Debugging: Print the list of devices +# print(f"Logged-in user's devices: {logged_in_devices}") + +# # Check if the user has more than one device +# has_multiple_devices = logged_in_devices.count() > 1 + +# # If the frontend passed a device_id and the user has multiple devices, use it +# if device_id and has_multiple_devices: +# try: +# # Fetch the device from the database that belongs to the logged-in user +# device = get_object_or_404(Devices, id=device_id, used_by=user_profile) +# print(f"Using passed device with ID: {device.id}") +# logged_in_device_id = device.id +# logged_in_device_name = device.device_name +# except Devices.DoesNotExist: +# print(f"Device with ID {device_id} not found or doesn't belong to the logged-in user.") +# return HttpResponseForbidden("You do not have permission to view this device's data.") +# else: +# # If no device_id is passed or user has only one device, get the most recent device +# if logged_in_devices.exists(): +# logged_in_device = logged_in_devices.first() # Get the most recent device +# logged_in_device_id = logged_in_device.id +# logged_in_device_name = logged_in_device.device_name +# print(f"Using the most recent device: {logged_in_device_name} (ID: {logged_in_device_id})") +# else: +# # If no devices found, set ID as None and name as "Unknown Device" +# logged_in_device_id = None +# logged_in_device_name = "Unknown Device" +# print("No devices found for the logged-in user.") + +# # Path to your DDoS prediction CSV file # file_path = 'media/ddos_predictions/predictions.csv' # data = pd.read_csv(file_path) -# # Create a mapping for protocol names to their short forms +# # Create a mapping for protocol names to their short forms # protocol_mapping = { # "Protocol_ICMP": "ICMP", # "Protocol_TCP": "TCP", @@ -93,7 +136,88 @@ def navbar(request): # "Protocol_DNS": "DNS" # } +# # Sum up the DDoS-related columns +# ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 'tot_dur'] +# ddos_sums = {col: int(data[col].sum()) for col in ddos_columns} +# ddos_sums['byteperflow'] /= 15 +# ddos_sums['tot_kbps'] /= 15 +# # Get the source and destination IP counts +# src_ip_counts = data['src_ip'].value_counts() +# src_ip_dict = src_ip_counts.to_dict() + +# dest_ip_counts = data['dst_ip'].value_counts() +# dest_ip_dict = dest_ip_counts.to_dict() + +# # Get protocol counts +# protocol_columns = data.columns[7:19] +# protocol_counts = {} +# for protocol in 
protocol_columns: +# short_form = protocol_mapping.get(protocol, protocol) # Default to the original name if not found +# protocol_counts[short_form] = int((data[protocol] == 1).sum()) + +# # Filtered data where probability > 0.9 +# filtered_data = data[data['probability'] > 0.9] +# src_ip_counts2 = filtered_data['src_ip'].value_counts() +# src_ip_dict2 = src_ip_counts2.to_dict() + +# # Pass the logged-in user's device information (device ID and device name) to the template +# return render(request, 'ddos/ddos.html', { +# 'ddos_sums': ddos_sums, +# 'src_ip_dict': src_ip_dict, +# 'dest_ip_dict': dest_ip_dict, +# 'protocol_counts': protocol_counts, +# 'src_ip_dict2': src_ip_dict2, +# 'logged_in_device_id': logged_in_device_id, # Device ID passed here +# 'logged_in_device_name': logged_in_device_name, # Device name +# 'has_multiple_devices': has_multiple_devices # Pass this flag to the frontend +# }) + +# def ddos(request): +# print("Inside the ddos view...") + + + +# # Construct the file path for the selected device's DDoS predictions +# response_data=None +# if request.method=="POST": +# try : +# response_data= json.loads(request.body) +# except json.JSONDecodeError: +# return JsonResponse({'error':'invalid json'}) +# if response_data: +# file_path = 'media/ddos_predictions/predictions.csv' + +# else : +# print('have no csv file ') + + + + +# file_path = f'media/ddos_predictions/predictions.csv' + +# # Read the CSV file for the selected device +# try: +# data = pd.read_csv(file_path) +# except FileNotFoundError: +# print(f"File not found: {file_path}") +# return HttpResponse( status=401) + +# # Process the data and generate DDoS statistics (as before) +# protocol_mapping = { +# "Protocol_ICMP": "ICMP", +# "Protocol_TCP": "TCP", +# "Protocol_UDP": "UDP", +# "Protocol_HTTP": "HTTP", +# "Protocol_HTTPS": "HTTPS", +# "Protocol_SSH": "SSH", +# "Protocol_DHCP": "DHCP", +# "Protocol_FTP": "FTP", +# "Protocol_SMTP": "SMTP", +# "Protocol_POP3": "POP3", +# "Protocol_IMAP": "IMAP", +# "Protocol_DNS": "DNS" +# } # ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 'tot_dur'] # ddos_sums = {col: int(data[col].sum()) for col in ddos_columns} @@ -103,32 +227,138 @@ def navbar(request): # src_ip_counts = data['src_ip'].value_counts() # src_ip_dict = src_ip_counts.to_dict() - # dest_ip_counts = data['dst_ip'].value_counts() # dest_ip_dict = dest_ip_counts.to_dict() - # protocol_columns = data.columns[7:19] # protocol_counts = {} # for protocol in protocol_columns: -# short_form = protocol_mapping.get(protocol, protocol) # Default to the original name if not found +# short_form = protocol_mapping.get(protocol, protocol) # protocol_counts[short_form] = int((data[protocol] == 1).sum()) -# print(protocol_counts) # filtered_data = data[data['probability'] > 0.9] # src_ip_counts2 = filtered_data['src_ip'].value_counts() # src_ip_dict2 = src_ip_counts2.to_dict() +# # Return the response with the DDoS data +# try: +# return render(request, 'ddos/ddos.html', { +# 'ddos_sums': ddos_sums, +# 'src_ip_dict': src_ip_dict, +# 'dest_ip_dict': dest_ip_dict, +# 'protocol_counts': protocol_counts, +# 'src_ip_dict2': src_ip_dict2, + +# }) +# except Exception as e: +# print(f"Error rendering template: {e}") +# return HttpResponseForbidden("Internal Server Error occurred while rendering the page.") +#================================================================ - +# @never_cache +# @csrf_exempt +# @login_required(login_url='login') +# def ddos(request): +# print("Inside the ddos view...") 
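+# A note on the expected request shape: this commented version reads the
+# device id from a JSON POST body such as {"device_id": 42}. A minimal,
+# hedged client-side sketch (the URL below is assumed, not taken from this
+# project's urls.py):
+#
+#   import requests
+#   requests.post("http://localhost:8000/ddos/",
+#                 json={"device_id": 42},
+#                 cookies={"sessionid": "<session-cookie>"})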
+# device_id = None # Initialize device_id +# print('device id',device_id) - +# if request.method == "POST": +# try: +# # Try to load the request body as JSON +# response_data = json.loads(request.body) +# if 'device_id' in response_data: +# device_id = int(response_data['device_id']) # Set the device_id from POST +# print(f"Device ID received from POST: {device_id}") +# else: +# return JsonResponse({'error': 'Device ID is required'}, status=400) +# except json.JSONDecodeError: +# return JsonResponse({'error': 'Invalid JSON'}, status=400) +# # Only fetch the recent device if no device_id was set from POST +# if not device_id: +# print("No device ID found from POST data. Fetching the latest device for the logged-in user.") +# recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() +# if recent_device: +# device_id = recent_device.id +# print(f"Fetched recent device ID: {device_id}") +# else: +# print("No devices found for the logged-in user.") +# return JsonResponse({'error': 'No devices found for the logged-in user'}, status=404) - +# # Construct the file path for the device's DDoS prediction CSV file +# file_path = os.path.join('media', 'ddos_predictions', str(device_id), 'predictions.csv') +# print(f"Constructed file path: {file_path}") -# return render(request, 'ddos/ddos.html',{'ddos_sums': ddos_sums,'src_ip_dict' : src_ip_dict , 'dest_ip_dict' : dest_ip_dict , 'protocol_counts' : protocol_counts,'src_ip_dict2' : src_ip_dict2}) +# # Check if the file exists +# if not os.path.exists(file_path): +# print(f"File not found at path: {file_path}") +# return JsonResponse({'error': f"File not found for device ID {device_id}"}, status=404) + +# # Attempt to read the CSV file +# try: +# data = pd.read_csv(file_path) +# print(f"Data loaded successfully. 
First rows:\n{data.head()}") +# except pd.errors.EmptyDataError: +# print(f"CSV file is empty: {file_path}") +# return JsonResponse({'error': 'CSV file is empty'}, status=400) +# except Exception as e: +# print(f"Unexpected error reading CSV: {e}") +# return JsonResponse({'error': 'Error reading the CSV file'}, status=500) + +# # Process the CSV data +# protocol_mapping = { +# "Protocol_ICMP": "ICMP", +# "Protocol_TCP": "TCP", +# "Protocol_UDP": "UDP", +# "Protocol_HTTP": "HTTP", +# "Protocol_HTTPS": "HTTPS", +# "Protocol_SSH": "SSH", +# "Protocol_DHCP": "DHCP", +# "Protocol_FTP": "FTP", +# "Protocol_SMTP": "SMTP", +# "Protocol_POP3": "POP3", +# "Protocol_IMAP": "IMAP", +# "Protocol_DNS": "DNS" +# } + +# ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 'tot_dur'] +# ddos_sums = {col: int(data[col].sum()) for col in ddos_columns} +# ddos_sums['byteperflow'] /= 15 +# ddos_sums['tot_kbps'] /= 15 + +# src_ip_counts = data['src_ip'].value_counts() +# src_ip_dict = src_ip_counts.to_dict() + +# dest_ip_counts = data['dst_ip'].value_counts() +# dest_ip_dict = dest_ip_counts.to_dict() + +# protocol_columns = data.columns[7:19] +# protocol_counts = {} +# for protocol in protocol_columns: +# short_form = protocol_mapping.get(protocol, protocol) +# protocol_counts[short_form] = int((data[protocol] == 1).sum()) + +# # Filter data where the probability is above 0.9 +# filtered_data = data[data['probability'] > 0.9] +# src_ip_counts2 = filtered_data['src_ip'].value_counts() +# src_ip_dict2 = src_ip_counts2.to_dict() + +# # Return the response with the DDoS data +# try: +# return render(request, 'ddos/ddos.html', { +# 'ddos_sums': ddos_sums, +# 'src_ip_dict': src_ip_dict, +# 'dest_ip_dict': dest_ip_dict, +# 'protocol_counts': protocol_counts, +# 'src_ip_dict2': src_ip_dict2, +# }) +# except Exception as e: +# print(f"Error rendering template: {e}") +# return HttpResponseForbidden("Internal Server Error occurred while rendering the page.") +@never_cache +@csrf_exempt @login_required(login_url='login') def ddos(request): print("Inside the ddos view...") @@ -240,7 +470,106 @@ def ddos(request): -#================================================================================ + + + + + + + +# def DdosApi(request): +# print("Inside the ddos view...") + +# device_id = request.GET.get('device_id', None) +# print(f"Device ID from headers: {device_id}") + +# if not device_id: +# print("No device ID provided. 
Fetching the latest device for the logged-in user.") +# recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() +# if recent_device: +# device_id = recent_device.id +# print(f"Fetched recent device ID: {device_id}") +# else: +# print("No devices found for the logged-in user.") +# return JsonResponse({'error': 'No devices found for the logged-in user'}, status=404) + +# try: +# device_id = int(device_id) +# print(f"Using device ID: {device_id}") +# except ValueError: +# return JsonResponse({'error': 'Invalid device ID'}, status=400) + +# try: +# device = Devices.objects.get(id=device_id) +# device_pod = device.pod +# print(f"Device Pod: {device_pod}") +# except Devices.DoesNotExist: +# return JsonResponse({'error': f"Device with ID {device_id} not found"}, status=404) + +# file_path = os.path.join('media', 'ddos_predictions', str(device_id), 'predictions.csv') +# print(f"Constructed file path: {file_path}") + +# if not os.path.exists(file_path): +# print(f"File not found at path: {file_path}") +# return JsonResponse({'error': f"File not found for device ID {device_id}"}, status=404) + +# try: +# data = pd.read_csv(file_path) +# print(f"Data loaded successfully. First rows:\n{data.head()}") +# except pd.errors.EmptyDataError: +# print(f"CSV file is empty: {file_path}") +# return JsonResponse({'error': 'CSV file is empty'}, status=400) +# except Exception as e: +# print(f"Unexpected error reading CSV: {e}") +# return JsonResponse({'error': 'Error reading the CSV file'}, status=500) + +# protocol_mapping = { +# "Protocol_ICMP": "ICMP", +# "Protocol_TCP": "TCP", +# "Protocol_UDP": "UDP", +# "Protocol_HTTP": "HTTP", +# "Protocol_HTTPS": "HTTPS", +# "Protocol_SSH": "SSH", +# "Protocol_DHCP": "DHCP", +# "Protocol_FTP": "FTP", +# "Protocol_SMTP": "SMTP", +# "Protocol_POP3": "POP3", +# "Protocol_IMAP": "IMAP", +# "Protocol_DNS": "DNS" +# } + +# ddos_columns = ['pktcount', 'byteperflow', 'tot_kbps', 'rx_kbps', 'flows', 'bytecount', 'tot_dur'] +# ddos_sums = {col: int(data[col].sum()) for col in ddos_columns} +# ddos_sums['byteperflow'] /= 15 +# ddos_sums['tot_kbps'] /= 15 + +# src_ip_counts = data['src_ip'].value_counts() +# src_ip_dict = src_ip_counts.to_dict() + +# dest_ip_counts = data['dst_ip'].value_counts() +# dest_ip_dict = dest_ip_counts.to_dict() + +# protocol_columns = data.columns[7:19] +# protocol_counts = { +# protocol_mapping.get(protocol, protocol): int((data[protocol] == 1).sum()) +# for protocol in protocol_columns +# } + +# filtered_data = data[data['probability'] > 0.9] +# src_ip_counts2 = filtered_data['src_ip'].value_counts() +# src_ip_dict2 = src_ip_counts2.to_dict() + +# return JsonResponse({ +# 'device_pod': device_pod, +# 'ddos_sums': ddos_sums, +# 'src_ip_dict': src_ip_dict, +# 'dest_ip_dict': dest_ip_dict, +# 'protocol_counts': protocol_counts, +# 'src_ip_dict2': src_ip_dict2, +# }, status=200) + + +# ================================================================================ @never_cache def read_tx_bytes(request): @@ -474,12 +803,15 @@ def processes_log(request): def dma(request): return render(request, 'dma/dma.html') + + + # def get_combined_files(): # df1 = pd.read_csv('media/malware_predictions/bytes_predictions_KNeighborsClassifier.csv') # df2 = pd.read_csv('media/malware_predictions/bytes_predictions_RandomForestClassifier.csv') -# df3 = pd.read_csv('media/malware_predictions/latest_malware_bytes_predictions_SGD.csv') -# df4 = pd.read_csv('media/malware_predictions/latest_malware_bytes_predictions_XGB.csv') +# df3 = 
pd.read_csv('media/malware_predictions/bytes_predictions_SGDClassifier.csv') +# df4 = pd.read_csv('media/malware_predictions/bytes_predictions_XGBClassifier.csv') # # df1 = pd.read_csv('media/temp/bytes_predictions_KNeighborsClassifier.csv') @@ -578,6 +910,296 @@ def dma(request): # combined_data2.at[i,'Prediction Probability'] = probs[max_index] +# combined_data = pd.concat([combined_data1, combined_data2], ignore_index=True) + +# return combined_data + + +# @login_required(login_url='login') +# @never_cache +# def malware(request): +# combined_data = get_combined_files() + +# class_names = { +# 1: "Ramnit", +# 2: "Lollipop", +# 3: "Kelihos_ver3", +# 4: "Vundo", +# 5: "Simda", +# 6: "Tracur", +# 7: "Kelihos_ver1", +# 8: "Obfuscator.ACY", +# 9: "Gatak" +# } + + +# high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9] +# files_list = high_probability_files['File'].tolist() + + +# files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)] +# frequency = files70_90['Predicted Class'].value_counts().sort_index() +# complete_index = pd.Index(range(10)) +# frequency = frequency.reindex(complete_index, fill_value=0) +# print(frequency,'in the frequency') +# # if frequency: +# # print("Check_malware_frequency") + + +# all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index() +# frequency_with_names = all_frequency.rename(class_names) +# print(frequency_with_names,'with name') + + +# avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index() +# all_classes = pd.DataFrame({'Predicted Class': range(1, 10)}) +# avg_probability = pd.merge(all_classes, avg_probability, on='Predicted Class', how='left') +# avg_probability['Prediction Probability'].fillna(0, inplace=True) +# avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names) +# average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability'])) +# print(average_probability_dict,"avg is here ") + +# file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt') +# data = None +# try: +# with open(file_path, 'r') as file: +# data = file.readlines()[::-1] # Reverse lines for latest logs +# except: +# pass + + + +# return render(request, 'malware/malware.html', {'files_list': files_list , 'frequency' : frequency.to_dict() , 'class_frequency' : frequency_with_names.to_dict() , 'average' : average_probability_dict ,"logs":data}) + + +# def get_combined_files(device_id): + +# base_dir = os.path.join("media", "malware_predictions", str(device_id)) +# # base_dir = f"media/malware_predictions" +# # Construct file paths using base_dir +# file1_path = os.path.join(base_dir, 'bytes_predictions_KNeighborsClassifier.csv') +# file2_path = os.path.join(base_dir, 'bytes_predictions_RandomForestClassifier.csv') +# file3_path = os.path.join(base_dir, 'bytes_predictions_SGDClassifier.csv') +# file4_path = os.path.join(base_dir, 'bytes_predictions_XGBClassifier.csv') + + +# df1 = pd.read_csv(file1_path) +# df2 = pd.read_csv(file2_path) +# df3 = pd.read_csv(file3_path) +# df4 = pd.read_csv(file4_path) +# # df1 = pd.read_csv('media/temp/bytes_predictions_KNeighborsClassifier.csv') +# # df2 = pd.read_csv('media/temp/bytes_predictions_RandomForestClassifier.csv') +# # df3 = pd.read_csv('media/temp/bytes_predictions_SGDClassifier.csv') +# # df4 = pd.read_csv('media/temp/bytes_predictions_XGBooster.csv') + 
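+# The row-by-row loop below keeps, per file, the class from whichever model
+# reports the highest probability and averages the probabilities. A hedged,
+# vectorized sketch of the same idea (assumes df1..df4 are aligned on 'File'
+# and have equal length; `combined` is a hypothetical name):
+#
+#   import numpy as np
+#   probs = pd.concat([d['Prediction Probability'] for d in (df1, df2, df3, df4)], axis=1)
+#   classes = pd.concat([d['Predicted Class'] for d in (df1, df2, df3, df4)], axis=1)
+#   best = probs.to_numpy().argmax(axis=1)
+#   combined = pd.DataFrame({
+#       'File': df1['File'],
+#       'Predicted Class': classes.to_numpy()[np.arange(len(best)), best],
+#       'Prediction Probability': probs.mean(axis=1),
+#   })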
+# # Step 2: Create a new DataFrame to hold combined results +# combined_data1 = pd.DataFrame() + +# # Step 3: Combine predictions +# combined_data1['File'] = df1['File'] # Assuming all files are the same +# combined_data1['Predicted Class'] = df1['Predicted Class'] # Placeholder +# combined_data1['Prediction Probability'] = 0.0 # Initialize probability column +# max_length = max(len(df1), len(df2), len(df3), len(df4)) +# # Step 4: Loop through each row and calculate the highest probability and average +# # for i in range(len(df1)): +# # # Get probabilities from all models +# # probs = [ +# # df1['Prediction Probability'][i], +# # df2['Prediction Probability'][i], +# # df3['Prediction Probability'][i], +# # df4['Prediction Probability'][i], +# # ] + +# # # Get predicted classes +# # classes = [ +# # df1['Predicted Class'][i], +# # df2['Predicted Class'][i], +# # df3['Predicted Class'][i], +# # df4['Predicted Class'][i], +# # ] + +# # # Find the index of the highest probability +# # max_index = probs.index(max(probs)) + +# # # Set the highest predicted class +# # combined_data1.at[i, 'Predicted Class'] = classes[max_index] + +# # # Calculate the average probability +# # combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs) + +# for i in range(max_length): +# probs, classes = [], [] + +# for df in [df1, df2, df3, df4]: +# try: +# probs.append(df['Prediction Probability'].iloc[i]) +# classes.append(df['Predicted Class'].iloc[i]) +# except IndexError: +# # Skip if the row does not exist in this DataFrame +# pass + +# if probs and classes: +# max_index = probs.index(max(probs)) +# combined_data1.at[i, 'Predicted Class'] = classes[max_index] +# combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs) + +# file5_path = os.path.join(base_dir, 'asm_prediction_KNeighborsClassifier.csv') +# file6_path = os.path.join(base_dir, 'asm_prediction_LogisticRegression.csv') +# file7_path = os.path.join(base_dir, 'asm_prediction_RandomForestClassifier.csv') +# file8_path = os.path.join(base_dir, 'asm_prediction_XGBClassifier.csv') +# df5 = pd.read_csv(file5_path) +# df6 = pd.read_csv(file6_path) +# df7 = pd.read_csv(file7_path) +# df8 = pd.read_csv(file8_path) + +# combined_data2 = pd.DataFrame() + +# # Step 3: Combine predictions +# combined_data2['File'] = df5['File'] # Assuming all files are the same +# combined_data2['Predicted Class'] = df5['Predicted Class'] # Placeholder +# combined_data2['Prediction Probability'] = 0.0 # Initialize probability column + +# # Step 4: Loop through each row and calculate the highest probability and average +# for i in range(len(df5)): +# # Get probabilities from all models +# probs = [ +# df5['Prediction Probability'][i], +# df6['Prediction Probability'][i], +# df7['Prediction Probability'][i], +# df8['Prediction Probability'][i], +# ] + +# # Get predicted classes +# classes = [ +# df5['Predicted Class'][i], +# df6['Predicted Class'][i], +# df7['Predicted Class'][i], +# df8['Predicted Class'][i], +# ] + +# # Find the index of the highest probability +# max_index = probs.index(max(probs)) + +# # Set the highest predicted class +# combined_data2.at[i, 'Predicted Class'] = classes[max_index] + +# # Calculate the average probability +# # combined_data2.at[i, 'Prediction Probability'] = sum(probs) / len(probs) +# combined_data2.at[i,'Prediction Probability'] = probs[max_index] + + +# combined_data = pd.concat([combined_data1, combined_data2], ignore_index=True) + +# return combined_data +# def get_combined_files(device_id): + + # df1 = 
pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_KNeighborsClassifier.csv') + # df2 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_RandomForestClassifier.csv') + # df3 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_SGDClassifier.csv') + # df4 = pd.read_csv(f'media/malware_predictions/{device_id}/bytes_predictions_XGBClassifier.csv') +# file_path = f'media/malware_predictions/{device_id}/bytes_predictions_KNeighborsClassifier.csv' +# print('df1........hukjkjjnkjn:',file_path) + +# # df1 = pd.read_csv('media/temp/bytes_predictions_KNeighborsClassifier.csv') +# # df2 = pd.read_csv('media/temp/bytes_predictions_RandomForestClassifier.csv') +# # df3 = pd.read_csv('media/temp/bytes_predictions_SGDClassifier.csv') +# # df4 = pd.read_csv('media/temp/bytes_predictions_XGBooster.csv') + +# # Step 2: Create a new DataFrame to hold combined results +# combined_data1 = pd.DataFrame() + +# # Step 3: Combine predictions +# combined_data1['File'] = df1['File'] # Assuming all files are the same +# combined_data1['Predicted Class'] = df1['Predicted Class'] # Placeholder +# combined_data1['Prediction Probability'] = 0.0 # Initialize probability column +# max_length = max(len(df1), len(df2), len(df3), len(df4)) +# # Step 4: Loop through each row and calculate the highest probability and average +# # for i in range(len(df1)): +# # # Get probabilities from all models +# # probs = [ +# # df1['Prediction Probability'][i], +# # df2['Prediction Probability'][i], +# # df3['Prediction Probability'][i], +# # df4['Prediction Probability'][i], +# # ] + +# # # Get predicted classes +# # classes = [ +# # df1['Predicted Class'][i], +# # df2['Predicted Class'][i], +# # df3['Predicted Class'][i], +# # df4['Predicted Class'][i], +# # ] + +# # # Find the index of the highest probability +# # max_index = probs.index(max(probs)) + +# # # Set the highest predicted class +# # combined_data1.at[i, 'Predicted Class'] = classes[max_index] + +# # # Calculate the average probability +# # combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs) + +# for i in range(max_length): +# probs, classes = [], [] + +# for df in [df1, df2, df3, df4]: +# try: +# probs.append(df['Prediction Probability'].iloc[i]) +# classes.append(df['Predicted Class'].iloc[i]) +# except IndexError: +# # Skip if the row does not exist in this DataFrame +# pass + +# if probs and classes: +# max_index = probs.index(max(probs)) +# combined_data1.at[i, 'Predicted Class'] = classes[max_index] +# combined_data1.at[i, 'Prediction Probability'] = sum(probs) / len(probs) + + + # df5 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_KNeighborsClassifier.csv') + # df6 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_LogisticRegression.csv') + # df7 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_RandomForestClassifier.csv') + # df8 = pd.read_csv(f'media/malware_predictions/{device_id}/asm_prediction_XGBClassifier.csv') + + + +# combined_data2 = pd.DataFrame() + +# # Step 3: Combine predictions +# combined_data2['File'] = df5['File'] # Assuming all files are the same +# combined_data2['Predicted Class'] = df5['Predicted Class'] # Placeholder +# combined_data2['Prediction Probability'] = 0.0 # Initialize probability column + +# # Step 4: Loop through each row and calculate the highest probability and average +# for i in range(len(df5)): +# # Get probabilities from all models +# probs = [ +# df5['Prediction Probability'][i], +# 
df6['Prediction Probability'][i],
+#             df7['Prediction Probability'][i],
+#             df8['Prediction Probability'][i],
+#         ]
+
+#         # Get predicted classes
+#         classes = [
+#             df5['Predicted Class'][i],
+#             df6['Predicted Class'][i],
+#             df7['Predicted Class'][i],
+#             df8['Predicted Class'][i],
+#         ]
+
+#         # Find the index of the highest probability
+#         max_index = probs.index(max(probs))
+
+#         # Set the highest predicted class
+#         combined_data2.at[i, 'Predicted Class'] = classes[max_index]
+
+#         # Calculate the average probability
+#         # combined_data2.at[i, 'Prediction Probability'] = sum(probs) / len(probs)
+#         combined_data2.at[i, 'Prediction Probability'] = probs[max_index]
+
 # combined_data = pd.concat([combined_data1, combined_data2], ignore_index=True)

 # return combined_data
@@ -708,82 +1330,99 @@ def get_combined_files(device_id):
     return pd.DataFrame()

-@login_required(login_url='login')
-@never_cache
+# @csrf_exempt
+# @login_required(login_url='login')
+# @never_cache
+
 def malware(request):
-    print("Inside the ddos view...")
+    print("Inside the malware view...")

-    device_id = request.GET.get('device_id', None)
-    print(f"Device ID from headers: {device_id}")
+    # Attempt to get the device_id from request headers
+    device_id = request.GET.get('device_id', None)
+    print(f"Device ID from headers: {device_id}")

-    if not device_id:
-        print("No device ID provided in headers. Fetching the latest device for the logged-in user.")
-        recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first()
-        if recent_device:
-            device_id = recent_device.id
-            print(f"Fetched recent device ID: {device_id}")
-        else:
-            print("No devices found for the logged-in user.")
+    # If device_id is not provided in headers, fetch the latest device for the logged-in user
+    if not device_id:
+        print("No device ID provided in headers. Fetching the latest device for the logged-in user.")
+        recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first()
+        if recent_device:
+            device_id = recent_device.id
+            print(f"Fetched recent device ID: {device_id}")
+        else:
+            print("No devices found for the logged-in user.")
+
+    # Assuming get_combined_files is a function that fetches the combined data
+    combined_data = get_combined_files(device_id)
+    # combined_data = get_combined_files()
+    # print("Combined data:", combined_data)
+    class_names = {
+        1: "Ramnit",
+        2: "Lollipop",
+        3: "Kelihos_ver3",
+        4: "Vundo",
+        5: "Simda",
+        6: "Tracur",
+        7: "Kelihos_ver1",
+        8: "Obfuscator.ACY",
+        9: "Gatak"
+    }

-    # Fetch combined data
-    combined_data = get_combined_files(device_id)
-
-    # If the data is empty, show a message
-    if combined_data.empty:
-        message = "Data is still being captured. Please try again later."
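+# Note: get_combined_files() returns an empty DataFrame when no prediction
+# CSVs exist yet, and an empty frame has no 'Prediction Probability' column,
+# so filtering on it raises KeyError. A hedged sketch of an equivalent guard
+# for the active view above (names reused from it):
+#
+#   if combined_data.empty:
+#       return render(request, 'malware/malware.html',
+#                     {'message': "Data is still being captured. Please try again later."})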
- return render(request, 'malware/malware.html', {'message': message}) - - class_names = { - 1: "Ramnit", - 2: "Lollipop", - 3: "Kelihos_ver3", - 4: "Vundo", - 5: "Simda", - 6: "Tracur", - 7: "Kelihos_ver1", - 8: "Obfuscator.ACY", - 9: "Gatak" - } + high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9] + print("heree") + files_list = high_probability_files['File'].tolist() - high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9] - files_list = high_probability_files['File'].tolist() + files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)] + frequency = files70_90['Predicted Class'].value_counts().sort_index() + complete_index = pd.Index(range(10)) + frequency = frequency.reindex(complete_index, fill_value=0) + # print(frequency, 'in the frequency') - files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)] - frequency = files70_90['Predicted Class'].value_counts().sort_index() - complete_index = pd.Index(range(10)) - frequency = frequency.reindex(complete_index, fill_value=0) + all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index() + frequency_with_names = all_frequency.rename(class_names) + # print(frequency_with_names, 'with name') - all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index() - frequency_with_names = all_frequency.rename(class_names) + avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index() + all_classes = pd.DataFrame({'Predicted Class': range(1, 10)}) + avg_probability = pd.merge(all_classes, avg_probability, on='Predicted Class', how='left') + avg_probability['Prediction Probability'].fillna(0, inplace=True) + avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names) + average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability'])) + # print(average_probability_dict, "avg is here ") - avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index() - all_classes = pd.DataFrame({'Predicted Class': range(1, 10)}) - avg_probability = pd.merge(all_classes, avg_probability, on='Predicted Class', how='left') - avg_probability['Prediction Probability'].fillna(0, inplace=True) - avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names) - average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability'])) + file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt') + data = None + try: + with open(file_path, 'r') as file: + data = file.readlines()[::-1] # Reverse lines for latest logs + except: + pass - file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt') - data = None - try: - with open(file_path, 'r') as file: - data = file.readlines()[::-1] # Reverse lines for latest logs - except: - pass - - return render(request, 'malware/malware.html', { - 'files_list': files_list, - 'frequency': frequency.to_dict(), - 'class_frequency': frequency_with_names.to_dict(), - 'average': average_probability_dict, - "logs": data, - 'message': None # Clear message if data is available - }) + return render(request, 'malware/malware.html', {'files_list': files_list, 'frequency': frequency.to_dict(), 'class_frequency': 
frequency_with_names.to_dict(), 'average': average_probability_dict, "logs": data}) # def malware(request): -# combined_data = get_combined_files() +# print("Inside the ddos view...") +# device_id = request.GET.get('device_id', None) +# print(f"Device ID from headers: {device_id}") + +# if not device_id: +# print("No device ID provided in headers. Fetching the latest device for the logged-in user.") +# recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() +# if recent_device: +# device_id = recent_device.id +# print(f"Fetched recent device ID: {device_id}") +# else: +# print("No devices found for the logged-in user.") + +# # Fetch combined data +# combined_data = get_combined_files(device_id) + +# # If the data is empty, show a message +# if combined_data.empty: +# message = "Data is still being captured. Please try again later." +# return render(request, 'malware/malware.html', {'message': message}) + # class_names = { # 1: "Ramnit", # 2: "Lollipop", @@ -795,25 +1434,17 @@ def malware(request): # 8: "Obfuscator.ACY", # 9: "Gatak" # } - # high_probability_files = combined_data[combined_data['Prediction Probability'] >= 0.9] # files_list = high_probability_files['File'].tolist() - # files70_90 = combined_data[(combined_data['Prediction Probability'] >= 0.7) & (combined_data['Prediction Probability'] < 0.9)] # frequency = files70_90['Predicted Class'].value_counts().sort_index() # complete_index = pd.Index(range(10)) # frequency = frequency.reindex(complete_index, fill_value=0) -# print(frequency,'in the frequency') -# # if frequency: -# # print("Check_malware_frequency") - # all_frequency = combined_data['Predicted Class'].value_counts().reindex(range(1, 10), fill_value=0).sort_index() # frequency_with_names = all_frequency.rename(class_names) -# print(frequency_with_names,'with name') - # avg_probability = combined_data.groupby('Predicted Class')['Prediction Probability'].mean().reset_index() # all_classes = pd.DataFrame({'Predicted Class': range(1, 10)}) @@ -821,8 +1452,7 @@ def malware(request): # avg_probability['Prediction Probability'].fillna(0, inplace=True) # avg_probability['Class Name'] = avg_probability['Predicted Class'].map(class_names) # average_probability_dict = dict(zip(avg_probability['Class Name'], avg_probability['Prediction Probability'])) -# print(average_probability_dict,"avg is here ") - + # file_path = os.path.join(settings.MEDIA_ROOT, 'logs', 'logs.txt') # data = None # try: @@ -831,11 +1461,19 @@ def malware(request): # except: # pass - - -# return render(request, 'malware/malware.html', {'files_list': files_list , 'frequency' : frequency.to_dict() , 'class_frequency' : frequency_with_names.to_dict() , 'average' : average_probability_dict ,"logs":data}) +# return render(request, 'malware/malware.html', { +# 'files_list': files_list, +# 'frequency': frequency.to_dict(), +# 'class_frequency': frequency_with_names.to_dict(), +# 'average': average_probability_dict, +# "logs": data, +# 'message': None # Clear message if data is available +# }) + @never_cache + + def bye_asm_log(request): space_name = 'Extract' object_key = 'extract.log' @@ -866,12 +1504,11 @@ def bye_asm_log(request): return JsonResponse(live_data) - +# @csrf_exempt # @login_required(login_url='login') # @never_cache # def ransomware(request): - -# file_path = 'media/logs/usage_log.txt' +# file_path = os.path.join('media', 'logs', 'usage_log.txt') # cpu_data = [] # memory_data = [] @@ -886,40 +1523,118 @@ def bye_asm_log(request): # for line in lines: # # Parse CPU 
and memory usage from each line # parts = line.strip().split(",") -# cpu_usage = parts[0] -# memory_usage = parts[1] -# cpu_data.append(cpu_usage) -# memory_data.append(memory_usage) +# if len(parts) >= 2: +# cpu_usage = parts[0] +# memory_usage = parts[1] +# cpu_data.append(cpu_usage) +# memory_data.append(memory_usage) +# else: +# print(f"Skipping malformed line: {line}") +# else: +# print(f"Usage log file not found at path: {file_path}") - +# device_id = request.GET.get('device_id', None) +# # device_id=53 +# print(f"Device ID from headers: {device_id}") +# # If no device_id is found in the request, get the latest device for the logged-in user +# if not device_id: +# print("No device ID found. Fetching the latest device for the logged-in user.") +# recent_device = Devices.objects.filter(used_by__user=request.user).order_by('-id').first() +# if recent_device: +# device_id = recent_device.id # Use the actual device ID from the database +# print(f"Fetched recent device ID: {device_id}") +# else: +# print("No devices found for the logged-in user.") +# return JsonResponse({'error': 'No devices found for the logged-in user'}, status=404) +# # Construct file paths correctly using os.path.join +# csv_file_path = os.path.join('media', 'ransomware_predictions', str(device_id), 'latest_ransomware_type.csv') +# mapping_file_path = os.path.join('media', 'ransomware_predictions', 'mapping_win.txt') +# yes_no_path = os.path.join('media', 'ransomware_predictions', 'ransomware.csv') -# csv_file_path = 'media/ransomware_predictions/latest_ransomware_type.csv' # Replace with your actual CSV file path -# df = pd.read_csv(csv_file_path) -# mapping_file_path = 'media/ransomware_predictions/mapping_win.txt' -# mapping_df = pd.read_csv(mapping_file_path, header=None, names=['predicted_class', 'class_name']) -# class_mapping = dict(zip(mapping_df['predicted_class'], mapping_df['class_name'])) -# df['class_name'] = df['predicted_class'].map(class_mapping) -# class_frequency = df['class_name'].value_counts() -# all_classes_df = pd.DataFrame({'class_name': mapping_df['class_name']}) -# all_classes_df['frequency'] = all_classes_df['class_name'].map(class_frequency).fillna(0).astype(int) -# class_frequency_dict = dict(zip(all_classes_df['class_name'], all_classes_df['frequency'])) +# # Debugging: Print the file paths +# print(f"CSV file path: {csv_file_path}") +# print(f"Mapping file path: {mapping_file_path}") +# print(f"Yes/No file path: {yes_no_path}") -# yes_no_path = 'media/ransomware_predictions/ransomware.csv' +# # Initialize variables to hold processed data +# class_frequency_dict = {} +# flag = None +# time = None -# # Reading the CSV file into a DataFrame -# yes_no = pd.read_csv(yes_no_path) +# # Process the latest ransomware type CSV +# try: +# if not os.path.exists(csv_file_path): +# raise FileNotFoundError(f"CSV file not found at path: {csv_file_path}") -# # # Extracting the value of 'Predicted Label' -# flag =yes_no[yes_no.columns[-1]].iloc[0] -# time = yes_no[yes_no.columns[-2]].iloc[0] - +# # Load ransomware type CSV +# df = pd.read_csv(csv_file_path) +# print(f"Loaded ransomware type CSV: {csv_file_path}") +# # Load mapping file +# if not os.path.exists(mapping_file_path): +# raise FileNotFoundError(f"Mapping file not found at path: {mapping_file_path}") - -# return render(request, 'ransomware/ransomware.html' , context={ 'type' : class_frequency_dict, 'cpu' : json.dumps(cpu_data) , 'memory' : json.dumps(memory_data) , 'flag' : flag,'time' : time}) +# mapping_df = pd.read_csv(mapping_file_path, 
header=None, names=['predicted_class', 'class_name']) +# class_mapping = dict(zip(mapping_df['predicted_class'], mapping_df['class_name'])) +# print("Loaded mapping file and created class mapping dictionary.") + +# # Map predicted classes to class names +# df['class_name'] = df['predicted_class'].map(class_mapping) +# class_frequency = df['class_name'].value_counts() + +# # Ensure all classes from mapping are present in the frequency dictionary +# all_classes_df = pd.DataFrame({'class_name': mapping_df['class_name']}) +# all_classes_df['frequency'] = all_classes_df['class_name'].map(class_frequency).fillna(0).astype(int) +# class_frequency_dict = dict(zip(all_classes_df['class_name'], all_classes_df['frequency'])) + +# print(f"Class frequency dictionary: {class_frequency_dict}") + +# except FileNotFoundError as e: +# print(f"FileNotFoundError: {str(e)}") +# return JsonResponse({'error': str(e)}, status=404) +# except Exception as e: +# print(f"Exception while processing ransomware type CSV: {str(e)}") +# return JsonResponse({'error': f"Error processing ransomware type CSV: {str(e)}"}, status=500) + +# # Process the ransomware flag CSV +# try: +# if not os.path.exists(yes_no_path): +# raise FileNotFoundError(f"Ransomware CSV file not found at path: {yes_no_path}") + +# # Load ransomware flag CSV +# yes_no = pd.read_csv(yes_no_path) +# print('Loaded ransomware flag CSV:', yes_no) + +# if yes_no.empty: +# raise ValueError("Ransomware CSV file is empty.") + +# # Extracting the value of 'Predicted Label' and 'Time' +# flag = yes_no.iloc[0, -1] # Assuming 'Predicted Label' is the last column +# time = yes_no.iloc[0, -2] # Assuming 'Time' is the second last column + +# print(f"Extracted flag: {flag}, time: {time}") + +# except FileNotFoundError as e: +# print(f"FileNotFoundError: {str(e)}") +# return JsonResponse({'error': str(e)}, status=404) +# except Exception as e: +# print(f"Exception while processing ransomware flag CSV: {str(e)}") +# return JsonResponse({'error': f"Error processing ransomware flag CSV: {str(e)}"}, status=500) + +# # Prepare context for rendering the template +# context = { +# 'type': class_frequency_dict, +# 'cpu': json.dumps(cpu_data), +# 'memory': json.dumps(memory_data), +# 'flag': flag, +# 'time': time +# } + +# return render(request, 'ransomware/ransomware.html', context=context) +@csrf_exempt @login_required(login_url='login') @never_cache def ransomware(request): @@ -1051,7 +1766,6 @@ def ransomware(request): return render(request, 'ransomware/ransomware.html', context=context) - #================================================================================================== import time @@ -1572,7 +2286,7 @@ def generate_random_values(request): # response = s3.get_object(Bucket=space_name, Key=object_key) # content = response['Body'].read().decode('utf-8') -# # Return the content as a JSON response +# # Return the content as a JSON response1 # return JsonResponse({"log_content": content}) # except Exception as e: # return JsonResponse({"error": str(e)}, status=500) @@ -1662,6 +2376,7 @@ class SqlStatusView(APIView): # } # return JsonResponse(response_data) + from django.http import JsonResponse import boto3 from botocore.exceptions import NoCredentialsError, ClientError @@ -1704,8 +2419,6 @@ def sql_status_info(request): return JsonResponse(response_data) - - @csrf_exempt def restore_database(request): if request.method == "POST": @@ -1758,7 +2471,7 @@ def check_restore_value1(request): 'mysql': mysql, } return JsonResponse(response_data) - +from datetime import 
datetime @api_view(['POST']) def upload_csv(request): # Check if the request contains a file @@ -1999,8 +2712,9 @@ def malware_ASM_predictions_KNeighbours(request): return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path}) - +from datetime import datetime @api_view(['POST']) + def upload_logs(request): log_file = request.FILES.get('file') @@ -2021,22 +2735,119 @@ def upload_logs(request): save_path = os.path.join(folder_path, 'logs.txt') # If the file already exists, remove it to ensure overwriting - try: - if os.path.exists(save_path): - os.remove(save_path) - except Exception as e: - print(f"warning: {e}") - + if os.path.exists(save_path): + os.remove(save_path) # Save the new file - with open(save_path, 'w') as destination: + with open(save_path, 'wb+') as destination: for chunk in log_file.chunks(): - destination.write(f'{datetime.now()} - {chunk}') + destination.write(chunk) return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path}) +# @api_view(['POST']) +# def ransomware_predictions(request): +# csv_file = request.FILES.get('file') +# if not csv_file: +# return JsonResponse({'error': 'No file provided'}, status=400) +# if not csv_file.name.endswith('.csv'): +# return JsonResponse({'error': 'File is not CSV'}, status=400) + +# # Define the directory and file path where the CSV will be stored +# # folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions') + +# # # Make sure the directory exists +# # if not os.path.exists(folder_path): +# # os.makedirs(folder_path) + +# # # Define the path for the file (always named 'latest_ransomware.csv') +# # save_path = os.path.join(folder_path, 'latest_ransomware.csv') + +# # # If the file already exists, remove it to ensure overwriting +# # if os.path.exists (save_path): +# # os.remove(save_path) + +# # # Save the new file +# # with open(save_path, 'wb+') as destination: +# # for chunk in csv_file.chunks(): +# # destination.write(chunk) +# # user_id = request.data.get('user_id') +# user_id = request.data.get('user_id') + +# if not user_id: +# return JsonResponse({'error': 'User ID is required'}, status=400) + +# try: +# # Retrieve the UserProfile based on the provided user_id +# user_profile = UserProfile.objects.get(user__id=user_id) +# print(user_profile) + +# # Get the device IDs associated with the user +# device_ids = get_device_ids_by_user_id(user_id) +# print(f"Device IDs: {device_ids}") + +# # Check if the user has devices associated with them +# if not device_ids: +# return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400) + +# # Assuming we want to use the first device associated with the user +# device = Devices.objects.get(id=device_ids[-1]) +# print(f"Device ID: {device.id}") +# folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions', str(device.id)) +# # folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions') +# if not os.path.exists(folder_path): +# os.makedirs(folder_path) + +# # Define the path for the file (always named 'latest_ransomware.csv') +# save_path = os.path.join(folder_path, 'latest_ransomware.csv') + +# # If the file already exists, remove it to ensure overwriting +# if os.path.exists(save_path): +# os.remove(save_path) + +# # Save the new file +# with open(save_path, 'wb+') as destination: +# for chunk in csv_file.chunks(): +# destination.write(chunk) + +# # if not user_id: +# # return JsonResponse({'error': 'User ID is required'}, status=400) +# 
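+# The overwrite pattern above (os.remove then open(..., 'wb+')) can leave a
+# partial file behind if the request dies mid-stream. A hedged sketch of an
+# atomic overwrite using the same folder_path/save_path names:
+#
+#   import os, tempfile
+#   fd, tmp_path = tempfile.mkstemp(dir=folder_path)
+#   with os.fdopen(fd, 'wb') as tmp:
+#       for chunk in csv_file.chunks():
+#           tmp.write(chunk)
+#   os.replace(tmp_path, save_path)  # atomic rename over any existing file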
+# @api_view(['POST'])
+# def ransomware_predictions(request):
+#     csv_file = request.FILES.get('file')
+#     if not csv_file:
+#         return JsonResponse({'error': 'No file provided'}, status=400)
+#     if not csv_file.name.endswith('.csv'):
+#         return JsonResponse({'error': 'File is not CSV'}, status=400)
+
+#     # Define the directory and file path where the CSV will be stored
+#     # folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions')
+
+#     # # Make sure the directory exists
+#     # if not os.path.exists(folder_path):
+#     #     os.makedirs(folder_path)
+
+#     # # Define the path for the file (always named 'latest_ransomware.csv')
+#     # save_path = os.path.join(folder_path, 'latest_ransomware.csv')
+
+#     # # If the file already exists, remove it to ensure overwriting
+#     # if os.path.exists(save_path):
+#     #     os.remove(save_path)
+
+#     # # Save the new file
+#     # with open(save_path, 'wb+') as destination:
+#     #     for chunk in csv_file.chunks():
+#     #         destination.write(chunk)
+#     # user_id = request.data.get('user_id')
+#     user_id = request.data.get('user_id')
+
+#     if not user_id:
+#         return JsonResponse({'error': 'User ID is required'}, status=400)
+
+#     try:
+#         # Retrieve the UserProfile based on the provided user_id
+#         user_profile = UserProfile.objects.get(user__id=user_id)
+#         print(user_profile)
+
+#         # Get the device IDs associated with the user
+#         device_ids = get_device_ids_by_user_id(user_id)
+#         print(f"Device IDs: {device_ids}")
+
+#         # Check if the user has devices associated with them
+#         if not device_ids:
+#             return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400)
+
+#         # Assuming we want to use the last device associated with the user
+#         device = Devices.objects.get(id=device_ids[-1])
+#         print(f"Device ID: {device.id}")
+#         folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions', str(device.id))
+#         # folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions')
+#         if not os.path.exists(folder_path):
+#             os.makedirs(folder_path)
+
+#         # Define the path for the file (always named 'latest_ransomware.csv')
+#         save_path = os.path.join(folder_path, 'latest_ransomware.csv')
+
+#         # If the file already exists, remove it to ensure overwriting
+#         if os.path.exists(save_path):
+#             os.remove(save_path)
+
+#         # Save the new file
+#         with open(save_path, 'wb+') as destination:
+#             for chunk in csv_file.chunks():
+#                 destination.write(chunk)
+
+#     # if not user_id:
+#     #     return JsonResponse({'error': 'User ID is required'}, status=400)
+
+#     # try:
+#     #     # Retrieve the UserProfile based on the provided user_id
+#     #     user_profile = UserProfile.objects.get(user__id=user_id)
+#     #     print(user_profile)
+
+#     #     # Get the device IDs associated with the user
+#     #     device_ids = get_device_ids_by_user_id(user_id)
+#     #     print(f"Device IDs: {device_ids}")
+
+#     #     # Check if the user has devices associated with them
+#     #     if not device_ids:
+#     #         return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400)
+
+#     #     # Assuming we want to use the last device associated with the user
+#     #     device = Devices.objects.get(id=device_ids[-1])
+#     #     print(f"Device ID: {device.id}")
+
+#         # Create the Rensomware_AuditPrediction record
+#         rensomware_audit_prediction = Rensomware_AuditPrediction.objects.create(
+#             device=device,
+#             user=user_profile,
+#             file_path=save_path
+#         )
+
+#         return JsonResponse({
+#             'message': 'File uploaded and prediction saved successfully',
+#             'file_path': save_path,
+#             'prediction_id': rensomware_audit_prediction.id
+#         })
+
+#     except UserProfile.DoesNotExist:
+#         return JsonResponse({'error': 'User not found'}, status=404)
+#     except Devices.DoesNotExist:
+#         return JsonResponse({'error': 'Device not found'}, status=404)
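Both the commented-out draft above and the active view that follows read the upload from request.FILES['file'] and the owner from request.data['user_id'], i.e. a multipart POST with one file part and one form field. A minimal client sketch using requests; the URL is an assumption, since this patch does not touch urls.py:

import requests

with open('latest_ransomware.csv', 'rb') as f:
    resp = requests.post(
        'http://localhost:8000/api/ransomware_predictions/',  # hypothetical route
        files={'file': ('latest_ransomware.csv', f, 'text/csv')},
        data={'user_id': '1'},
    )
print(resp.status_code, resp.json())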
 @api_view(['POST'])
 def ransomware_predictions(request):
     csv_file = request.FILES.get('file')
@@ -2047,62 +2858,62 @@ def ransomware_predictions(request):
 
     if not csv_file.name.endswith('.csv'):
         return JsonResponse({'error': 'File is not CSV'}, status=400)
 
-    # Define the directory and file path where the CSV will be stored
-    folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions')
-
-    # Make sure the directory exists
-    if not os.path.exists(folder_path):
-        os.makedirs(folder_path)
+    user_id = request.data.get('user_id')
 
-    # Define the path for the file (always named 'latest_ransomware.csv')
-    save_path = os.path.join(folder_path, 'latest_ransomware.csv')
+    if not user_id:
+        return JsonResponse({'error': 'User ID is required'}, status=400)
 
-    # If the file already exists, remove it to ensure overwriting
-    if os.path.exists(save_path):
-        os.remove(save_path)
+    try:
+        # Retrieve the UserProfile based on the provided user_id
+        user_profile = UserProfile.objects.get(user__id=user_id)
+        print(user_profile)
 
-    # Save the new file
-    with open(save_path, 'wb+') as destination:
-        for chunk in csv_file.chunks():
-            destination.write(chunk)
+        # Get the device IDs associated with the user
+        device_ids = get_device_ids_by_user_id(user_id)
+        print(f"Device IDs: {device_ids}")
 
-    return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path})
+        # Check if the user has devices associated with them
+        if not device_ids:
+            return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400)
 
-# @api_view(['POST'])
-# def ransomware_type_predictions(request):
-#     try:
-
-#         csv_file = request.FILES.get('file')
+        # Assuming we want to use the last device associated with the user
+        device = Devices.objects.get(id=device_ids[-1])
+        print(f"Device ID: {device.id}")
 
-#         if not csv_file:
-#             return JsonResponse({'error': 'No file provided'}, status=400)
+        # Define the directory for storing the file
+        folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions', str(device.id))
+        if not os.path.exists(folder_path):
+            os.makedirs(folder_path)
 
-#         if not csv_file.name.endswith('.csv'):
-#             return JsonResponse({'error': 'File is not CSV'}, status=400)
+        # Define the path for the file (always named 'latest_ransomware.csv')
+        save_path = os.path.join(folder_path, 'latest_ransomware.csv')
 
-#         # Define the directory and file path where the CSV will be stored
-#         folder_path = os.path.join(settings.MEDIA_ROOT, 'ransomware_predictions')
-
-#         # Make sure the directory exists
-#         if not os.path.exists(folder_path):
-#             os.makedirs(folder_path)
+        # If the file already exists, remove it to ensure overwriting
+        if os.path.exists(save_path):
+            os.remove(save_path)
 
-#         # Define the path for the file (always named 'latest_ransomware.csv')
-#         save_path = os.path.join(folder_path, 'latest_ransomware_type.csv')
+        # Save the new file
+        with open(save_path, 'wb+') as destination:
+            for chunk in csv_file.chunks():
+                destination.write(chunk)
 
-#         # If the file already exists, remove it to ensure overwriting
-#         if os.path.exists(save_path):
-#             os.remove(save_path)
+        # Create the Rensomware_AuditPrediction record
+        rensomware_audit_prediction = Rensomware_AuditPrediction.objects.create(
+            device=device,
+            user=user_profile,
+            file_path=save_path
+        )
 
-#         # Save the new file
-#         with open(save_path, 'wb+') as destination:
-#             for chunk in csv_file.chunks():
-#                 destination.write(chunk)
-
-#         return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path})
-#     except Exception as e:
-#         print(e)
+        return JsonResponse({
+            'message': 'File uploaded and prediction saved successfully',
+            'file_path': save_path,
+            'prediction_id': rensomware_audit_prediction.id
+        })
 
+    except UserProfile.DoesNotExist:
+        return JsonResponse({'error': 'User not found'}, status=404)
+    except Devices.DoesNotExist:
+        return JsonResponse({'error': 'Device not found'}, status=404)
 
 @api_view(['POST'])
 def ransomware_type_predictions(request):
@@ -2194,7 +3005,40 @@ def ransomware_type_predictions(request):
 
     except Devices.DoesNotExist:
         return JsonResponse({'error': 'Device not found'}, status=404)
 
-
+
+
+# @api_view(['POST'])
+# def ddos_predictions(request):
+#     csv_file = request.FILES.get('file')
+
+#     if not csv_file:
+#         return JsonResponse({'error': 'No file provided'}, status=400)
+
+#     if not csv_file.name.endswith('.csv'):
+#         return JsonResponse({'error': 'File is not CSV'}, status=400)
+
+#     # Define the directory and file path where the CSV will be stored
+#     folder_path = os.path.join(settings.MEDIA_ROOT, 'ddos_predictions')
+
+#     # Make sure the directory exists
+#     if not os.path.exists(folder_path):
+#         os.makedirs(folder_path)
+
+#     # Define the path for the file (always named 'predictions.csv')
+#     save_path = os.path.join(folder_path, 'predictions.csv')
+
+#     # If the file already exists, remove it to ensure overwriting
+#     if os.path.exists(save_path):
+#         os.remove(save_path)
+
+#     # Save the new file
+#     with open(save_path, 'wb+') as destination:
+#         for chunk in csv_file.chunks():
+#             destination.write(chunk)
+
+
+#     return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path})
+
 def get_device_ids_by_user_id(user_id):
     try:
         # Get the UserProfile instance using the user ID
@@ -2228,7 +3072,7 @@ def get_device_ids_by_user_id(user_id):
 
 #     if not os.path.exists(folder_path):
 #         os.makedirs(folder_path)
 
-#     # Define the path for the file (always named 'latest_ransomware.csv')
+#     # Define the path for the file (always named 'predictions.csv')
 #     save_path = os.path.join(folder_path, 'predictions.csv')
 
 #     # If the file already exists, remove it to ensure overwriting
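Each successful upload above creates a new prediction row whose file_path points at the overwritten CSV, so reading results back means fetching the newest row for a device. A minimal sketch, assuming only what this patch shows of the models (device, user, file_path fields) and Django's default auto-increment id for "newest"; the helper name is illustrative:

def latest_ransomware_prediction_path(user_id):
    """Return the file_path of the newest Rensomware_AuditPrediction
    for the user's most recently registered device, or None."""
    device_ids = get_device_ids_by_user_id(user_id)
    if not device_ids:
        return None
    latest = (Rensomware_AuditPrediction.objects
              .filter(device_id=device_ids[-1])   # same "last device" rule as the views
              .order_by('-id')                    # assumes the default auto pk ordering
              .first())
    return latest.file_path if latest else None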
@@ -2240,7 +3084,113 @@ def get_device_ids_by_user_id(user_id):
 #         for chunk in csv_file.chunks():
 #             destination.write(chunk)
 
-#     return JsonResponse({'message': 'File uploaded and overwritten successfully', 'file_path': save_path})
+#     # Extract user_id from the request (device_id is not needed now)
+#     user_id = request.data.get('user_id')
+
+#     if not user_id:
+#         return JsonResponse({'error': 'User ID is required'}, status=400)
+
+#     try:
+#         # Retrieve the UserProfile based on the provided user_id
+#         user_profile = UserProfile.objects.get(user__id=user_id)
+#         print(user_profile)
+
+#         # Get the device IDs associated with the user
+#         device_ids = get_device_ids_by_user_id(user_id)
+#         print(f"Device IDs: {device_ids}")
+
+#         # Check if the user has devices associated with them
+#         if not device_ids:
+#             return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400)
+
+#         # Assuming we want to use the first device associated with the user
+#         device = Devices.objects.get(id=device_ids[0])
+#         print(f"Device ID: {device.id}")
+
+#         # Create the DdosPrediction record
+#         ddos_prediction = DdosPrediction.objects.create(
+#             device=device,
+#             user=user_profile,
+#             file_path=save_path
+#         )
+
+#         return JsonResponse({
+#             'message': 'File uploaded and prediction saved successfully',
+#             'file_path': save_path,
+#             'prediction_id': ddos_prediction.id
+#         })
+
+#     except UserProfile.DoesNotExist:
+#         return JsonResponse({'error': 'User not found'}, status=404)
+#     except Devices.DoesNotExist:
+#         return JsonResponse({'error': 'Device not found'}, status=404)
+
+# @api_view(['POST'])
+# def ddos_predictions(request):
+#     # Check if a file is provided in the request
+#     csv_file = request.FILES.get('file')
+#     if not csv_file:
+#         return JsonResponse({'error': 'No file provided'}, status=400)
+
+#     # Ensure the file is a CSV
+#     if not csv_file.name.endswith('.csv'):
+#         return JsonResponse({'error': 'File is not CSV'}, status=400)
+
+#     # Extract user_id from the request data
+#     user_id = request.data.get('user_id')
+#     device_ids = get_device_ids_by_user_id(user_id)
+#     print(f"Device IDs: {device_ids}")
+
+#     # Check if the user has associated devices
+#     if not device_ids:
+#         return JsonResponse({'error': 'No devices associated with the given user ID'}, status=400)
+
+#     try:
+#         # Retrieve the UserProfile for the logged-in user
+#         user_profile = UserProfile.objects.get(user__id=user_id)
+#         print('userrr', user_profile)
+
+#         # Get the most recent device associated with the user
+#         device_ids = get_device_ids_by_user_id(user_id)
+#         print('deviceeeee', device_ids)
+#         if not device_ids:
+#             return JsonResponse({'error': 'No devices found for the logged-in user'}, status=400)
+
+#         device = Devices.objects.get(id=device_ids[-1])
+#         print(f"Device ID: {device.id}")
+
+#         # Define the path for saving the file under 'ddos_predictions'
+#         folder_path = os.path.join(settings.MEDIA_ROOT, 'ddos_predictions')
+#         os.makedirs(folder_path, exist_ok=True)
+
+#         # Save under a fixed file name (not unique per device)
+#         save_path = os.path.join(folder_path, 'prediction.csv')
+
+#         # Save the file in chunks
+#         with open(save_path, 'wb+') as destination:
+#             for chunk in csv_file.chunks():
+#                 destination.write(chunk)
+
+#         # Create a DdosPrediction record
+#         ddos_prediction = DdosPrediction.objects.create(
+#             device=device,
+#             user=user_profile,
+#             file_path=save_path
+#         )
+
+#         # Return a success response
+#         return JsonResponse({
+#             'message': 'File uploaded and prediction saved successfully',
+#             'file_path': save_path,
+#             'prediction_id': ddos_prediction.id
+#         })
+
+#     except UserProfile.DoesNotExist:
+#         return JsonResponse({'error': 'User profile not found'}, status=404)
+#     except Exception as e:
+#         return JsonResponse({'error': f'An unexpected error occurred: {str(e)}'}, status=500)
+
+# =================== with unique file path ===================
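The active ddos_predictions below is the "unique file path" variant the banner refers to; its body is elided by the diff context. One way to build such a path is sketched here; the timestamp-plus-uuid naming and the per-device subfolder are assumptions for illustration, not necessarily what the view does:

import os
import uuid
from datetime import datetime

def unique_prediction_path(media_root, device_id):
    """Build media_root/ddos_predictions/<device_id>/<timestamp>_<uuid>.csv."""
    folder_path = os.path.join(media_root, 'ddos_predictions', str(device_id))
    os.makedirs(folder_path, exist_ok=True)
    stamp = datetime.now().strftime('%Y%m%d%H%M%S')
    return os.path.join(folder_path, f'{stamp}_{uuid.uuid4().hex}.csv')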
 
 from rest_framework.permissions import AllowAny
 from rest_framework.decorators import permission_classes
@@ -2306,6 +3256,7 @@ def ddos_predictions(request):
 
     except Exception as e:
         return JsonResponse({'error': f'An unexpected error occurred: {str(e)}'}, status=500)
 
+
 @api_view(['POST'])
 def usage_log(request):
     try: