From 5b74ae641c64025a966f123bce5f83527994d3bb Mon Sep 17 00:00:00 2001 From: "Kalam :p" <63474858+YourKalamity@users.noreply.github.com> Date: Sat, 27 Jun 2020 18:11:11 +0100 Subject: [PATCH] added stuff --- __pycache__/py7zr.cpython-38.pyc | Bin 0 -> 30145 bytes main.py | 128 ++ py7zr/archiveinfo.py | 1094 +++++++++++++++++ py7zr/callbacks.py | 61 + py7zr/cli.py | 317 +++++ py7zr/compression.py | 395 ++++++ py7zr/exceptions.py | 46 + py7zr/extra.py | 214 ++++ py7zr/helpers.py | 397 ++++++ py7zr/properties.py | 155 +++ py7zr/py7zr.py | 974 +++++++++++++++ py7zr/win32compat.py | 174 +++ requests/__init__.py | 131 ++ requests/__pycache__/__init__.cpython-38.pyc | Bin 0 -> 3369 bytes .../__pycache__/__version__.cpython-38.pyc | Bin 0 -> 538 bytes .../_internal_utils.cpython-38.pyc | Bin 0 -> 1303 bytes requests/__pycache__/adapters.cpython-38.pyc | Bin 0 -> 16894 bytes requests/__pycache__/api.cpython-38.pyc | Bin 0 -> 6718 bytes requests/__pycache__/auth.cpython-38.pyc | Bin 0 -> 8322 bytes requests/__pycache__/certs.cpython-38.pyc | Bin 0 -> 604 bytes requests/__pycache__/compat.cpython-38.pyc | Bin 0 -> 1638 bytes requests/__pycache__/cookies.cpython-38.pyc | Bin 0 -> 18817 bytes .../__pycache__/exceptions.cpython-38.pyc | Bin 0 -> 5215 bytes requests/__pycache__/help.cpython-38.pyc | Bin 0 -> 2655 bytes requests/__pycache__/hooks.cpython-38.pyc | Bin 0 -> 975 bytes requests/__pycache__/models.cpython-38.pyc | Bin 0 -> 23851 bytes requests/__pycache__/packages.cpython-38.pyc | Bin 0 -> 392 bytes requests/__pycache__/sessions.cpython-38.pyc | Bin 0 -> 19522 bytes .../__pycache__/status_codes.cpython-38.pyc | Bin 0 -> 4230 bytes .../__pycache__/structures.cpython-38.pyc | Bin 0 -> 4443 bytes requests/__pycache__/utils.cpython-38.pyc | Bin 0 -> 22316 bytes requests/__version__.py | 14 + requests/_internal_utils.py | 42 + requests/adapters.py | 533 ++++++++ requests/api.py | 161 +++ requests/auth.py | 305 +++++ requests/certs.py | 18 + requests/compat.py | 72 ++ 
requests/cookies.py | 549 +++++++++ requests/exceptions.py | 126 ++ requests/help.py | 119 ++ requests/hooks.py | 34 + requests/models.py | 954 ++++++++++++++ requests/packages.py | 14 + requests/sessions.py | 767 ++++++++++++ requests/status_codes.py | 123 ++ requests/structures.py | 105 ++ requests/utils.py | 982 +++++++++++++++ 48 files changed, 9004 insertions(+) create mode 100644 __pycache__/py7zr.cpython-38.pyc create mode 100644 main.py create mode 100644 py7zr/archiveinfo.py create mode 100644 py7zr/callbacks.py create mode 100644 py7zr/cli.py create mode 100644 py7zr/compression.py create mode 100644 py7zr/exceptions.py create mode 100644 py7zr/extra.py create mode 100644 py7zr/helpers.py create mode 100644 py7zr/properties.py create mode 100644 py7zr/py7zr.py create mode 100644 py7zr/win32compat.py create mode 100644 requests/__init__.py create mode 100644 requests/__pycache__/__init__.cpython-38.pyc create mode 100644 requests/__pycache__/__version__.cpython-38.pyc create mode 100644 requests/__pycache__/_internal_utils.cpython-38.pyc create mode 100644 requests/__pycache__/adapters.cpython-38.pyc create mode 100644 requests/__pycache__/api.cpython-38.pyc create mode 100644 requests/__pycache__/auth.cpython-38.pyc create mode 100644 requests/__pycache__/certs.cpython-38.pyc create mode 100644 requests/__pycache__/compat.cpython-38.pyc create mode 100644 requests/__pycache__/cookies.cpython-38.pyc create mode 100644 requests/__pycache__/exceptions.cpython-38.pyc create mode 100644 requests/__pycache__/help.cpython-38.pyc create mode 100644 requests/__pycache__/hooks.cpython-38.pyc create mode 100644 requests/__pycache__/models.cpython-38.pyc create mode 100644 requests/__pycache__/packages.cpython-38.pyc create mode 100644 requests/__pycache__/sessions.cpython-38.pyc create mode 100644 requests/__pycache__/status_codes.cpython-38.pyc create mode 100644 requests/__pycache__/structures.cpython-38.pyc create mode 100644 
requests/__pycache__/utils.cpython-38.pyc create mode 100644 requests/__version__.py create mode 100644 requests/_internal_utils.py create mode 100644 requests/adapters.py create mode 100644 requests/api.py create mode 100644 requests/auth.py create mode 100644 requests/certs.py create mode 100644 requests/compat.py create mode 100644 requests/cookies.py create mode 100644 requests/exceptions.py create mode 100644 requests/help.py create mode 100644 requests/hooks.py create mode 100644 requests/models.py create mode 100644 requests/packages.py create mode 100644 requests/sessions.py create mode 100644 requests/status_codes.py create mode 100644 requests/structures.py create mode 100644 requests/utils.py diff --git a/__pycache__/py7zr.cpython-38.pyc b/__pycache__/py7zr.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f7b7f65a5350a7a2e44d693e5e41395678c7c54 GIT binary patch literal 30145 zcmcJ2dw5*Ob>H5*Pb?OT2SE@dL2^Y>Bp{IxMahy(Q4~oKkf;YplaMHHXfGFgFUVc& z1DtyylIw-6n6jNFQ7k{|=Ak5@o0r-oZ5^j+oHX_4I%%5r>*qRc;@-4wx7|M?F`BEQFn z&c7HQPU3R@$cRL&NIqg2mRU9Orre|XsN7@unB3#}xZD%@gxr()q})^al-&FBeR5Cd z({k_6_scz#&)^=dCTauu0fXO&RkO9h{9tV;KU5pe57$QWBegC0Ew$17Xl-kLYi%q) zCgtMQZME_Icx`)rdu>O4homK{J8Kj935h4GyK1}hyK8&$duq9SPSR4Vew9{K48}ezG=|pQ_!LzfaQB)%$CQ@`q}N^M`BG`Dr6!_g5aU z`j;bb80vt)ehoTst-x*K|kf8{6i@H@cO%~%pC*epOf52@{d>p`A4m6{;{Vb z)}S@?TErTW126G%IOw5`?{(#G)ClX%)@ zjpJz?PbcxT-P(bt9rlBjQz(7f9~~_aOGPWnPR-?Qib5U>B|2qs{V4ZmFSaMK@PerEBGvZD;x$q>=e4!%LpN z<=W2Kb5kkLJbTWIo~qySQm4!HqPm60*qL(4^%o7gkrQwikb) zj_0YUmzZl*En9hsr|~`92QQYd){E|{vULJJGdO$GRmGA!Q><1Oi=}0}HFD8@*{IGSin1N2ff_C~)G}%qzG}OLn(ba|SWcl{tl7>~%t8FmGSA+j4I74g*Qgd z*d_mc0Hf0e&emO90aAIyn?t8`yBEr+$t~7ay!i8W4ec8&6|1GyYSFa|B~^OpsFz(r z^)8AP+(yB20cPs?Q%{|pDLk6@vKMAgohh7t?%d3?7tiKry+Oa$^F_zG(NNZu`Hd*B z`i%qvFM6X~FGpUDM9OcUi$qFD>iP?Z<*#5^+L640p@|`xkJ>RiZYS*IlDR!%nO5}8 zs1>u~=;%~Odg4vPN?IwT_OYzh2XaVT{di8>{dT4@V6jBT8h9;s+sJ2;HYh1sYfw^# 
zkdly;A!}GthLJKNDI?YvN!fywQ9A}GSl6hvRZ_PiH6|%z);38QL&~pn@zg(>%2ha}}5q-?YI`Yk+cO-t%NNe$ZlfOSMt_jjc}XdRW*du52uH{HnP7d^3w2E#qSFSr{%MJ#kI}V85Ig~5bt(<#n#m?QhhQVShsio}Z%1+KIE4$=2 z)U89gYPr6gYp7hEOrYw2Pk*qE;ZQFDxW-Zr737YUs-Wg$S2`MWB`0YB*Ul*$qq=Te zxy4(#GR9cZE!VH+jul;39lIi}EL6cOuAn-pKd3Gk>O>GS7b^hG(QTe3f^`@3IR_uA+Q4CL(N+Ppk2V7&PUoI)I4;vy9a}jJbH~U)j$N@e=K*LD>Hpxq9^p(gZf>1E`)WV@|a^+mn1{&MebV>br832otimz`32(O@^Z0StX)3SD6Q5&iO%Jx%I?#v zid-Wq7x9%bPh97wE@<9HMeZvLM}S5TrX8uE!#Sl`}(Wj zuP{S3>eX9cfAt3r@wx02!hymx=~)g%Qxx-`v?rDn0K^nO+o~qsfM(68tfq!X;$G&owh7oR+aYatQ+=7IBT`wAyP#`2SuA zvlk;TNvQUtFra6{ywtPyEj{n+=BNje+S`tS0tF1e9kJ|uL?;jPFzgx@k8h{ zH#H|x@8YK(+2nhVBCT|{@9CGn{qHsX89_!SiJlx-lb+_9(_ShV4|fOYF@SS51S#6h zXoA<@0Z+cxKcL(*|0JGWw z>bB0_>Zsw{hj!k}tkwe&*0%2cX)@C5;it{!c;}}(tJv^ua&Q?`oL}6?y5X9YD27(- z4)p}1x$T7x05sB^a?_rkievUd&2FPnbvS!jt0lBjI@DAYEJ#%5EgZeGO z<7C6vnWnsiRlW-4<=cN6y4&b{uLg%Faj=;aK7yLlL z*uXQpVc*sRJGJF=^UrM#I>nDqb%5R_TLvHO0-GHal6DRcA!$QkCQG|xs0oUdPeLxB zYyiR*()|Xx9w>x!uqKmTJ&?XIfA*!q>`M!?^B2#aqqHsa^k!tgMPz~1I^;sA6*ly^ zM*Rm+Z%Fzi{^p*}al6SIcW)Oz%y!*ri_1G)V;#elg8?~F42+nTFPabbc2qIv+^Q`$ zs^yZXd>i{~9?~C0Hcf2Fi-ofnpPPSHBW0ooDHmqXpSo~yws8L3+4+UHe9_Rbdl^9* zbcU}|#r)9_5z&+ggWaR4U9hYDl%c{2YQ%3phd-|9>4?t+W;eFA!kG#9e?ZyL*D#N- zG|Eu8p)*4MCzsgR{;8O>RhqBniHN5eY$7Ik{Qx)k!cU@*lRywb!(wkYyC(gPa$W7} zqSxJc_wM~FR)lDC!@xs-HcHF3yRjkjb0Fw;Po~(D7h#uDur+Pq)qTXgRP-~*)ubBq z(AG{;4a;>kcFwU~+D{tJbmaAatm{UyfQBALXf zF{>PcCvZ9E5rA2xcf`g9h7zVR5;ct>UNL=*`teabkH%Oc$Qg=_@F|HauJ2MNZ5mR0 z2^Dw!5m_g3IhPT1nzFG55`zI7Rvj3yv9d%{wVe{fcEU=+2%NI|#DpEQ(kRt$WoW>* z2INUh)mUAkfjWZKr35DQEuqPsA|V$gn)}a+6_PSqE;PQ86g&BvA`5%VfIhyoguz!5 z#)2gNG0tlSmqRRH1*0xNiKqQdY;lmAVTCj2r54!T(j;O>?3`X4GmU*y*?2Dy8yN5C z8tE01(r@C@lL%~Su%>kiYivqrQKJB|mzC^8umRR65(QRDH8hW`1IjxF*0}&>U$AVXE}1`ExdprS(I zYaPIB);&_Ft4G)yq42_y-!cMBEXUM{Yko*uJtJy2<1TER)VsF{wrJS8ZBjD>)|T)x zq_YZcf;BIFmZT+?EE#v2$b7x*8j5*Q*vfk-&Fe2v-b8b1g~IQmkdr|Wfk`VSTG?2) zUgl#td}GScL_VNs5>t5w-@$-GTfL8ga7A)T^?nAzDM?0ZnL&-g3WGMKNleJCgp4oa 
z5}?4G%%S*rG>d=599~_2MD9sk4oSDoSS%9^0qbNTYl&gen(QGdUEI|_q|uO0WyEk? z`=eV%jNsk$MhaTlv$cu6iUH{*U>Gy`!DY*5a5+a2aFDws8}l|4b~WloK~yxHBYivd1W;fFO?nh9#liuGAehxRI6@)r zD%>d(%M99|Ytsiu=r?f*{!(TtraKfVU4O*TNnFl2f;L^il>zIeppBRn12x3M zRk^cRdufh8jTM^EG}*DjR7#u}1Y@lQnU%YIzyGq^(!9y|@Ea&GHh9RS4&f$Ouy@&8*2v)J+Dr7|>FsKER;G zK%|K_0z=ySYh-F`yi75iW@M@flxrd)q`djJ;=NrYqg?u_U3t5AZlLzRO%{rF=r~iNTkU9zYH8R6B5HHiYTq81CSD$S1ilWI*OLJ|s zg?-YGfw)jdh%-##QyPg*DObQt;5e*P)z zG*TWw@35{T_V(=&!tdwKSZ9#?ptXd2I}a*4XPw7;kJv}8_n^-&SQi27 zqgDm|z&=e#KWM6302D7+FQW8g)+OsDVDjD8@+Rfpi*kAEJ5cVJb=i6!%02G)%loYY zYCjH0cKNuuVil3{1X2QAFIpv}oUo#p9I9TX(rsq0X z66C+7V#&_A4LDT=bBQzU8H)B8FA2o}MI5gmPEhs|E_hZzZw%an;$DoZgBPQrL461V zRlUYwAA%A_to-qw#Ko~ZgUhLcp|;=}w478!E?k0c8<_s%u;uru$B-UH`lJgTi|MY} z0);^r;OrEw!^MYp7whS6{I;onS)LOzrzPcGQ&rql3(iEh>2J_ie+H>32pO+RYM-C_ zFjCV<{j{X^`>DH;nnCJMOX`51ngWbjfsr+)>%S#m@2_NToA91HuOV>bE$9l!51{z( zEu)eG-x++JseSbqH5_;&BRPFx&T$P3at=sNf0(mZ!-JfYk~0wI^ulynU>c}o1*TU8 zTN*2U?jW!+WJPXst_O6|h7lLMZhhDtA=GzpeaNszkh7)P!Z)MKY}w~w-W!ET)<<0o z@vV2TNL_zP+L1v1H<32hir!)Swz=cCQKqmRX**ahEI%o&WBG5n_)2};ZPN63zH>SKhN;dNk+p;oX%3;D38^o^67PaexR!V&@e&rx>n4`UNe#Qo{PJ#* zV)j^!=iGbN_ugS_A7z-S(Rs*?-wD$5j2oVD(=&?A`{AX>`3kGO*H__J1YaiF$_}c7 zhjIsRFt~{T*06)cgYs(gI9-{rWK(az)@8$_R<6NikbWpwro;G=S+?y0b))QFgV&Ar zqnlos^Gvmz|a!6X#=MAd+8U8)m2+`FK^3y!#&#uUMpl1{Ln}M zFdYe8cr@9T2XnYM)L`U`f&_((W1cR8Slgvo=fw!KXD+P>xatbhRFN5G87@L+&z&w} zH985MjEiet-vX><8j^(eit=JKp!pSFu$N})F4PySnN($|fmRT@`*;ILgSDdo*F?Hh zX5rxoM-(VH;;N*LRV98mP6!2^VUPrSa_n3|A6=JlA9C2wzOHQsy-Jqk4*dCpOUri9V;SkSF+VOf(DrjZg?KZv-OJq7+EW_Zn? 
zKF$K4WbhV)O{h-YVFnwQLWCBgEQUd|r+@un4?}@ewhtw{`kDk7$0cpzkR}1r9_frg zCXt!aHP^MT31>dYC6(BELS`Fi&Fd*Fbee9TF2UKhlDrdLi?*WTt8jtyk!aoNmbuBfrGT ze}uuOnKvR60u4-fS%1qPoBET9#nmOqj_Q~AMh`*$5Yle4&vqaZ0XdTCk!_Jt#Xz8B zjU!lq?k3C)b3+$hl7WPM_zWI^Sz47rdlkrzmG~VKcu%aOzmT4ibVxaO44R%$ChhjO zjP=YN7+aU4YR(d7VH#QT9WTR)M$sx-D` zR~;GEU89RF<~74g_ae~ND7`izA@l-_D)LcSqr?KWgkrFLB^^~NU;$93c`prar=3(C zE?KOs!nmoQ1H60*ul3@EmY|Qd@e;DVd+yePa7x6+5^YG1^_rOOW?l07J23Hm{3siq zGSsi*)Oa6t3Yn(h*{U-LuEJARj?CB5Z ze1-|+FHD5MSNEbQ*MnuSqN`X435tE1O{bwcVp1^4nlOVJ<1n^0cXVOB>!ng3U-19P z#=_zJ_wWE^h|`P-?--Mg*A*BLEOR;5iouK-t3=gwD_VhyScz|stQ>*L&ndwr4(7T~CVftS>;GI)cp$HasmceztZ8^C`S$x{Pb3W~3Yr&!Wig@6Fhd?@Lq zWyZ8v{1w*F?|+=NdjsqstOvl6f;QU>fTNrbZd8M4Q7hggAJVqOlYCh!l(cSU<7p2f z*2d3?S=jk8c6z|lN5Fp2@sL?lVCZ1)W(@48*=3!W7D>OmlCD8A3>dg1ErVncLrJen zfis)x{TMDF0vQs1yaECcZjSUy94k^%qLSdys3g~7Adr-lpJ%L|(T4VxaRxkpEnXi3 z0bmwh!!TPjuSV8jfWL}#%*cJ1-=TF@(yc_H--@jzUAlA|73$u2`wioLU&0Vff#muu zkl!1olhuP2S}kI&L}gIOaL9sW_*$fut_<^=_-a3D9|55PLXc1lwt0;c`XbNe0wb^WyX9s#*mx!-~Nm{ z-bz_n*gVpOEYsJwx3v8rQQ3lT?$GUDjsqLVh&}x^cPD!Vc@uK=>)~`;F97vRnL+<+ zgYq=d>KE81EUZ`*w$u+n?-wlKS+F2?V1fOH6yU*+0TUK5vYZ42=YjEEklbL40R|#% zXs>@-80Qc?nQPemQt*dG7eSJnbAG&tOT=j|v%fFRO(B)Y$Ia_lu@T0IiQ}s%@|NkD z(?>+_@8y^2;~d#^(LM1?95Wn@0+FBwElJZ)V*|7a zBnq^YCIs3cLJR+XaML@ch8feRhiom28(tJL2a^5^lJap;OM0MJzl*Hj=mc9{)m-Oy zSkp-c#~6?asZ$Ir2Aqhdc6&)!RcIK{^5NHz=%qEI5=NHP+P$NGn>ktVH@`Q8xk;78 zuS~1z7a98!3-;MyX!Po~)#sQf;wn`lFACnK$U4=3VnEicHLa}Hv>ad664r~|!lHgb z_+Ys*gMW(vUNx&V&7r@H$Ejhhc>Oo#6HxVA4BF8Bdp`aL1bNz2!FMf1@kgUc5Z;5U z|H#}AFz9Bt>X(uBIpSL(5;4cMst?AR16$Sq2{8)9kT(j(J8EVP6XayRZN8mi49{Aw z)@BDXW|ra;WpDnEqNe?LqDeuQf}#l11oWvMNqhrQc99+6gc9g;?fr-ZTszcqaERpCV&>*k{W?T1g3d_OJ z-4Ah+?@+E$5AZAd5QHXd*1?LxrciuGQy{Fu;lWrpFezviuTM==8lyQD=3t~PnU?uF z)eTHTG^{Ebg0UA9QxL>kn7m)a2u!r#0jq5s$i=jw-oPF5it~n*bg{UAtM7CKVfu9B zR0Iej)Icm5LqfhE z)`P&iQGEd^UVN1a+8yG1B|+T!w2v)4GcR2@yD;m~oA1(77tZU*xrL`8qhpUVY!{F+ z58yrZhYSRrvZDB>c+4kNqfje4%RO`rEs~}^861IM+Mlxn31h^h>_91k#F(a^1)h3I 
zal`vvq)1-PYX+Bd5&_5#geQG)T*s*%iu`vR{U^B;9yqPPFN;atSc8Z`lg@QaH-Upo zvdy;Ce?_hNH0UZ|S$}{8MV^4A#&A=2L-}SJ&nWBh-#x-1P-!z~=PPkcH$E*-Kzi;K zo;JSF)pib~kapQKY@Jq!NjY%hfe`~2hhkh73UF8gOjTy{%th$4G;ix|qrSt4sJz(Q zT2$OS8mt@1-d$|bg$RjFNp<^1P8({c=V2!mYs;??AHt)l5NMwKONe<1U2;>i&YLKZ zX7kyC2$)x|;?QXn#$5Fgl-k_Wj8nFBKh7!PET^!|sD5&E13&cPVxP zQl9x+*7Lc(G+7Uj@?Di_n9w zVlinEZk|Orj?iWs7eBMe0@(*e8zL1e}~0zoqAy6Duyh|4+we6 zpv;OHR!oBVtX_yJILmg`ZPYdP{s{okOp)TL`iBhu5rcog;HwO1f`##AwOkGCb($VG z5rEW>qU>ZFOMyLAOL5H}1Ao{0SQm~+Hz8ZH@PO67vxr$sdd zfhT%7%IOk95#LCT^X)TSzMZ6A*Yw zfBMmQ6Kvf^y5Pc#%Nn0X#c58{yh!Z-(05AP3p z&WGS=8^Pn4kz#LW5c0nl!2(p^iP?o<)X*lv{dfR6IvFQ4GZ~l}NOl--*E8bg5t|qF zYGDp-X|tr?XH%(`_|unXEcG^^uONG2VCp$e*fpo2R0Xqe&%#_#unSy{8GJ8dmY1J7R7{j>=sV`sz5MFm+1Cy+SyX+q6$ zz*Vc@zq!48(07!FP@mBIN`-7hu-}KbyJUJ2IGLn0>T~!)nQ}Z#C6)n$fR+s>xxJXS z)pZlB03MmgBXtb*_LB?MMI`&bLTVhE03<0c2&vDtO!X$>kkZ#;ZW0R(F*;MIS74lg z;~KsY<5I&()|xmTx&B!zsna3nXLK4$3?K&c&h@_ta=YKEpTQTh_}ahQyw>`DR)p_-!LAyHO3d*CV&NqVk>hqoN;Jwk)J`Te#5r(bbHVi+gaR`&pbN|oU zHK*~8IzJIW+wRS8zpZ!&PBvoDi@{_U6_Z}9GLD!S^&q7W%M5xMZ;3x~0{jLimUv0rS&iPo z_-ycJ;v<^nhz%i2-5yP6W%) zOvCACTwG|Gx08Y3J*Y<~2csZMH|I${-e53Hx)=FC#O}XhT9Rp^7Iv#KzEq@mDv@Rv z94o!@L%&Zj93Y<}W=2q_7Q$&3gilcfX2!qnSq$!YX#=vt)tn2ch?=91qzI67g61?B zXL!2O0ayqT#|jD11m%}&H_$?WuqfnBtlr@26O^WJCn-r@$2yA`FLr72cVr(OJ;*wp z@N@H1{TglLiE29oG4H;@7Qde7&20O3KF!Oa=6hh8pYQ`MS%?^jFeqr z!Gt0J>6PxuvF4x5YJhU6DP%@Vs!q2jDbh#kc4@rlR>^ZZSO=JJK1F~+q_6tBhXjc+; zkH#Lc75q&g(vNEfm-9soMc-&Y0V7?^#oDto;R0m(FV`piyfah?fMr-sme3e0`Z`pK z0@*t}j_A(?UI*X6wFIstE_$nTA>h|hr{Sisp1lJ9u{2eVHQ3_7^WZ1ud>5t&u2RQd z2ZyWU_^|qMxP}bi_^@9y;NSpX>D{fqO4iL*2C>NSS-dp_q5rdRRp<*nr~6*%3p}Us zU25aQ>myd&w{>ih)$2dTI&^yP>~cMN`fcy6V!Vrr-q<7=7vek-{)z_^ua z4U9#$(e)wYm&v?gTt9>h&+FS-IAa!?zO)eUKL@zRnYNa#r=^t=ceht|SbgGk{z@ym zzVi;(s9!<;gav1f7AC)KSmIdAf;@mJt91Whf-ff zUBDgI;J@5T0hX-5l4Pvazkbgh>^xygUB9sxZ+so^Mj^6~-XY5gthia zfX6Z>Ct54MJ2q7A+{iTA#{l?cAm91Fg#P9QeLo=2}eqI(TxUq-)So<_N0>9_lq zuptAyZwNkRgv9vaV 
z$Lf|>wsc^XBVb7AA_|CpAN3x9hh_pY$a8C>tSGZxL^OnYh&_e*Ec}<+fhO&$bDk{L~A>+J+3LQ{q;9u z*Pm%6QSTU*s&=gHgb4X$Ye#D*sRenTp)H2j)Ykdt^E3W*9?o(W4+GWyrC4Ft2Z`d$ z0eygGF!?*vvzot#uS>xNuzG4+UJU1W;Z!xNXxFe67xp{)-b3tyEhY*} zcqlZZg?la4kGGn!%a@=Q1q|ju$s(uoj{4GNr;+Jzc;OwEJdb0&z^@R|B!{!Ip za2gbR;&2M<{Ji>Zyw}9S=+fcG(6W?RCh=Xh2kBEM#Y0O`LQ}6J@cM9y2IfE%@Dd9W zr5~=|MxcI-CE~Jrrq@`n@Vz2a;kXWo5-yFkWk?!c^4vweQ6dia**f-8%+9`a_TqvX z0rb)m;C0p2QmknIX{puW<`eA#Oo`S@adm=Hjn*1Or{`%v#YOmj;rvan-#>*+PXC}$ z&Kp+tRX_q4eyYXQGWJ{CLnx;i9AU7)AVWYt#TaJXh_>-R zf*k(gC2%S>_mvFJq2gI-u69AhsEsHJkm~yx9OUCC8T%N6k2BbUfWj-lhClNsm_$JY zB!|^iY}-it=QiP}YdC1!g2U|!PQvCU6E9i78A`>Kay~nQo^kDG{IizSpP_=O3B6vu ziA<0m)S^{*bW%Pzt`6VIXZ%be$-zpxKFAJT=cw}n*pjG{q8;Rua5CVd`X2=R51AjA z{Cunc7ilkwO$}k~s3UKdY|n7>NqnhbVf((ZsN--($!CJ2oS2gz44*Uzp3(4(l^(g0 z2aQI4k)qLW;M)=G6`?VLtLoHD_)pVB+Awj?LR%Sv8;EI+AssFuQ7pH_Vfrv*uyku9 z1`Qz8{nDsu45j%V-yemeCmc=048oK#N!PnlMx<6ccVpQGtN5dMpC^>k1%)L>%?WW6 zJz(y~vd(@OUSR8u(YTU<)Iq`rt$>?7g# zwCqCoX)=J^qxpMUFA1&9vYEho$~}+JC|sEcx-y9Dkpcs9yOIKZlGHo*X<9m`i?Seg&vlT=xTiga-&t zDyrLoGDJPdi}9F`UaCEcoX5~{&RzgUCt}hNU7SdlWBw&{i1<*nhZ^*UaeTGblf@#q zK@$me?0I}a#BjQmj><+45yOG>7=-ZsE*1hQ;^+=@9@br?PGho#7~e9b^HV4#n_7L% zck&WXK9)@zv_Rza5XI&|>=48j+(t0(OgDE2N~fiBtp?Oj&dM?^OgHcE$q(mja0`F( z#FoXPd}oAF(zs@DIh@Oy>pd8yr?X2U_q0|I??zKXmk@*eI=tsoU$ zqP`z@bPD$8T`xL8s0?HdIje9bg&LtN180zU|;S!7F)1>i&JDV znyh-_l8h+ugeqj=EaXe<-i3S_hkW@QAyr>##VdpEU}XrhdqREX685Ej<|w5s$^Di) zjD-*rqxM^Vxh+!eTT%`lw-+O!tRH_R9!RAkrQr-yV;YN^+ZaBas3WG37j_HuVWRorx#}toxCajrTc7^}5^X*xc01YPiC<)Bo6|1w97CNfYKiZE z=F8auSLN?Vu))*`VE)9(cmaDAuGp?lCO-9@XuO^;n+MOU##P9c4)%nzBLN)i&Or!< zYYSc4AcSLAHYUrDus^^BA|lIcNjwcnya3Ikh*OL?+PoAc>?(enMr~#BI}CW_$z~Q= z*;n@g_U0IhMPi|Vg+67RD@r}RDwx>NtDNCwknBc;JqQ!JsdfatiaS=ZLy|u$IF(k? zZSxTDjmFcaDs(T$kJ7Z-_C)ku`lO56sSTwOiP=L?Lzg}E=L|MmVTsEpzK5;lOu+eL zL+hkSSL--OCvi#ZsEwg5vNIU1 z1W^^vG1kp|R=6=`M8Q&GsDpG9Eh72_%e)Cm(mZ;&@))u9k&05j|M z$qdNLx4=wehDUDhf4<^Hi_UMV&(*H1|`C z{R{){#nxQmhZy6?Ri9_@>kPikKsG4+GGp&!@J$9BGWw7@YOZ>e3F;R4_!kJg3|G0! 
zCGCbN>baLaEqicn>jGFB_}I_0@Lw?yy8BVagrOygc41R8J#oaT-Auc|*dE4Yf$TT= zNFq}En8ry+-OJb)8IvyC%STG@>Joz&8Hn`q7K;@5NRm_*gEo0fY?4J@MX(i_j*AG- z=Q90`45Oec?2vgob2b&le}6VHG?-0hQ`u~4DEmYOWp18ti`>LWmspP9cPz)zAePi$MUP5!eryPv##usE zi`z!MdS1$=Dmb{tx#`E#9dR68Lm#6`(!y@s*NwZ@Gva2f__}N|zYSrTb2P--Q4HEa z4B9Ulzyo)e@=z67JWVt|@}AXlX}Nj}hx^JePiU=zmN{%e&+7>SXa0(FFeh8_F(_Uw zmvV4`$1fV`nRWq-c`}#4G9o|PldBbP@t_i29nB#;Wral!)5;Y5ZW4Z=L02%R35pC~ zeGh^re#uQAgF*r8n6BKp)>y?^c&HZ3vG_qLXjVAQcADla)*k%$+o2qOZ3gDWsUfWy zeTtR-Z3e%`;8g}PFy7CYn3t|FRz%?Sou^lfSje6r!TufQh^6WW7<&Y1U`Qgk{Vyhb zl0loQNzfk8t=1d#e>w`9$ssKgi-8+ajn&KO<3{>z?$8;7;f4Ej)HPJxB@1&R#M|v@ zo@9a5ajck!zP(X@n%C>#Pt5toF8*j9CU|YxW2ea}HO@8c_oz+>>HZc~>`mnl>`hMb zJmXjKJw3cRb}>Umi2|wzUR=yA>itaUY3`8T#)T&Pment^)q2h+A66B%O`cH`|5CWv zz8PH3Gy-^wqjeZI37obee&es3*fp4>%YNtl_=%5kRn?LYih z6zl+$ET)T#C9l6D!An8Vmfv3z#ZNuXhR6tbg0U|!_;Chv60kduFyOodSIS!}pcl>X z*+F`iles9#M*^qF3}p_?59@^kAG@+sub-K8;1qx8{9w=0g3KVnd%@3@pdo6IuW;@b zcl-R1eh&w{=#zXLZRZxDV3eh{B7l@7zx;L0u44V)(PvF>W9~Qu`g#Nv`FoA9&aXF5 z>|}un1YQP`JAVy9e8TcTB~G-t>4L8ut-&Wx{BjQgU1=&UcnQh5jRceMX<>EAuR;Sa0kUw>t7CCiT~zFV>5o9z7U^MHN}Q%=_dL zGMt1mItHQ;mN1{xame^MGyDXb@4SdWd$-YY)BT?@CQTYU}SW3$LMf&AUiPf!AS5wF)@}-{vW!H@J#>! 
literal 0 HcmV?d00001 diff --git a/main.py b/main.py new file mode 100644 index 0000000..e8f89d0 --- /dev/null +++ b/main.py @@ -0,0 +1,128 @@ +import tkinter +from tkinter import messagebox +from tkinter import filedialog +import os +import platform +import sys +import requests +import json +import py7zr +from pathlib import Path + +dsiVersions = ["1.0 - 1.3 (USA, EUR, AUS, JPN)", "1.4 - 1.4.5 (USA, EUR, AUS, JPN)", "All versions (KOR, CHN)"] +memoryPitLinks = ["https://github.com/YourKalamity/just-a-dsi-cfw-installer/raw/master/assets/files/memoryPit/256/pit.bin","https://github.com/YourKalamity/just-a-dsi-cfw-installer/raw/master/assets/files/memoryPit/768_1024/pit.bin"] + +window = tkinter.Tk() +window.sourceFolder = '' +window.sourceFile = '' +SDlabel = tkinter.Label(text = "SD card directory") +SDlabel.width = 100 +SDentry = tkinter.Entry() +SDentry.width = 100 + +def getLatestTWLmenu(): + release = json.loads(requests.get("https://api.github.com/repos/DS-Homebrew/TWiLightMenu/releases/latest").content) + url = release["assets"][0]["browser_download_url"] + return url + +def outputbox(message): + outputBox.configure(state='normal') + outputBox.insert('end', message) + outputBox.configure(state='disabled') + +def validateDirectory(directory): + try: + directory = str(directory) + except TypeError: + outputbox("That's not a valid directory") + return False + try: + string = directory + "/test.file" + with open(string, 'w') as file: + file.close() + os.remove(string) + except FileNotFoundError: + outputbox("That's not a valid directory") + outputbox(" or you do not have the") + outputbox(" permission to write there") + return False + except PermissionError: + outputbox("You do not have write") + outputbox(" access to that folder") + return False + else: + return True + +def start(): + outputBox.delete(0, tkinter.END) + #Variables + directory = SDentry.get() + version = firmwareVersion.get() + unlaunchNeeded = unlaunch.get() + + directoryValidated = 
validateDirectory(directory) + if directoryValidated == False: + return + if dsiVersions.index(version) == 1: + memoryPitDownload = memoryPitLinks[1] + elif dsiVersions.index(version) in [0,2]: + memoryPitDownload = memoryPitLinks[0] + + temp = directory + "/tmp/" + Path(temp).mkdir(parents=True,exist_ok=True) + + #Download Memory Pit + memoryPitLocation = directory + "/private/ds/app/484E494A/" + Path(memoryPitLocation).mkdir(parents=True, exist_ok=True) + r = requests.get(memoryPitDownload, allow_redirects=True) + memoryPitLocation = memoryPitLocation + "pit.bin" + open(memoryPitLocation, 'wb').write(r.content) + outputbox("Memory Pit Downloaded ") + + #Download TWiLight Menu + r = requests.get(getLatestTWLmenu(), allow_redirects=True) + TWLmenuLocation = temp + "TWiLightMenu.7z" + open(TWLmenuLocation,'wb').write(r.content) + outputbox("TWiLight Menu ++ Downloaded ") + + #Extract TWiLight Menu + archive = py7zr.SevenZipFile(TWLmenuLocation, mode='r') + archive.extractall(path=temp) + archive.close() + +def chooseDir(): + window.sourceFolder = filedialog.askdirectory(parent=window, initialdir= "/", title='Please select the directory of your SD card') + SDentry.delete(0, tkinter.END) + SDentry.insert(0, window.sourceFolder) +b_chooseDir = tkinter.Button(window, text = "Choose Folder", width = 20, command = chooseDir) +b_chooseDir.width = 100 +b_chooseDir.height = 50 + +firmwareLabel = tkinter.Label(text = "Select your DSi firmware") +firmwareLabel.width = 100 + +firmwareVersion = tkinter.StringVar(window) +firmwareVersion.set(dsiVersions[0]) +selector = tkinter.OptionMenu(window, firmwareVersion, *dsiVersions) +selector.width = 100 + +unlaunch = tkinter.IntVar(value=1) +unlaunchCheck = tkinter.Checkbutton(window, text = "Install Unlaunch?", variable =unlaunch) + +startButton = tkinter.Button(window, text = "Start", width = 20, command = start) +outputLabel = tkinter.Label(text="Output") +outputLabel.width = 100 +outputBox = tkinter.Text(window,state='disabled', 
width = 30, height = 10) + + +SDlabel.pack() +SDentry.pack() +b_chooseDir.pack() +firmwareLabel.pack() +selector.pack() +unlaunchCheck.pack() +startButton.pack() +outputLabel.pack() +outputBox.pack() +window.mainloop() + diff --git a/py7zr/archiveinfo.py b/py7zr/archiveinfo.py new file mode 100644 index 0000000..2e2b95e --- /dev/null +++ b/py7zr/archiveinfo.py @@ -0,0 +1,1094 @@ +#!/usr/bin/python -u +# +# p7zr library +# +# Copyright (c) 2019,2020 Hiroshi Miura +# Copyright (c) 2004-2015 by Joachim Bauch, mail@joachim-bauch.de +# 7-Zip Copyright (C) 1999-2010 Igor Pavlov +# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public +# License along with this library; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +# + +import functools +import io +import os +import struct +from binascii import unhexlify +from functools import reduce +from io import BytesIO +from operator import and_, or_ +from struct import pack, unpack +from typing import Any, BinaryIO, Dict, List, Optional, Tuple + +from py7zr.compression import SevenZipCompressor, SevenZipDecompressor +from py7zr.exceptions import Bad7zFile +from py7zr.helpers import ArchiveTimestamp, calculate_crc32 +from py7zr.properties import MAGIC_7Z, CompressionMethod, Property + +MAX_LENGTH = 65536 +P7ZIP_MAJOR_VERSION = b'\x00' +P7ZIP_MINOR_VERSION = b'\x04' + + +def read_crcs(file: BinaryIO, count: int) -> List[int]: + data = file.read(4 * count) + return [unpack(' Tuple[bytes, ...]: + return unpack(b'B' * length, file.read(length)) + + +def read_byte(file: BinaryIO) -> int: + return ord(file.read(1)) + + +def write_bytes(file: BinaryIO, data: bytes): + return file.write(data) + + +def write_byte(file: BinaryIO, data): + assert len(data) == 1 + return write_bytes(file, data) + + +def read_real_uint64(file: BinaryIO) -> Tuple[int, bytes]: + """read 8 bytes, return unpacked value as a little endian unsigned long long, and raw data.""" + res = file.read(8) + a = unpack(' Tuple[int, bytes]: + """read 4 bytes, return unpacked value as a little endian unsigned long, and raw data.""" + res = file.read(4) + a = unpack(' int: + """read UINT64, definition show in write_uint64()""" + b = ord(file.read(1)) + if b == 255: + return read_real_uint64(file)[0] + blen = [(0b01111111, 0), (0b10111111, 1), (0b11011111, 2), (0b11101111, 3), + (0b11110111, 4), (0b11111011, 5), (0b11111101, 6), (0b11111110, 7)] + mask = 0x80 + vlen = 8 + for v, l in blen: + if b <= v: + vlen = l + break + mask >>= 1 + if vlen == 0: + return b & (mask - 1) 
+ val = file.read(vlen) + value = int.from_bytes(val, byteorder='little') + highpart = b & (mask - 1) + return value + (highpart << (vlen * 8)) + + +def write_real_uint64(file: BinaryIO, value: int): + """write 8 bytes, as an unsigned long long.""" + file.write(pack(' 0x01ffffffffffffff: + file.write(b'\xff') + file.write(value.to_bytes(8, 'little')) + return + byte_length = (value.bit_length() + 7) // 8 + ba = bytearray(value.to_bytes(byte_length, 'little')) + high_byte = int(ba[-1]) + if high_byte < 2 << (8 - byte_length - 1): + for x in range(byte_length - 1): + high_byte |= 0x80 >> x + file.write(pack('B', high_byte)) + file.write(ba[:byte_length - 1]) + else: + mask = 0x80 + for x in range(byte_length): + mask |= 0x80 >> x + file.write(pack('B', mask)) + file.write(ba) + + +def read_boolean(file: BinaryIO, count: int, checkall: bool = False) -> List[bool]: + if checkall: + all_defined = file.read(1) + if all_defined != unhexlify('00'): + return [True] * count + result = [] + b = 0 + mask = 0 + for i in range(count): + if mask == 0: + b = ord(file.read(1)) + mask = 0x80 + result.append(b & mask != 0) + mask >>= 1 + return result + + +def write_boolean(file: BinaryIO, booleans: List[bool], all_defined: bool = False): + if all_defined and reduce(and_, booleans, True): + file.write(b'\x01') + return + elif all_defined: + file.write(b'\x00') + o = bytearray(-(-len(booleans) // 8)) + for i, b in enumerate(booleans): + if b: + o[i // 8] |= 1 << (7 - i % 8) + file.write(o) + + +def read_utf16(file: BinaryIO) -> str: + """read a utf-16 string from file""" + val = '' + for _ in range(MAX_LENGTH): + ch = file.read(2) + if ch == unhexlify('0000'): + break + val += ch.decode('utf-16LE') + return val + + +def write_utf16(file: BinaryIO, val: str): + """write a utf-16 string to file""" + for c in val: + file.write(c.encode('utf-16LE')) + file.write(b'\x00\x00') + + +def bits_to_bytes(bit_length: int) -> int: + return - (-bit_length // 8) + + +class ArchiveProperties: + + 
__slots__ = ['property_data'] + + def __init__(self): + self.property_data = [] + + @classmethod + def retrieve(cls, file): + return cls()._read(file) + + def _read(self, file): + pid = file.read(1) + if pid == Property.ARCHIVE_PROPERTIES: + while True: + ptype = file.read(1) + if ptype == Property.END: + break + size = read_uint64(file) + props = read_bytes(file, size) + self.property_data.append(props) + return self + + def write(self, file): + if len(self.property_data) > 0: + write_byte(file, Property.ARCHIVE_PROPERTIES) + for data in self.property_data: + write_uint64(file, len(data)) + write_bytes(file, data) + write_byte(file, Property.END) + + +class PackInfo: + """ information about packed streams """ + + __slots__ = ['packpos', 'numstreams', 'packsizes', 'packpositions', 'crcs'] + + def __init__(self) -> None: + self.packpos = 0 # type: int + self.numstreams = 0 # type: int + self.packsizes = [] # type: List[int] + self.crcs = None # type: Optional[List[int]] + + @classmethod + def retrieve(cls, file: BinaryIO): + return cls()._read(file) + + def _read(self, file: BinaryIO): + self.packpos = read_uint64(file) + self.numstreams = read_uint64(file) + pid = file.read(1) + if pid == Property.SIZE: + self.packsizes = [read_uint64(file) for _ in range(self.numstreams)] + pid = file.read(1) + if pid == Property.CRC: + self.crcs = [read_uint64(file) for _ in range(self.numstreams)] + pid = file.read(1) + if pid != Property.END: + raise Bad7zFile('end id expected but %s found' % repr(pid)) + self.packpositions = [sum(self.packsizes[:i]) for i in range(self.numstreams + 1)] # type: List[int] + return self + + def write(self, file: BinaryIO): + assert self.packpos is not None + numstreams = len(self.packsizes) + assert self.crcs is None or len(self.crcs) == numstreams + write_byte(file, Property.PACK_INFO) + write_uint64(file, self.packpos) + write_uint64(file, numstreams) + write_byte(file, Property.SIZE) + for size in self.packsizes: + write_uint64(file, size) + 
if self.crcs is not None: + write_bytes(file, Property.CRC) + for crc in self.crcs: + write_uint64(file, crc) + write_byte(file, Property.END) + + +class Folder: + """ a "Folder" represents a stream of compressed data. + coders: list of coder + num_coders: length of coders + coder: hash list + keys of coders: method, numinstreams, numoutstreams, properties + unpacksizes: uncompressed sizes of outstreams + """ + + __slots__ = ['unpacksizes', 'solid', 'coders', 'digestdefined', 'totalin', 'totalout', + 'bindpairs', 'packed_indices', 'crc', 'decompressor', 'compressor', 'files'] + + def __init__(self) -> None: + self.unpacksizes = None # type: Optional[List[int]] + self.coders = [] # type: List[Dict[str, Any]] + self.bindpairs = [] # type: List[Any] + self.packed_indices = [] # type: List[int] + # calculated values + self.totalin = 0 # type: int + self.totalout = 0 # type: int + # internal values + self.solid = False # type: bool + self.digestdefined = False # type: bool + self.crc = None # type: Optional[int] + # compress/decompress objects + self.decompressor = None # type: Optional[SevenZipDecompressor] + self.compressor = None # type: Optional[SevenZipCompressor] + self.files = None + + @classmethod + def retrieve(cls, file: BinaryIO): + obj = cls() + obj._read(file) + return obj + + def _read(self, file: BinaryIO) -> None: + num_coders = read_uint64(file) + for _ in range(num_coders): + b = read_byte(file) + methodsize = b & 0xf + iscomplex = b & 0x10 == 0x10 + hasattributes = b & 0x20 == 0x20 + c = {'method': file.read(methodsize)} # type: Dict[str, Any] + if iscomplex: + c['numinstreams'] = read_uint64(file) + c['numoutstreams'] = read_uint64(file) + else: + c['numinstreams'] = 1 + c['numoutstreams'] = 1 + self.totalin += c['numinstreams'] + self.totalout += c['numoutstreams'] + if hasattributes: + proplen = read_uint64(file) + c['properties'] = file.read(proplen) + self.coders.append(c) + num_bindpairs = self.totalout - 1 + for i in range(num_bindpairs): + 
self.bindpairs.append((read_uint64(file), read_uint64(file),)) + num_packedstreams = self.totalin - num_bindpairs + if num_packedstreams == 1: + for i in range(self.totalin): + if self._find_in_bin_pair(i) < 0: # there is no in_bin_pair + self.packed_indices.append(i) + elif num_packedstreams > 1: + for i in range(num_packedstreams): + self.packed_indices.append(read_uint64(file)) + + def write(self, file: BinaryIO): + num_coders = len(self.coders) + assert num_coders > 0 + write_uint64(file, num_coders) + for i, c in enumerate(self.coders): + id = c['method'] # type: bytes + id_size = len(id) & 0x0f + iscomplex = 0x10 if not self.is_simple(c) else 0x00 + hasattributes = 0x20 if c['properties'] is not None else 0x00 + flag = struct.pack('B', id_size | iscomplex | hasattributes) + write_byte(file, flag) + write_bytes(file, id[:id_size]) + if not self.is_simple(c): + write_uint64(file, c['numinstreams']) + assert c['numoutstreams'] == 1 + write_uint64(file, c['numoutstreams']) + if c['properties'] is not None: + write_uint64(file, len(c['properties'])) + write_bytes(file, c['properties']) + num_bindpairs = self.totalout - 1 + assert len(self.bindpairs) == num_bindpairs + num_packedstreams = self.totalin - num_bindpairs + for bp in self.bindpairs: + write_uint64(file, bp[0]) + write_uint64(file, bp[1]) + if num_packedstreams > 1: + for pi in self.packed_indices: + write_uint64(file, pi) + + def is_simple(self, coder): + return coder['numinstreams'] == 1 and coder['numoutstreams'] == 1 + + def get_decompressor(self, size: int, reset: bool = False) -> SevenZipDecompressor: + if self.decompressor is not None and not reset: + return self.decompressor + else: + self.decompressor = SevenZipDecompressor(self.coders, size, self.crc) + return self.decompressor + + def get_compressor(self) -> SevenZipCompressor: + if self.compressor is not None: + return self.compressor + else: + try: + # FIXME: set filters + self.compressor = SevenZipCompressor() + self.coders = 
class UnpackInfo:
    """ combines multiple folders """

    __slots__ = ['numfolders', 'folders', 'datastreamidx']

    @classmethod
    def retrieve(cls, file: BinaryIO):
        """Build an UnpackInfo by parsing it out of *file*."""
        instance = cls()
        instance._read(file)
        return instance

    def __init__(self):
        # folder count, the Folder objects themselves, and the optional
        # external data-stream index they could be loaded from
        self.numfolders = None
        self.folders = []
        self.datastreamidx = None

    def _read(self, file: BinaryIO):
        """Read the folder list, following the external position if flagged."""
        marker = file.read(1)
        if marker != Property.FOLDER:
            raise Bad7zFile('folder id expected but %s found' % repr(marker))
        self.numfolders = read_uint64(file)
        self.folders = []
        if read_byte(file) == 0x00:
            # folders stored inline, immediately after the flag byte
            for _ in range(self.numfolders):
                self.folders.append(Folder.retrieve(file))
        else:
            # folders live at an external offset: remember the current
            # position, hop there, parse, then hop back
            datastreamidx = read_uint64(file)
            saved_pos = file.tell()
            file.seek(datastreamidx, 0)
            for _ in range(self.numfolders):
                self.folders.append(Folder.retrieve(file))
            file.seek(saved_pos, 0)
        self._retrieve_coders_info(file)

    def _retrieve_coders_info(self, file: BinaryIO):
        """Read per-folder unpack sizes and, when present, CRC digests."""
        marker = file.read(1)
        if marker != Property.CODERS_UNPACK_SIZE:
            raise Bad7zFile('coders unpack size id expected but %s found' % repr(marker))
        for folder in self.folders:
            sizes = []
            for _ in range(folder.totalout):
                sizes.append(read_uint64(file))
            folder.unpacksizes = sizes
        marker = file.read(1)
        if marker == Property.CRC:
            defined = read_boolean(file, self.numfolders, checkall=True)
            crcs = read_crcs(file, self.numfolders)
            for folder, has_digest, crc in zip(self.folders, defined, crcs):
                folder.digestdefined = has_digest
                folder.crc = crc
            marker = file.read(1)
        if marker != Property.END:
            raise Bad7zFile('end id expected but %s found at %d' % (repr(marker), file.tell()))

    def write(self, file: BinaryIO):
        """Serialize this UnpackInfo; folders are always written inline."""
        assert self.numfolders is not None
        assert self.folders is not None
        assert self.numfolders == len(self.folders)
        file.write(Property.UNPACK_INFO)
        file.write(Property.FOLDER)
        write_uint64(file, self.numfolders)
        write_byte(file, b'\x00')
        for folder in self.folders:
            folder.write(file)
        # If support external entity, we may write
        # self.datastreamidx here.
        # folder data will be written in another place.
        # write_byte(file, b'\x01')
        # assert self.datastreamidx is not None
        # write_uint64(file, self.datastreamidx)
        write_byte(file, Property.CODERS_UNPACK_SIZE)
        for folder in self.folders:
            for size in folder.unpacksizes[:folder.totalout]:
                write_uint64(file, size)
        write_byte(file, Property.END)
range(len(self.num_unpackstreams_folders)): + totalsize = 0 # type: int + for j in range(1, self.num_unpackstreams_folders[i]): + size = read_uint64(file) + self.unpacksizes.append(size) + totalsize += size + self.unpacksizes.append(folders[i].get_unpack_size() - totalsize) + pid = file.read(1) + num_digests = 0 + num_digests_total = 0 + for i in range(numfolders): + numsubstreams = self.num_unpackstreams_folders[i] + if numsubstreams != 1 or not folders[i].digestdefined: + num_digests += numsubstreams + num_digests_total += numsubstreams + if pid == Property.CRC: + defined = read_boolean(file, num_digests, checkall=True) + crcs = read_crcs(file, num_digests) + didx = 0 + for i in range(numfolders): + folder = folders[i] + numsubstreams = self.num_unpackstreams_folders[i] + if numsubstreams == 1 and folder.digestdefined and folder.crc is not None: + self.digestsdefined.append(True) + self.digests.append(folder.crc) + else: + for j in range(numsubstreams): + self.digestsdefined.append(defined[didx]) + self.digests.append(crcs[didx]) + didx += 1 + pid = file.read(1) + if pid != Property.END: + raise Bad7zFile('end id expected but %r found' % pid) + if not self.digestsdefined: + self.digestsdefined = [False] * num_digests_total + self.digests = [0] * num_digests_total + + def write(self, file: BinaryIO, numfolders: int): + assert self.num_unpackstreams_folders is not None + if len(self.num_unpackstreams_folders) == 0: + # nothing to write + return + if self.unpacksizes is None: + raise ValueError + write_byte(file, Property.SUBSTREAMS_INFO) + if not functools.reduce(lambda x, y: x and (y == 1), self.num_unpackstreams_folders, True): + write_byte(file, Property.NUM_UNPACK_STREAM) + for n in self.num_unpackstreams_folders: + write_uint64(file, n) + write_byte(file, Property.SIZE) + idx = 0 + for i in range(numfolders): + for j in range(1, self.num_unpackstreams_folders[i]): + size = self.unpacksizes[idx] + write_uint64(file, size) + idx += 1 + idx += 1 + if 
class StreamsInfo:
    """ information about compressed streams """

    __slots__ = ['packinfo', 'unpackinfo', 'substreamsinfo']

    def __init__(self):
        # sub-records are populated lazily by read(); each one is
        # optional in the on-disk format and may remain None
        self.packinfo = None  # type: PackInfo
        self.unpackinfo = None  # type: UnpackInfo
        self.substreamsinfo = None  # type: Optional[SubstreamsInfo]

    @classmethod
    def retrieve(cls, file: BinaryIO):
        """Construct a StreamsInfo by parsing it from *file*."""
        info = cls()
        info.read(file)
        return info

    def read(self, file: BinaryIO) -> None:
        """Parse the optional pack/unpack/substreams records, in order."""
        marker = file.read(1)
        if marker == Property.PACK_INFO:
            self.packinfo = PackInfo.retrieve(file)
            marker = file.read(1)
        if marker == Property.UNPACK_INFO:
            self.unpackinfo = UnpackInfo.retrieve(file)
            marker = file.read(1)
        if marker == Property.SUBSTREAMS_INFO:
            self.substreamsinfo = SubstreamsInfo.retrieve(file, self.unpackinfo.numfolders, self.unpackinfo.folders)
            marker = file.read(1)
        if marker != Property.END:
            raise Bad7zFile('end id expected but %s found' % repr(marker))

    def write(self, file: BinaryIO):
        """Write this record tagged as the archive's main streams info."""
        write_byte(file, Property.MAIN_STREAMS_INFO)
        self._write(file)

    def _write(self, file: BinaryIO):
        # emit every present sub-record, then the END terminator
        for record in (self.packinfo, self.unpackinfo):
            if record is not None:
                record.write(file)
        if self.substreamsinfo is not None:
            self.substreamsinfo.write(file, self.unpackinfo.numfolders)
        write_byte(file, Property.END)
self.unpackinfo.folders = [folder] + + def write(self, file: BinaryIO): + self._write(file) + + +class FilesInfo: + """ holds file properties """ + + __slots__ = ['files', 'emptyfiles', 'antifiles'] + + def __init__(self): + self.files = [] # type: List[Dict[str, Any]] + self.emptyfiles = [] # type: List[bool] + self.antifiles = None + + @classmethod + def retrieve(cls, file: BinaryIO): + obj = cls() + obj._read(file) + return obj + + def _read(self, fp: BinaryIO): + numfiles = read_uint64(fp) + self.files = [{'emptystream': False} for _ in range(numfiles)] + numemptystreams = 0 + while True: + prop = fp.read(1) + if prop == Property.END: + break + size = read_uint64(fp) + if prop == Property.DUMMY: + # Added by newer versions of 7z to adjust padding. + fp.seek(size, os.SEEK_CUR) + continue + buffer = io.BytesIO(fp.read(size)) + if prop == Property.EMPTY_STREAM: + isempty = read_boolean(buffer, numfiles, checkall=False) + list(map(lambda x, y: x.update({'emptystream': y}), self.files, isempty)) # type: ignore + numemptystreams += isempty.count(True) + elif prop == Property.EMPTY_FILE: + self.emptyfiles = read_boolean(buffer, numemptystreams, checkall=False) + elif prop == Property.ANTI: + self.antifiles = read_boolean(buffer, numemptystreams, checkall=False) + elif prop == Property.NAME: + external = buffer.read(1) + if external == b'\x00': + self._read_name(buffer) + else: + dataindex = read_uint64(buffer) + current_pos = fp.tell() + fp.seek(dataindex, 0) + self._read_name(fp) + fp.seek(current_pos, 0) + elif prop == Property.CREATION_TIME: + self._read_times(buffer, 'creationtime') + elif prop == Property.LAST_ACCESS_TIME: + self._read_times(buffer, 'lastaccesstime') + elif prop == Property.LAST_WRITE_TIME: + self._read_times(buffer, 'lastwritetime') + elif prop == Property.ATTRIBUTES: + defined = read_boolean(buffer, numfiles, checkall=True) + external = buffer.read(1) + if external == b'\x00': + self._read_attributes(buffer, defined) + else: + dataindex = 
read_uint64(buffer) + # try to read external data + current_pos = fp.tell() + fp.seek(dataindex, 0) + self._read_attributes(fp, defined) + fp.seek(current_pos, 0) + elif prop == Property.START_POS: + self._read_start_pos(buffer) + else: + raise Bad7zFile('invalid type %r' % prop) + + def _read_name(self, buffer: BinaryIO) -> None: + for f in self.files: + f['filename'] = read_utf16(buffer).replace('\\', '/') + + def _read_attributes(self, buffer: BinaryIO, defined: List[bool]) -> None: + for idx, f in enumerate(self.files): + f['attributes'] = read_uint32(buffer)[0] if defined[idx] else None + + def _read_times(self, fp: BinaryIO, name: str) -> None: + defined = read_boolean(fp, len(self.files), checkall=True) + # NOTE: the "external" flag is currently ignored, should be 0x00 + external = fp.read(1) + assert external == b'\x00' + for i, f in enumerate(self.files): + f[name] = ArchiveTimestamp(read_real_uint64(fp)[0]) if defined[i] else None + + def _read_start_pos(self, fp: BinaryIO) -> None: + defined = read_boolean(fp, len(self.files), checkall=True) + # NOTE: the "external" flag is currently ignored, should be 0x00 + external = fp.read(1) + assert external == 0x00 + for i, f in enumerate(self.files): + f['startpos'] = read_real_uint64(fp)[0] if defined[i] else None + + def _write_times(self, fp: BinaryIO, propid, name: str) -> None: + write_byte(fp, propid) + defined = [] # type: List[bool] + num_defined = 0 # type: int + for f in self.files: + if name in f.keys(): + if f[name] is not None: + defined.append(True) + num_defined += 1 + size = num_defined * 8 + 2 + if not reduce(and_, defined, True): + size += bits_to_bytes(num_defined) + write_uint64(fp, size) + write_boolean(fp, defined, all_defined=True) + write_byte(fp, b'\x00') + for i, file in enumerate(self.files): + if defined[i]: + write_real_uint64(fp, ArchiveTimestamp.from_datetime(file[name])) + else: + pass + + def _write_prop_bool_vector(self, fp: BinaryIO, propid, vector) -> None: + write_byte(fp, 
propid) + write_boolean(fp, vector, all_defined=True) + + @staticmethod + def _are_there(vector) -> bool: + if vector is not None: + if functools.reduce(or_, vector, False): + return True + return False + + def _write_names(self, file: BinaryIO): + name_defined = 0 + names = [] + name_size = 0 + for f in self.files: + if f.get('filename', None) is not None: + name_defined += 1 + names.append(f['filename']) + name_size += len(f['filename'].encode('utf-16LE')) + 2 # len(str + NULL_WORD) + if name_defined > 0: + write_byte(file, Property.NAME) + write_uint64(file, name_size + 1) + write_byte(file, b'\x00') + for n in names: + write_utf16(file, n) + + def _write_attributes(self, file): + defined = [] # type: List[bool] + num_defined = 0 + for f in self.files: + if 'attributes' in f.keys() and f['attributes'] is not None: + defined.append(True) + num_defined += 1 + else: + defined.append(False) + size = num_defined * 4 + 2 + if num_defined != len(defined): + size += bits_to_bytes(num_defined) + write_byte(file, Property.ATTRIBUTES) + write_uint64(file, size) + write_boolean(file, defined, all_defined=True) + write_byte(file, b'\x00') + for i, f in enumerate(self.files): + if defined[i]: + write_uint32(file, f['attributes']) + + def write(self, file: BinaryIO): + assert self.files is not None + write_byte(file, Property.FILES_INFO) + numfiles = len(self.files) + write_uint64(file, numfiles) + emptystreams = [] # List[bool] + for f in self.files: + emptystreams.append(f['emptystream']) + if self._are_there(emptystreams): + write_byte(file, Property.EMPTY_STREAM) + write_uint64(file, bits_to_bytes(numfiles)) + write_boolean(file, emptystreams, all_defined=False) + else: + if self._are_there(self.emptyfiles): + self._write_prop_bool_vector(file, Property.EMPTY_FILE, self.emptyfiles) + if self._are_there(self.antifiles): + self._write_prop_bool_vector(file, Property.ANTI, self.antifiles) + # Name + self._write_names(file) + # timestamps + self._write_times(file, 
Property.CREATION_TIME, 'creationtime') + self._write_times(file, Property.LAST_ACCESS_TIME, 'lastaccesstime') + self._write_times(file, Property.LAST_WRITE_TIME, 'lastwritetime') + # start_pos + # FIXME: TBD + # attribute + self._write_attributes(file) + write_byte(file, Property.END) + + +class Header: + """ the archive header """ + + __slot__ = ['solid', 'properties', 'additional_streams', 'main_streams', 'files_info', + 'size', '_start_pos'] + + def __init__(self) -> None: + self.solid = False + self.properties = None + self.additional_streams = None + self.main_streams = None + self.files_info = None + self.size = 0 # fixme. Not implemented yet + self._start_pos = 0 + + @classmethod + def retrieve(cls, fp: BinaryIO, buffer: BytesIO, start_pos: int): + obj = cls() + obj._read(fp, buffer, start_pos) + return obj + + def _read(self, fp: BinaryIO, buffer: BytesIO, start_pos: int) -> None: + self._start_pos = start_pos + fp.seek(self._start_pos) + self._decode_header(fp, buffer) + + def _decode_header(self, fp: BinaryIO, buffer: BytesIO) -> None: + """ + Decode header data or encoded header data from buffer. + When buffer consist of encoded buffer, it get stream data + from it and call itself recursively + """ + pid = buffer.read(1) + if not pid: + # empty archive + return + elif pid == Property.HEADER: + self._extract_header_info(buffer) + return + elif pid != Property.ENCODED_HEADER: + raise TypeError('Unknown field: %r' % id) + # get from encoded header + streams = HeaderStreamsInfo.retrieve(buffer) + self._decode_header(fp, self._get_headerdata_from_streams(fp, streams)) + + def _get_headerdata_from_streams(self, fp: BinaryIO, streams: StreamsInfo) -> BytesIO: + """get header data from given streams.unpackinfo and packinfo. 
+ folder data are stored in raw data positioned in afterheader.""" + buffer = io.BytesIO() + src_start = self._start_pos + for folder in streams.unpackinfo.folders: + uncompressed = folder.unpacksizes + if not isinstance(uncompressed, (list, tuple)): + uncompressed = [uncompressed] * len(folder.coders) + compressed_size = streams.packinfo.packsizes[0] + uncompressed_size = uncompressed[-1] + + src_start += streams.packinfo.packpos + fp.seek(src_start, 0) + decompressor = folder.get_decompressor(compressed_size) + folder_data = decompressor.decompress(fp.read(compressed_size))[:uncompressed_size] + src_start += uncompressed_size + if folder.digestdefined: + if folder.crc != calculate_crc32(folder_data): + raise Bad7zFile('invalid block data') + buffer.write(folder_data) + buffer.seek(0, 0) + return buffer + + def _encode_header(self, file: BinaryIO, afterheader: int): + startpos = file.tell() + packpos = startpos - afterheader + buf = io.BytesIO() + _, raw_header_len, raw_crc = self.write(buf, 0, False) + streams = HeaderStreamsInfo() + streams.packinfo.packpos = packpos + folder = streams.unpackinfo.folders[0] + folder.crc = [raw_crc] + folder.unpacksizes = [raw_header_len] + compressed_len = 0 + buf.seek(0, 0) + data = buf.read(io.DEFAULT_BUFFER_SIZE) + while data: + out = folder.compressor.compress(data) + compressed_len += len(out) + file.write(out) + data = buf.read(io.DEFAULT_BUFFER_SIZE) + out = folder.compressor.flush() + compressed_len += len(out) + file.write(out) + # + streams.packinfo.packsizes = [compressed_len] + # actual header start position + startpos = file.tell() + write_byte(file, Property.ENCODED_HEADER) + streams.write(file) + write_byte(file, Property.END) + return startpos + + def write(self, file: BinaryIO, afterheader: int, encoded: bool = True): + startpos = file.tell() + if encoded: + startpos = self._encode_header(file, afterheader) + else: + write_byte(file, Property.HEADER) + # Archive properties + if self.main_streams is not None: + 
self.main_streams.write(file) + # Files Info + if self.files_info is not None: + self.files_info.write(file) + if self.properties is not None: + self.properties.write(file) + # AdditionalStreams + if self.additional_streams is not None: + self.additional_streams.write(file) + write_byte(file, Property.END) + endpos = file.tell() + header_len = endpos - startpos + file.seek(startpos, io.SEEK_SET) + crc = calculate_crc32(file.read(header_len)) + file.seek(endpos, io.SEEK_SET) + return startpos, header_len, crc + + def _extract_header_info(self, fp: BinaryIO) -> None: + pid = fp.read(1) + if pid == Property.ARCHIVE_PROPERTIES: + self.properties = ArchiveProperties.retrieve(fp) + pid = fp.read(1) + if pid == Property.ADDITIONAL_STREAMS_INFO: + self.additional_streams = StreamsInfo.retrieve(fp) + pid = fp.read(1) + if pid == Property.MAIN_STREAMS_INFO: + self.main_streams = StreamsInfo.retrieve(fp) + pid = fp.read(1) + if pid == Property.FILES_INFO: + self.files_info = FilesInfo.retrieve(fp) + pid = fp.read(1) + if pid != Property.END: + raise Bad7zFile('end id expected but %s found' % (repr(pid))) + + @staticmethod + def build_header(folders): + header = Header() + header.files_info = FilesInfo() + header.main_streams = StreamsInfo() + header.main_streams.packinfo = PackInfo() + header.main_streams.packinfo.numstreams = 0 + header.main_streams.packinfo.packpos = 0 + header.main_streams.unpackinfo = UnpackInfo() + header.main_streams.unpackinfo.numfolders = len(folders) + header.main_streams.unpackinfo.folders = folders + header.main_streams.substreamsinfo = SubstreamsInfo() + header.main_streams.substreamsinfo.num_unpackstreams_folders = [len(folders)] + header.main_streams.substreamsinfo.unpacksizes = [] + return header + + +class SignatureHeader: + """The SignatureHeader class hold information of a signature header of archive.""" + + __slots__ = ['version', 'startheadercrc', 'nextheaderofs', 'nextheadersize', 'nextheadercrc'] + + def __init__(self) -> None: + 
self.version = (P7ZIP_MAJOR_VERSION, P7ZIP_MINOR_VERSION) # type: Tuple[bytes, ...] + self.startheadercrc = None # type: Optional[int] + self.nextheaderofs = None # type: Optional[int] + self.nextheadersize = None # type: Optional[int] + self.nextheadercrc = None # type: Optional[int] + + @classmethod + def retrieve(cls, file: BinaryIO): + obj = cls() + obj._read(file) + return obj + + def _read(self, file: BinaryIO) -> None: + file.seek(len(MAGIC_7Z), 0) + self.version = read_bytes(file, 2) + self.startheadercrc, _ = read_uint32(file) + self.nextheaderofs, data = read_real_uint64(file) + crc = calculate_crc32(data) + self.nextheadersize, data = read_real_uint64(file) + crc = calculate_crc32(data, crc) + self.nextheadercrc, data = read_uint32(file) + crc = calculate_crc32(data, crc) + if crc != self.startheadercrc: + raise Bad7zFile('invalid header data') + + def calccrc(self, length: int, header_crc: int): + self.nextheadersize = length + self.nextheadercrc = header_crc + assert self.nextheaderofs is not None + buf = io.BytesIO() + write_real_uint64(buf, self.nextheaderofs) + write_real_uint64(buf, self.nextheadersize) + write_uint32(buf, self.nextheadercrc) + startdata = buf.getvalue() + self.startheadercrc = calculate_crc32(startdata) + + def write(self, file: BinaryIO): + assert self.startheadercrc is not None + assert self.nextheadercrc is not None + assert self.nextheaderofs is not None + assert self.nextheadersize is not None + file.seek(0, 0) + write_bytes(file, MAGIC_7Z) + write_byte(file, self.version[0]) + write_byte(file, self.version[1]) + write_uint32(file, self.startheadercrc) + write_real_uint64(file, self.nextheaderofs) + write_real_uint64(file, self.nextheadersize) + write_uint32(file, self.nextheadercrc) + + def _write_skelton(self, file: BinaryIO): + file.seek(0, 0) + write_bytes(file, MAGIC_7Z) + write_byte(file, self.version[0]) + write_byte(file, self.version[1]) + write_uint32(file, 1) + write_real_uint64(file, 2) + write_real_uint64(file, 
class FinishHeader:
    """Finish header for multi-volume 7z file."""

    def __init__(self):
        # data offset from end of the finish header
        self.archive_start_offset = None
        # start signature & start header size
        self.additional_start_block_size = None
        self.finish_header_size = 20 + 16

    @classmethod
    def retrieve(cls, file):
        """Read a FinishHeader from *file* and return it."""
        header = cls()
        header._read(file)
        return header

    def _read(self, file):
        # the two fields are stored back-to-back as 7z uint64 values
        self.archive_start_offset = read_uint64(file)
        self.additional_start_block_size = read_uint64(file)
class Callback(ABC):
    """Abstract base class for progress callbacks."""

    @abstractmethod
    def report_start_preparation(self):
        """Report a start of preparation event such as making list of files and looking into its properties."""
        pass

    @abstractmethod
    def report_start(self, processing_file_path, processing_bytes):
        """Report a start event of specified archive file and its input bytes."""
        pass

    @abstractmethod
    def report_end(self, processing_file_path, wrote_bytes):
        """Report an end event of specified archive file and its output bytes."""
        pass

    @abstractmethod
    def report_warning(self, message):
        """Report a warning event with its message."""
        pass

    @abstractmethod
    def report_postprocess(self):
        """Report a start of post processing event such as set file properties and permissions or creating symlinks."""
        pass


class ExtractCallback(Callback):
    """Abstract base class for extraction progress callbacks."""
    pass


class ArchiveCallback(Callback):
    """Abstract base class for archiving progress callbacks."""
    pass
class CliExtractCallback(ExtractCallback):
    """Extraction progress reporter that prints one line per extracted file."""

    def __init__(self, total_bytes, ofd=sys.stdout):
        self.ofd = ofd                    # stream the progress text is written to
        self.archive_total = total_bytes  # expected total of extracted bytes
        self.total_bytes = 0              # bytes reported so far
        self.columns, _ = shutil.get_terminal_size(fallback=(80, 24))
        self.pwidth = 0                   # width already printed on the current line

    def report_start_preparation(self):
        pass

    def report_start(self, processing_file_path, processing_bytes):
        # print the file name and track how wide the line has become
        line = '- {}'.format(processing_file_path)
        self.ofd.write(line)
        self.pwidth += len(line)

    def report_end(self, processing_file_path, wrote_bytes):
        self.total_bytes += int(wrote_bytes)
        remaining = self.columns - self.pwidth
        msg = '({:.0%})\n'.format(self.total_bytes / self.archive_total)
        # right-align the percentage when it still fits on the line
        if remaining - len(msg) > 0:
            self.ofd.write(msg.rjust(remaining))
        else:
            self.ofd.write(msg)
        self.pwidth = 0

    def report_postprocess(self):
        pass

    def report_warning(self, message):
        pass
self.unit_pattern = re.compile(r'^([0-9]+)([bkmg]?)$', re.IGNORECASE) + + def run(self, arg: Optional[Any] = None) -> int: + args = self.parser.parse_args(arg) + return args.func(args) + + def _create_parser(self): + parser = argparse.ArgumentParser(prog='py7zr', description='py7zr', + formatter_class=argparse.RawTextHelpFormatter, add_help=True) + subparsers = parser.add_subparsers(title='subcommands', help='subcommand for py7zr l .. list, x .. extract,' + ' t .. check integrity, i .. information') + list_parser = subparsers.add_parser('l') + list_parser.set_defaults(func=self.run_list) + list_parser.add_argument("arcfile", help="7z archive file") + list_parser.add_argument("--verbose", action="store_true", help="verbose output") + extract_parser = subparsers.add_parser('x') + extract_parser.set_defaults(func=self.run_extract) + extract_parser.add_argument("arcfile", help="7z archive file") + extract_parser.add_argument("odir", nargs="?", help="output directory") + extract_parser.add_argument("-P", "--password", action="store_true", + help="Password protected archive(you will be asked a password).") + extract_parser.add_argument("--verbose", action="store_true", help="verbose output") + create_parser = subparsers.add_parser('c') + create_parser.set_defaults(func=self.run_create) + create_parser.add_argument("arcfile", help="7z archive file") + create_parser.add_argument("filenames", nargs="+", help="filenames to archive") + create_parser.add_argument("-v", "--volume", nargs=1, help="Create volumes.") + test_parser = subparsers.add_parser('t') + test_parser.set_defaults(func=self.run_test) + test_parser.add_argument("arcfile", help="7z archive file") + info_parser = subparsers.add_parser("i") + info_parser.set_defaults(func=self.run_info) + parser.set_defaults(func=self.show_help) + return parser + + def show_help(self, args): + self.parser.print_help() + return(0) + + def run_info(self, args): + print("py7zr version {} {}".format(py7zr.__version__, 
py7zr.__copyright__)) + print("Formats:") + table = texttable.Texttable() + table.set_deco(texttable.Texttable.HEADER) + table.set_cols_dtype(['t', 't']) + table.set_cols_align(["l", "r"]) + for f in SupportedMethods.formats: + m = ''.join(' {:02x}'.format(x) for x in f['magic']) + table.add_row([f['name'], m]) + print(table.draw()) + print("\nCodecs:") + table = texttable.Texttable() + table.set_deco(texttable.Texttable.HEADER) + table.set_cols_dtype(['t', 't']) + table.set_cols_align(["l", "r"]) + for c in SupportedMethods.codecs: + m = ''.join('{:02x}'.format(x) for x in c['id']) + table.add_row([m, c['name']]) + print(table.draw()) + print("\nChecks:") + print("CHECK_NONE") + print("CHECK_CRC32") + if is_check_supported(CHECK_CRC64): + print("CHECK_CRC64") + if is_check_supported(CHECK_SHA256): + print("CHECK_SHA256") + + def run_list(self, args): + """Print a table of contents to file. """ + target = args.arcfile + verbose = args.verbose + if not py7zr.is_7zfile(target): + print('not a 7z file') + return(1) + with open(target, 'rb') as f: + a = py7zr.SevenZipFile(f) + file = sys.stdout + archive_info = a.archiveinfo() + archive_list = a.list() + if verbose: + file.write("Listing archive: {}\n".format(target)) + file.write("--\n") + file.write("Path = {}\n".format(archive_info.filename)) + file.write("Type = 7z\n") + fstat = os.stat(archive_info.filename) + file.write("Phisical Size = {}\n".format(fstat.st_size)) + file.write("Headers Size = {}\n".format(archive_info.header_size)) + file.write("Method = {}\n".format(archive_info.method_names)) + if archive_info.solid: + file.write("Solid = {}\n".format('+')) + else: + file.write("Solid = {}\n".format('-')) + file.write("Blocks = {}\n".format(archive_info.blocks)) + file.write('\n') + file.write( + 'total %d files and directories in %sarchive\n' % (len(archive_list), + (archive_info.solid and 'solid ') or '')) + file.write(' Date Time Attr Size Compressed Name\n') + file.write('------------------- ----- 
------------ ------------ ------------------------\n') + for f in archive_list: + if f.creationtime is not None: + creationdate = f.creationtime.astimezone(Local).strftime("%Y-%m-%d") + creationtime = f.creationtime.astimezone(Local).strftime("%H:%M:%S") + else: + creationdate = ' ' + creationtime = ' ' + if f.is_directory: + attrib = 'D...' + else: + attrib = '....' + if f.archivable: + attrib += 'A' + else: + attrib += '.' + if f.is_directory: + extra = ' 0 ' + elif f.compressed is None: + extra = ' ' + else: + extra = '%12d ' % (f.compressed) + file.write('%s %s %s %12d %s %s\n' % (creationdate, creationtime, attrib, + f.uncompressed, extra, f.filename)) + file.write('------------------- ----- ------------ ------------ ------------------------\n') + + return(0) + + @staticmethod + def print_archiveinfo(archive, file): + file.write("--\n") + file.write("Path = {}\n".format(archive.filename)) + file.write("Type = 7z\n") + fstat = os.stat(archive.filename) + file.write("Phisical Size = {}\n".format(fstat.st_size)) + file.write("Headers Size = {}\n".format(archive.header.size)) # fixme. 
+ file.write("Method = {}\n".format(archive._get_method_names())) + if archive._is_solid(): + file.write("Solid = {}\n".format('+')) + else: + file.write("Solid = {}\n".format('-')) + file.write("Blocks = {}\n".format(len(archive.header.main_streams.unpackinfo.folders))) + + def run_test(self, args): + target = args.arcfile + if not py7zr.is_7zfile(target): + print('not a 7z file') + return(1) + with open(target, 'rb') as f: + a = py7zr.SevenZipFile(f) + file = sys.stdout + file.write("Testing archive: {}\n".format(a.filename)) + self.print_archiveinfo(archive=a, file=file) + file.write('\n') + if a.testzip() is None: + file.write('Everything is Ok\n') + return(0) + else: + file.write('Bad 7zip file\n') + return(1) + + def run_extract(self, args: argparse.Namespace) -> int: + target = args.arcfile + verbose = args.verbose + if not py7zr.is_7zfile(target): + print('not a 7z file') + return(1) + if not args.password: + password = None # type: Optional[str] + else: + try: + password = getpass.getpass() + except getpass.GetPassWarning: + sys.stderr.write('Warning: your password may be shown.\n') + return(1) + a = py7zr.SevenZipFile(target, 'r', password=password) + cb = None # Optional[ExtractCallback] + if verbose: + archive_info = a.archiveinfo() + cb = CliExtractCallback(total_bytes=archive_info.uncompressed, ofd=sys.stderr) + if args.odir: + a.extractall(path=args.odir, callback=cb) + else: + a.extractall(callback=cb) + return(0) + + def _check_volumesize_valid(self, size: str) -> bool: + if self.unit_pattern.match(size): + return True + else: + return False + + def _volumesize_unitconv(self, size: str) -> int: + m = self.unit_pattern.match(size) + num = m.group(1) + unit = m.group(2) + return int(num) if unit is None else int(num) * self.dunits[unit] + + def run_create(self, args): + sztarget = args.arcfile # type: str + filenames = args.filenames # type: List[str] + volume_size = args.volume[0] if getattr(args, 'volume', None) is not None else None + if 
volume_size is not None and not self._check_volumesize_valid(volume_size): + sys.stderr.write('Error: Specified volume size is invalid.\n') + self.show_help(args) + exit(1) + if not sztarget.endswith('.7z'): + sztarget += '.7z' + target = pathlib.Path(sztarget) + if target.exists(): + sys.stderr.write('Archive file exists!\n') + self.show_help(args) + exit(1) + with py7zr.SevenZipFile(target, 'w') as szf: + for path in filenames: + src = pathlib.Path(path) + if src.is_dir(): + szf.writeall(src) + else: + szf.write(src) + if volume_size is None: + return (0) + size = self._volumesize_unitconv(volume_size) + self._split_file(target, size) + target.unlink() + return(0) + + def _split_file(self, filepath, size): + chapters = 0 + written = [0, 0] + total_size = filepath.stat().st_size + with filepath.open('rb') as src: + while written[0] <= total_size: + with open(str(filepath) + '.%03d' % chapters, 'wb') as tgt: + written[1] = 0 + while written[1] < size: + read_size = min(READ_BLOCKSIZE, size - written[1]) + tgt.write(src.read(read_size)) + written[1] += read_size + written[0] += read_size + chapters += 1 diff --git a/py7zr/compression.py b/py7zr/compression.py new file mode 100644 index 0000000..4ba303a --- /dev/null +++ b/py7zr/compression.py @@ -0,0 +1,395 @@ +#!/usr/bin/python -u +# +# p7zr library +# +# Copyright (c) 2019 Hiroshi Miura +# Copyright (c) 2004-2015 by Joachim Bauch, mail@joachim-bauch.de +# 7-Zip Copyright (C) 1999-2010 Igor Pavlov +# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# This library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
# p7zr library
# Copyright (c) 2019 Hiroshi Miura
# Copyright (c) 2004-2015 by Joachim Bauch, mail@joachim-bauch.de
# 7-Zip Copyright (C) 1999-2010 Igor Pavlov
# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov
# SPDX-License-Identifier: LGPL-2.1-or-later
#
"""Compression/decompression drivers binding 7zip folders to codec objects."""
import bz2
import io
import lzma
import os
import queue
import sys
import threading
from typing import IO, Any, BinaryIO, Dict, List, Optional, Union

from py7zr.exceptions import Bad7zFile, CrcError, UnsupportedCompressionMethodError
from py7zr.extra import AESDecompressor, CopyDecompressor, DeflateDecompressor, ISevenZipDecompressor, ZstdDecompressor
from py7zr.helpers import MemIO, NullIO, calculate_crc32, readlink
from py7zr.properties import READ_BLOCKSIZE, ArchivePassword, CompressionMethod

if sys.version_info < (3, 6):
    import pathlib2 as pathlib
else:
    import pathlib
try:
    import zstandard as Zstd  # type: ignore
except ImportError:
    # zstandard is optional; Zstd stays None and zstd codecs raise at use time.
    Zstd = None


class Worker:
    """Extract worker class to invoke handler.

    Drives extraction (or archiving) of the files that belong to the 7zip
    folders described by ``header``, reading packed streams from the archive
    file starting at ``src_start``.
    """

    def __init__(self, files, src_start: int, header) -> None:
        # Maps file id -> output target (a pathlib.Path, an in-memory MemIO,
        # or None when the file should be skipped/discarded).
        self.target_filepath = {}  # type: Dict[int, Union[MemIO, pathlib.Path, None]]
        self.files = files
        self.src_start = src_start
        self.header = header

    def extract(self, fp: BinaryIO, parallel: bool, q=None) -> None:
        """Extract worker method to handle 7zip folder and decompress each files.

        :param fp: opened archive file object.
        :param parallel: when True, decompress each folder in its own thread,
            each thread re-opening the archive by file name.
        :param q: optional progress queue; receives ('s', name, size) /
            ('e', name, size) tuples from extract_single.
        """
        if hasattr(self.header, 'main_streams') and self.header.main_streams is not None:
            # packpositions[-1] is the total packed size; src_end bounds reads.
            src_end = self.src_start + self.header.main_streams.packinfo.packpositions[-1]
            numfolders = self.header.main_streams.unpackinfo.numfolders
            if numfolders == 1:
                self.extract_single(fp, self.files, self.src_start, src_end, q)
            else:
                folders = self.header.main_streams.unpackinfo.folders
                positions = self.header.main_streams.packinfo.packpositions
                empty_files = [f for f in self.files if f.emptystream]
                if not parallel:
                    # Empty files have no stream; handle them with zero-length range.
                    self.extract_single(fp, empty_files, 0, 0, q)
                    for i in range(numfolders):
                        self.extract_single(fp, folders[i].files, self.src_start + positions[i],
                                            self.src_start + positions[i + 1], q)
                else:
                    # Each thread needs an independent file handle, so re-open by name.
                    filename = getattr(fp, 'name', None)
                    # NOTE(review): this handle (and the per-thread handles opened
                    # inside extract_single) are never closed — resource leak.
                    self.extract_single(open(filename, 'rb'), empty_files, 0, 0, q)
                    extract_threads = []
                    for i in range(numfolders):
                        p = threading.Thread(target=self.extract_single,
                                             args=(filename, folders[i].files,
                                                   self.src_start + positions[i], self.src_start + positions[i + 1], q))
                        p.start()
                        extract_threads.append((p))
                    for p in extract_threads:
                        p.join()
        else:
            # No main streams at all: only empty files/directories in the archive.
            empty_files = [f for f in self.files if f.emptystream]
            self.extract_single(fp, empty_files, 0, 0, q)

    def extract_single(self, fp: Union[BinaryIO, str], files, src_start: int, src_end: int,
                       q: Optional[queue.Queue]) -> None:
        """Single thread extractor that takes file lists in single 7zip folder.

        :param fp: archive file object, or a file name (threaded path) which
            is opened here.  NOTE(review): when a name is passed, the handle
            opened below is never closed.
        :param files: file entries belonging to this folder.
        :param src_start: absolute offset of the folder's packed data.
        :param src_end: absolute end offset of the folder's packed data.
        :param q: optional progress queue.
        :raises CrcError: when a per-file CRC32 does not match.
        """
        if files is None:
            return
        if isinstance(fp, str):
            fp = open(fp, 'rb')
        fp.seek(src_start)
        for f in files:
            if q is not None:
                # 's' = start-of-file progress event.
                q.put(('s', str(f.filename), str(f.compressed) if f.compressed is not None else '0'))
            fileish = self.target_filepath.get(f.id, None)
            if fileish is not None:
                fileish.parent.mkdir(parents=True, exist_ok=True)
                with fileish.open(mode='wb') as ofp:
                    if not f.emptystream:
                        # extract to file
                        crc32 = self.decompress(fp, f.folder, ofp, f.uncompressed[-1], f.compressed, src_end)
                        ofp.seek(0)
                        if f.crc32 is not None and crc32 != f.crc32:
                            raise CrcError("{}".format(f.filename))
                    else:
                        pass  # just create empty file
            elif not f.emptystream:
                # read and bin off a data but check crc
                with NullIO() as ofp:
                    crc32 = self.decompress(fp, f.folder, ofp, f.uncompressed[-1], f.compressed, src_end)
                if f.crc32 is not None and crc32 != f.crc32:
                    raise CrcError("{}".format(f.filename))
            if q is not None:
                # 'e' = end-of-file progress event.
                q.put(('e', str(f.filename), str(f.uncompressed[-1])))

    def decompress(self, fp: BinaryIO, folder, fq: IO[Any],
                   size: int, compressed_size: Optional[int], src_end: int) -> int:
        """decompressor wrapper called from extract method.

        :parameter fp: archive source file pointer
        :parameter folder: Folder object that have decompressor object.
        :parameter fq: output file pathlib.Path
        :parameter size: uncompressed size of target file.
        :parameter compressed_size: compressed size of target file.
        :parameter src_end: end position of the folder
        :returns: CRC32 of the file
        :raises Bad7zFile: when the whole-folder CRC check fails.
        """
        assert folder is not None
        crc32 = 0
        out_remaining = size
        decompressor = folder.get_decompressor(compressed_size)
        while out_remaining > 0:
            max_length = min(out_remaining, io.DEFAULT_BUFFER_SIZE)
            rest_size = src_end - fp.tell()
            read_size = min(READ_BLOCKSIZE, rest_size)
            if read_size == 0:
                # Source exhausted: drain the decompressor's internal buffer.
                tmp = decompressor.decompress(b'', max_length)
                if len(tmp) == 0:
                    raise Exception("decompression get wrong: no output data.")
            else:
                inp = fp.read(read_size)
                tmp = decompressor.decompress(inp, max_length)
            if len(tmp) > 0 and out_remaining >= len(tmp):
                out_remaining -= len(tmp)
                fq.write(tmp)
                crc32 = calculate_crc32(tmp, crc32)
            if out_remaining <= 0:
                break
        if fp.tell() >= src_end:
            # Check folder.digest integrity.
            if decompressor.crc is not None and not decompressor.check_crc():
                raise Bad7zFile("Folder CRC32 error.")
        return crc32

    def _find_link_target(self, target):
        """Find the target member of a symlink or hardlink member in the archive.

        Returns a path relative to the link location when the target is a
        member of this archive, otherwise the raw link target string.
        """
        targetname = target.as_posix()  # type: str
        linkname = readlink(targetname)
        # Check windows full path symlinks
        if linkname.startswith("\\\\?\\"):
            linkname = linkname[4:]
        # normalize as posix style
        linkname = pathlib.Path(linkname).as_posix()  # type: str
        member = None
        for j in range(len(self.files)):
            if linkname == self.files[j].origin.as_posix():
                # FIXME: when API user specify arcname, it will break
                member = os.path.relpath(linkname, os.path.dirname(targetname))
                break
        if member is None:
            member = linkname
        return member

    def archive(self, fp: BinaryIO, folder, deref=False):
        """Run archive task for specified 7zip folder.

        Compresses every non-empty file (or the target path of symlinks when
        ``deref`` is False) into ``fp`` and records the per-substream sizes,
        CRCs and pack sizes into ``self.header``.
        """
        compressor = folder.get_compressor()
        outsize = 0
        self.header.main_streams.packinfo.numstreams = 1
        num_unpack_streams = 0
        self.header.main_streams.substreamsinfo.digests = []
        self.header.main_streams.substreamsinfo.digestsdefined = []
        last_file_index = 0
        foutsize = 0
        for i, f in enumerate(self.files):
            file_info = f.file_properties()
            self.header.files_info.files.append(file_info)
            self.header.files_info.emptyfiles.append(f.emptystream)
            foutsize = 0
            if f.is_symlink and not deref:
                # Store the link target string instead of following the link.
                last_file_index = i
                num_unpack_streams += 1
                link_target = self._find_link_target(f.origin)  # type: str
                tgt = link_target.encode('utf-8')  # type: bytes
                insize = len(tgt)
                crc = calculate_crc32(tgt, 0)  # type: int
                out = compressor.compress(tgt)
                outsize += len(out)
                foutsize += len(out)
                fp.write(out)
                self.header.main_streams.substreamsinfo.digests.append(crc)
                self.header.main_streams.substreamsinfo.digestsdefined.append(True)
                self.header.main_streams.substreamsinfo.unpacksizes.append(insize)
                self.header.files_info.files[i]['maxsize'] = foutsize
            elif not f.emptystream:
                last_file_index = i
                num_unpack_streams += 1
                insize = 0
                with f.origin.open(mode='rb') as fd:
                    data = fd.read(READ_BLOCKSIZE)
                    insize += len(data)
                    crc = 0
                    while data:
                        crc = calculate_crc32(data, crc)
                        out = compressor.compress(data)
                        outsize += len(out)
                        foutsize += len(out)
                        fp.write(out)
                        data = fd.read(READ_BLOCKSIZE)
                        insize += len(data)
                self.header.main_streams.substreamsinfo.digests.append(crc)
                self.header.main_streams.substreamsinfo.digestsdefined.append(True)
                self.header.files_info.files[i]['maxsize'] = foutsize
                self.header.main_streams.substreamsinfo.unpacksizes.append(insize)
        else:
            # for-else: runs after the loop finishes (no break above);
            # flush the compressor's remaining buffered output.
            out = compressor.flush()
            outsize += len(out)
            foutsize += len(out)
            fp.write(out)
            if len(self.files) > 0:
                self.header.files_info.files[last_file_index]['maxsize'] = foutsize
        # Update size data in header
        self.header.main_streams.packinfo.packsizes = [outsize]
        folder.unpacksizes = [sum(self.header.main_streams.substreamsinfo.unpacksizes)]
        self.header.main_streams.substreamsinfo.num_unpackstreams_folders = [num_unpack_streams]

    def register_filelike(self, id: int, fileish: Union[MemIO, pathlib.Path, None]) -> None:
        """register file-ish to worker."""
        # NOTE(review): parameter name `id` shadows the builtin.
        self.target_filepath[id] = fileish


class SevenZipDecompressor:
    """Main decompressor object which is properly configured and bind to each 7zip folder.
    because 7zip folder can have a custom compression method"""

    # Map from 7z method id to the corresponding liblzma raw filter id.
    lzma_methods_map = {
        CompressionMethod.LZMA: lzma.FILTER_LZMA1,
        CompressionMethod.LZMA2: lzma.FILTER_LZMA2,
        CompressionMethod.DELTA: lzma.FILTER_DELTA,
        CompressionMethod.P7Z_BCJ: lzma.FILTER_X86,
        CompressionMethod.BCJ_ARM: lzma.FILTER_ARM,
        CompressionMethod.BCJ_ARMT: lzma.FILTER_ARMTHUMB,
        CompressionMethod.BCJ_IA64: lzma.FILTER_IA64,
        CompressionMethod.BCJ_PPC: lzma.FILTER_POWERPC,
        CompressionMethod.BCJ_SPARC: lzma.FILTER_SPARC,
    }

    # Internal ids for the non-lzma codec family.
    FILTER_BZIP2 = 0x31
    FILTER_ZIP = 0x32
    FILTER_COPY = 0x33
    FILTER_AES = 0x34
    FILTER_ZSTD = 0x35
    alt_methods_map = {
        CompressionMethod.MISC_BZIP2: FILTER_BZIP2,
        CompressionMethod.MISC_DEFLATE: FILTER_ZIP,
        CompressionMethod.COPY: FILTER_COPY,
        CompressionMethod.CRYPT_AES256_SHA256: FILTER_AES,
        CompressionMethod.MISC_ZSTD: FILTER_ZSTD,
    }

    def __init__(self, coders: List[Dict[str, Any]], size: int, crc: Optional[int]) -> None:
        # Get password which was set when creation of py7zr.SevenZipFile object.
        self.input_size = size
        self.consumed = 0  # type: int
        self.crc = crc
        self.digest = None  # type: Optional[int]
        if self._check_lzma_coders(coders):
            self._set_lzma_decompressor(coders)
        else:
            self._set_alternative_decompressor(coders)

    def _check_lzma_coders(self, coders: List[Dict[str, Any]]) -> bool:
        """Return True when every coder in the chain is liblzma-native."""
        res = True
        for coder in coders:
            if self.lzma_methods_map.get(coder['method'], None) is None:
                res = False
                break
        return res

    def _set_lzma_decompressor(self, coders: List[Dict[str, Any]]) -> None:
        """Build a raw-format LZMADecompressor from the coder chain."""
        filters = []  # type: List[Dict[str, Any]]
        for coder in coders:
            if coder['numinstreams'] != 1 or coder['numoutstreams'] != 1:
                raise UnsupportedCompressionMethodError('Only a simple compression method is currently supported.')
            filter_id = self.lzma_methods_map.get(coder['method'], None)
            if filter_id is None:
                raise UnsupportedCompressionMethodError
            properties = coder.get('properties', None)
            # Coders are stored last-applied-first; prepend to reverse the order.
            if properties is not None:
                filters[:0] = [lzma._decode_filter_properties(filter_id, properties)]  # type: ignore
            else:
                filters[:0] = [{'id': filter_id}]
        self.decompressor = lzma.LZMADecompressor(format=lzma.FORMAT_RAW, filters=filters)  # type: Union[bz2.BZ2Decompressor, lzma.LZMADecompressor, ISevenZipDecompressor]  # noqa

    def _set_alternative_decompressor(self, coders: List[Dict[str, Any]]) -> None:
        """Configure a non-lzma decompressor (bzip2/deflate/copy/zstd/AES)."""
        filter_id = self.alt_methods_map.get(coders[0]['method'], None)
        if filter_id == self.FILTER_BZIP2:
            self.decompressor = bz2.BZ2Decompressor()
        elif filter_id == self.FILTER_ZIP:
            self.decompressor = DeflateDecompressor()
        elif filter_id == self.FILTER_COPY:
            self.decompressor = CopyDecompressor()
        elif filter_id == self.FILTER_ZSTD and Zstd:
            self.decompressor = ZstdDecompressor()
        elif filter_id == self.FILTER_AES:
            password = ArchivePassword().get()
            properties = coders[0].get('properties', None)
            # Remaining coders form the inner (post-decryption) filter chain.
            self.decompressor = AESDecompressor(properties, password, coders[1:])
        else:
            raise UnsupportedCompressionMethodError

    def decompress(self, data: bytes, max_length: Optional[int] = None) -> bytes:
        """Feed packed bytes and return up to max_length unpacked bytes."""
        self.consumed += len(data)
        if max_length is not None:
            folder_data = self.decompressor.decompress(data, max_length=max_length)
        else:
            folder_data = self.decompressor.decompress(data)
        # calculate CRC with uncompressed data
        if self.crc is not None:
            self.digest = calculate_crc32(folder_data, self.digest)
        return folder_data

    def check_crc(self):
        """Return True when the running digest matches the folder CRC."""
        return self.crc == self.digest


class SevenZipCompressor:

    """Main compressor object to configured for each 7zip folder."""

    __slots__ = ['filters', 'compressor', 'coders']

    # Reverse map: liblzma filter id -> 7z method id, for header encoding.
    lzma_methods_map_r = {
        lzma.FILTER_LZMA2: CompressionMethod.LZMA2,
        lzma.FILTER_DELTA: CompressionMethod.DELTA,
        lzma.FILTER_X86: CompressionMethod.P7Z_BCJ,
    }

    def __init__(self, filters=None):
        # Default: single LZMA2 filter at preset 7 | extreme.
        if filters is None:
            self.filters = [{"id": lzma.FILTER_LZMA2, "preset": 7 | lzma.PRESET_EXTREME}, ]
        else:
            self.filters = filters
        self.compressor = lzma.LZMACompressor(format=lzma.FORMAT_RAW, filters=self.filters)
        self.coders = []
        # NOTE(review): loop variable `filter` shadows the builtin.
        for filter in self.filters:
            if filter is None:
                break
            method = self.lzma_methods_map_r[filter['id']]
            properties = lzma._encode_filter_properties(filter)
            self.coders.append({'method': method, 'properties': properties, 'numinstreams': 1, 'numoutstreams': 1})

    def compress(self, data):
        return self.compressor.compress(data)

    def flush(self):
        return self.compressor.flush()


def get_methods_names(coders: List[dict]) -> List[str]:
    """Return human readable method names for specified coders

    :raises UnsupportedCompressionMethodError: for an unknown method id.
    """
    methods_name_map = {
        CompressionMethod.LZMA2: "LZMA2",
        CompressionMethod.LZMA: "LZMA",
        CompressionMethod.DELTA: "delta",
        CompressionMethod.P7Z_BCJ: "BCJ",
        CompressionMethod.BCJ_ARM: "BCJ(ARM)",
        CompressionMethod.BCJ_ARMT: "BCJ(ARMT)",
        CompressionMethod.BCJ_IA64: "BCJ(IA64)",
        CompressionMethod.BCJ_PPC: "BCJ(POWERPC)",
        CompressionMethod.BCJ_SPARC: "BCJ(SPARC)",
        CompressionMethod.CRYPT_AES256_SHA256: "7zAES",
    }
    methods_names = []  # type: List[str]
    for coder in coders:
        try:
            methods_names.append(methods_name_map[coder['method']])
        except KeyError:
            raise UnsupportedCompressionMethodError("Unknown method {}".format(coder['method']))
    return methods_names
# p7zr library
# Copyright (c) 2019 Hiroshi Miura
# Copyright (c) 2004-2015 by Joachim Bauch, mail@joachim-bauch.de
# 7-Zip Copyright (C) 1999-2010 Igor Pavlov
# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov
# SPDX-License-Identifier: LGPL-2.1-or-later
#
"""Exception hierarchy used throughout the py7zr package.

All library errors derive from :class:`ArchiveError`, so callers can catch
that single base class to handle any py7zr failure.
"""


class ArchiveError(Exception):
    """Base class for every error raised by py7zr."""


class Bad7zFile(ArchiveError):
    """The input is not a valid 7z archive (bad magic or corrupt header)."""


class CrcError(ArchiveError):
    """A stored CRC32 digest did not match the extracted data."""


class UnsupportedCompressionMethodError(ArchiveError):
    """The archive uses a codec this library cannot handle."""


class DecompressionError(ArchiveError):
    """Decompression failed while unpacking a stream."""


class InternalError(ArchiveError):
    """An internal invariant of the library was violated."""
# p7zr library
# Copyright (c) 2019 Hiroshi Miura
# Copyright (c) 2004-2015 by Joachim Bauch, mail@joachim-bauch.de
# 7-Zip Copyright (C) 1999-2010 Igor Pavlov
# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov
# SPDX-License-Identifier: LGPL-2.1-or-later
#
"""Extra codec implementations: deflate, copy, AES decryption and zstd."""
import lzma
import zlib
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Union

from Crypto.Cipher import AES

from py7zr import UnsupportedCompressionMethodError
from py7zr.helpers import Buffer, calculate_key
from py7zr.properties import READ_BLOCKSIZE, CompressionMethod

try:
    import zstandard as Zstd  # type: ignore
except ImportError:
    # zstandard is optional; the zstd codecs raise when it is missing.
    Zstd = None


class ISevenZipCompressor(ABC):
    """Interface of a streaming compressor used for one 7zip folder."""

    @abstractmethod
    def compress(self, data: Union[bytes, bytearray, memoryview]) -> bytes:
        """Compress a chunk and return whatever output is ready."""
        pass

    @abstractmethod
    def flush(self) -> bytes:
        """Finish the stream and return the remaining output."""
        pass


class ISevenZipDecompressor(ABC):
    """Interface of a streaming decompressor used for one 7zip folder."""

    @abstractmethod
    def decompress(self, data: Union[bytes, bytearray, memoryview], max_length: int = -1) -> bytes:
        """Decompress a chunk, returning at most max_length bytes (-1 = all)."""
        pass


class DeflateDecompressor(ISevenZipDecompressor):
    """Raw-deflate decompressor with an internal buffer for max_length limits."""

    def __init__(self):
        self.buf = b''
        # wbits=-15: raw deflate stream without zlib header/trailer.
        self._decompressor = zlib.decompressobj(-15)

    def decompress(self, data: Union[bytes, bytearray, memoryview], max_length: int = -1):
        if max_length < 0:
            res = self.buf + self._decompressor.decompress(data)
            self.buf = b''
        else:
            # Keep any overflow beyond max_length for the next call.
            tmp = self.buf + self._decompressor.decompress(data)
            res = tmp[:max_length]
            self.buf = tmp[max_length:]
        return res


class CopyDecompressor(ISevenZipDecompressor):
    """Pass-through 'decompressor' for the COPY method, honoring max_length."""

    def __init__(self):
        self._buf = bytes()

    def decompress(self, data: Union[bytes, bytearray, memoryview], max_length: int = -1) -> bytes:
        # FIX: normalize to bytes up front — the original concatenated
        # self._buf (bytes) with bytearray/memoryview input, which raises
        # TypeError despite the declared signature.
        data = bytes(data)
        if max_length < 0:
            length = len(data)
        else:
            length = min(len(data), max_length)
        buflen = len(self._buf)
        if length > buflen:
            # Drain the whole buffer plus part of the new data.
            res = self._buf + data[:length - buflen]
            self._buf = data[length - buflen:]
        else:
            # Serve entirely from the buffer; append new data for later.
            res = self._buf[:length]
            self._buf = self._buf[length:] + data
        return res


class AESDecompressor(ISevenZipDecompressor):
    """AES-256-CBC decryptor chained with an optional inner LZMA filter chain.

    7zAES properties encode the KDF cycle count, salt and IV; the password is
    stretched with SHA-256 (see py7zr.helpers.calculate_key).  Input must be
    processed in 16-byte cipher blocks, so a partial block is buffered
    between calls.
    """

    # Map from 7z method id to the corresponding liblzma raw filter id.
    lzma_methods_map = {
        CompressionMethod.LZMA: lzma.FILTER_LZMA1,
        CompressionMethod.LZMA2: lzma.FILTER_LZMA2,
        CompressionMethod.DELTA: lzma.FILTER_DELTA,
        CompressionMethod.P7Z_BCJ: lzma.FILTER_X86,
        CompressionMethod.BCJ_ARM: lzma.FILTER_ARM,
        CompressionMethod.BCJ_ARMT: lzma.FILTER_ARMTHUMB,
        CompressionMethod.BCJ_IA64: lzma.FILTER_IA64,
        CompressionMethod.BCJ_PPC: lzma.FILTER_POWERPC,
        CompressionMethod.BCJ_SPARC: lzma.FILTER_SPARC,
    }

    def __init__(self, aes_properties: bytes, password: str, coders: List[Dict[str, Any]]) -> None:
        # 7z hashes the password as UTF-16-LE.
        byte_password = password.encode('utf-16LE')
        firstbyte = aes_properties[0]
        numcyclespower = firstbyte & 0x3f
        if firstbyte & 0xc0 != 0:
            # Salt/IV sizes: one bit each in byte 0, plus nibbles of byte 1.
            saltsize = (firstbyte >> 7) & 1
            ivsize = (firstbyte >> 6) & 1
            secondbyte = aes_properties[1]
            saltsize += (secondbyte >> 4)
            ivsize += (secondbyte & 0x0f)
            assert len(aes_properties) == 2 + saltsize + ivsize
            salt = aes_properties[2:2 + saltsize]
            iv = aes_properties[2 + saltsize:2 + saltsize + ivsize]
            assert len(salt) == saltsize
            assert len(iv) == ivsize
            assert numcyclespower <= 24
            if ivsize < 16:
                # AES-CBC needs a full 16-byte IV; zero-pad the stored one.
                iv += bytes('\x00' * (16 - ivsize), 'ascii')
            key = calculate_key(byte_password, numcyclespower, salt, 'sha256')
            if len(coders) > 0:
                self.lzma_decompressor = self._set_lzma_decompressor(coders)  # type: Union[lzma.LZMADecompressor, CopyDecompressor]  # noqa
            else:
                self.lzma_decompressor = CopyDecompressor()
            self.cipher = AES.new(key, AES.MODE_CBC, iv)
            self.buf = Buffer(size=READ_BLOCKSIZE + 16)
            self.flushed = False
        else:
            raise UnsupportedCompressionMethodError

    # set pipeline decompressor
    def _set_lzma_decompressor(self, coders: List[Dict[str, Any]]) -> lzma.LZMADecompressor:
        """Build the inner raw LZMA decompressor from the remaining coders."""
        filters = []  # type: List[Dict[str, Any]]
        for coder in coders:
            filter = self.lzma_methods_map.get(coder['method'], None)
            if filter is not None:
                properties = coder.get('properties', None)
                # Coders are stored last-applied-first; prepend to reverse.
                if properties is not None:
                    filters[:0] = [lzma._decode_filter_properties(filter, properties)]  # type: ignore
                else:
                    filters[:0] = [{'id': filter}]
            else:
                raise UnsupportedCompressionMethodError
        return lzma.LZMADecompressor(format=lzma.FORMAT_RAW, filters=filters)

    def decompress(self, data: Union[bytes, bytearray, memoryview], max_length: int = -1) -> bytes:
        if len(data) == 0 and len(self.buf) == 0:  # action flush
            return self.lzma_decompressor.decompress(b'', max_length)
        elif len(data) == 0:  # action padding
            # FIX: was `self.flushded = True` (typo) — the flag set here never
            # matched the `self.flushed` attribute initialized in __init__.
            self.flushed = True
            # align = 16
            # padlen = (align - offset % align) % align
            #        = (align - (offset & (align - 1))) & (align - 1)
            #        = -offset & (align -1)
            #        = -offset & (16 - 1) = -offset & 15
            padlen = -len(self.buf) & 15
            self.buf.add(bytes(padlen))
            temp = self.cipher.decrypt(self.buf.view)  # type: bytes
            self.buf.reset()
            return self.lzma_decompressor.decompress(temp, max_length)
        else:
            currentlen = len(self.buf) + len(data)
            nextpos = (currentlen // 16) * 16
            if currentlen == nextpos:
                # Buffered + new data is block-aligned: decrypt everything.
                self.buf.add(data)
                temp = self.cipher.decrypt(self.buf.view)
                self.buf.reset()
                return self.lzma_decompressor.decompress(temp, max_length)
            else:
                # Decrypt the aligned prefix; keep the trailing partial block.
                buflen = len(self.buf)
                temp2 = data[nextpos - buflen:]
                self.buf.add(data[:nextpos - buflen])
                temp = self.cipher.decrypt(self.buf.view)
                self.buf.set(temp2)
                return self.lzma_decompressor.decompress(temp, max_length)


class ZstdDecompressor(ISevenZipDecompressor):
    """Zstandard decompressor (requires the optional `zstandard` package)."""

    def __init__(self):
        if Zstd is None:
            raise UnsupportedCompressionMethodError
        self.buf = b''  # type: bytes
        self._ctc = Zstd.ZstdDecompressor()  # type: ignore

    def decompress(self, data: Union[bytes, bytearray, memoryview], max_length: int = -1) -> bytes:
        # NOTE(review): a fresh decompressobj is created per call, so each
        # chunk is treated as the start of a stream — this looks broken for
        # multi-chunk streaming input; confirm against upstream usage.
        dobj = self._ctc.decompressobj()  # type: ignore
        if max_length < 0:
            res = self.buf + dobj.decompress(data)
            self.buf = b''
        else:
            tmp = self.buf + dobj.decompress(data)
            res = tmp[:max_length]
            self.buf = tmp[max_length:]
        return res


class ZstdCompressor(ISevenZipCompressor):
    """Zstandard compressor (requires the optional `zstandard` package)."""

    def __init__(self):
        if Zstd is None:
            raise UnsupportedCompressionMethodError
        self._ctc = Zstd.ZstdCompressor()  # type: ignore

    def compress(self, data: Union[bytes, bytearray, memoryview]) -> bytes:
        return self._ctc.compress(data)  # type: ignore

    def flush(self):
        # NOTE(review): returns None, unlike the bytes promised by the
        # interface; callers appear not to use the return value.
        pass
# p7zr library
# Copyright (c) 2019 Hiroshi Miura
# Copyright (c) 2004-2015 by Joachim Bauch, mail@joachim-bauch.de
# SPDX-License-Identifier: LGPL-2.1-or-later
#
"""Helper utilities: CRC32, the 7zAES key derivation, FILETIME/timezone
conversion, symlink compatibility shims, and small IO/buffer classes."""

import _hashlib  # type: ignore  # noqa
import ctypes
import os
import pathlib
import platform
import sys
import time as _time
import zlib
from datetime import datetime, timedelta, timezone, tzinfo
from typing import BinaryIO, Optional, Union


def calculate_crc32(data: bytes, value: Optional[int] = None, blocksize: int = 1024 * 1024) -> int:
    """Calculate CRC32 of strings with arbitrary lengths.

    :param data: input bytes.
    :param value: running CRC to continue from (None or 0 both start fresh;
        this is safe because zlib.crc32(d, 0) == zlib.crc32(d)).
    :param blocksize: chunk size used to bound per-call memory.
    :returns: unsigned 32-bit CRC.
    """
    length = len(data)
    pos = blocksize
    if value:
        value = zlib.crc32(data[:pos], value)
    else:
        value = zlib.crc32(data[:pos])
    while pos < length:
        value = zlib.crc32(data[pos:pos + blocksize], value)
        pos += blocksize
    return value & 0xffffffff


def _calculate_key1(password: bytes, cycles: int, salt: bytes, digest: str) -> bytes:
    """Calculate 7zip AES encryption key. Base implementation."""
    # FIX: was `digest not in ('sha256')` — a *substring* test against the
    # string 'sha256', which would accept e.g. 'sha'.  Use a real tuple.
    if digest not in ('sha256',):
        raise ValueError('Unknown digest method for password protection.')
    assert cycles <= 0x3f
    if cycles == 0x3f:
        # Special case: no hashing, key is salt+password zero-padded to 32.
        ba = bytearray(salt + password + bytes(32))
        key = bytes(ba[:32])  # type: bytes
    else:
        rounds = 1 << cycles
        m = _hashlib.new(digest)
        # Renamed from `round`, which shadowed the builtin.
        for counter in range(rounds):
            m.update(salt + password + counter.to_bytes(8, byteorder='little', signed=False))
        key = m.digest()[:32]
    return key


def _calculate_key2(password: bytes, cycles: int, salt: bytes, digest: str) -> bytes:
    """Calculate 7zip AES encryption key.
    It utilize ctypes and memoryview buffer and zero-copy technology on Python."""
    # FIX: tuple membership test (see _calculate_key1).
    if digest not in ('sha256',):
        raise ValueError('Unknown digest method for password protection.')
    assert cycles <= 0x3f
    if cycles == 0x3f:
        key = bytes(bytearray(salt + password + bytes(32))[:32])  # type: bytes
    else:
        rounds = 1 << cycles
        m = _hashlib.new(digest)
        length = len(salt) + len(password)

        # Packed struct so the counter bytes sit right after salt+password,
        # letting us hash one reused buffer via memoryview (zero-copy).
        class RoundBuf(ctypes.LittleEndianStructure):
            _pack_ = 1
            _fields_ = [
                ('saltpassword', ctypes.c_ubyte * length),
                ('round', ctypes.c_uint64)
            ]

        buf = RoundBuf()
        for i, c in enumerate(salt + password):
            buf.saltpassword[i] = c
        buf.round = 0
        mv = memoryview(buf)  # type: ignore  # noqa
        while buf.round < rounds:
            m.update(mv)
            buf.round += 1
        key = m.digest()[:32]
    return key


def _calculate_key3(password: bytes, cycles: int, salt: bytes, digest: str) -> bytes:
    """Calculate 7zip AES encryption key.
    Concat values in order to reduce number of calls of Hash.update()."""
    # FIX: tuple membership test (see _calculate_key1).
    if digest not in ('sha256',):
        raise ValueError('Unknown digest method for password protection.')
    assert cycles <= 0x3f
    if cycles == 0x3f:
        ba = bytearray(salt + password + bytes(32))
        key = bytes(ba[:32])  # type: bytes
    else:
        # Batch up to 2**cat_cycle round payloads per update() call.
        cat_cycle = 6
        if cycles > cat_cycle:
            rounds = 1 << cat_cycle
            stages = 1 << (cycles - cat_cycle)
        else:
            rounds = 1 << cycles
            stages = 1 << 0
        m = _hashlib.new(digest)
        saltpassword = salt + password
        s = 0  # type: int  # (0..stages) * rounds
        if platform.python_implementation() == "PyPy":
            # PyPy hashes a memoryview faster than bytes.
            for _ in range(stages):
                m.update(memoryview(b''.join([saltpassword + (s + i).to_bytes(8, byteorder='little', signed=False)
                                              for i in range(rounds)])))
                s += rounds
        else:
            for _ in range(stages):
                m.update(b''.join([saltpassword + (s + i).to_bytes(8, byteorder='little', signed=False)
                                   for i in range(rounds)]))
                s += rounds
        key = m.digest()[:32]
    return key


# Pick the fastest implementation for the running interpreter.
if platform.python_implementation() == "PyPy" or sys.version_info > (3, 6):
    calculate_key = _calculate_key3
else:
    calculate_key = _calculate_key2  # it is faster when CPython 3.6.x


def filetime_to_dt(ft):
    """Convert Windows NTFS file time into python datetime object."""
    # FILETIME epoch (1601-01-01) expressed in 100ns units since Unix epoch.
    EPOCH_AS_FILETIME = 116444736000000000
    us = (ft - EPOCH_AS_FILETIME) // 10
    return datetime(1970, 1, 1, tzinfo=timezone.utc) + timedelta(microseconds=us)


ZERO = timedelta(0)
HOUR = timedelta(hours=1)
SECOND = timedelta(seconds=1)

# A class capturing the platform's idea of local time.
# (May result in wrong values on historical times in
# timezones where UTC offset and/or the DST rules had
# changed in the past.)

STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
    DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
    DSTOFFSET = STDOFFSET

DSTDIFF = DSTOFFSET - STDOFFSET


class LocalTimezone(tzinfo):
    """tzinfo describing the platform's local timezone via the time module."""

    def fromutc(self, dt):
        # NOTE(review): the original also computed a DST "fold" flag here but
        # never used it; that dead computation has been removed.
        assert dt.tzinfo is self
        stamp = (dt - datetime(1970, 1, 1, tzinfo=self)) // SECOND
        args = _time.localtime(stamp)[:6]
        return datetime(*args, microsecond=dt.microsecond, tzinfo=self)

    def utcoffset(self, dt):
        if self._isdst(dt):
            return DSTOFFSET
        else:
            return STDOFFSET

    def dst(self, dt):
        if self._isdst(dt):
            return DSTDIFF
        else:
            return ZERO

    def tzname(self, dt):
        return _time.tzname[self._isdst(dt)]

    def _isdst(self, dt):
        # Round-trip through mktime/localtime to ask libc about DST.
        tt = (dt.year, dt.month, dt.day,
              dt.hour, dt.minute, dt.second,
              dt.weekday(), 0, 0)
        stamp = _time.mktime(tt)
        tt = _time.localtime(stamp)
        return tt.tm_isdst > 0


Local = LocalTimezone()
# Seconds between the FILETIME epoch (1601) and the Unix epoch (1970).
TIMESTAMP_ADJUST = -11644473600


class UTC(tzinfo):
    """UTC"""

    def utcoffset(self, dt):
        return ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return ZERO

    def __call__(self):
        # FIX: was misspelled `_call__`, so UTC()() raised TypeError instead
        # of returning the instance.
        return self


class ArchiveTimestamp(int):
    """Windows FILETIME timestamp."""

    def __repr__(self):
        return '%s(%d)' % (type(self).__name__, self)

    def totimestamp(self) -> float:
        """Convert 7z FILETIME to Python timestamp."""
        # FILETIME is 100-nanosecond intervals since 1601/01/01 (UTC)
        return (self / 10000000.0) + TIMESTAMP_ADJUST

    def as_datetime(self):
        """Convert FILETIME to Python datetime object."""
        return datetime.fromtimestamp(self.totimestamp(), UTC())

    @staticmethod
    def from_datetime(val):
        """Build an ArchiveTimestamp from a POSIX timestamp number."""
        return ArchiveTimestamp((val - TIMESTAMP_ADJUST) * 10000000.0)


def islink(path):
    """
    Cross-platform islink implementation.
    Supports Windows NT symbolic links and reparse points.
    """
    is_symlink = os.path.islink(str(path))
    if sys.version_info >= (3, 8) or sys.platform != "win32" or sys.getwindowsversion()[0] < 6:
        return is_symlink
    # Imported lazily: only needed on Windows with Python < 3.8.
    import py7zr.win32compat
    # special check for directory junctions which py38 does.
    if is_symlink:
        if py7zr.win32compat.is_reparse_point(path):
            is_symlink = False
    return is_symlink


def readlink(path: Union[str, pathlib.Path], *, dir_fd=None) -> Union[str, pathlib.Path]:
    """
    Cross-platform compat implementation of os.readlink and Path.readlink().
    Supports Windows NT symbolic links and reparse points.
    When called with path argument as pathlike(str), return result as a pathlike(str).
    When called with Path object, return also Path object.
    When called with path argument as bytes, return result as a bytes.
    """
    is_path_pathlib = isinstance(path, pathlib.Path)
    if sys.version_info >= (3, 9):
        if is_path_pathlib and dir_fd is None:
            return path.readlink()
        else:
            return os.readlink(path, dir_fd=dir_fd)
    elif sys.version_info >= (3, 8) or sys.platform != "win32":
        res = os.readlink(path, dir_fd=dir_fd)
        # Hack to handle a wrong type of results
        if isinstance(res, bytes):
            res = os.fsdecode(res)
        if is_path_pathlib:
            return pathlib.Path(res)
        else:
            return res
    elif not os.path.exists(str(path)):
        raise OSError(22, 'Invalid argument', path)
    # Imported lazily: only needed on Windows with Python < 3.8.
    import py7zr.win32compat
    return py7zr.win32compat.readlink(path)


class MemIO:
    """pathlib.Path-like IO class to write memory(io.Bytes)"""

    def __init__(self, buf: BinaryIO):
        self._buf = buf

    def write(self, data: bytes) -> int:
        return self._buf.write(data)

    def read(self, length: Optional[int] = None) -> bytes:
        if length is not None:
            return self._buf.read(length)
        else:
            return self._buf.read()

    def close(self) -> None:
        # "close" only rewinds so the in-memory content stays readable.
        self._buf.seek(0)

    def flush(self) -> None:
        pass

    def seek(self, position: int) -> None:
        self._buf.seek(position)

    def open(self, mode=None):
        return self

    @property
    def parent(self):
        return self

    def mkdir(self, parents=None, exist_ok=False):
        return None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass


class NullIO:
    """pathlib.Path-like IO class of /dev/null"""

    def __init__(self):
        pass

    def write(self, data):
        # Pretend everything was written.
        return len(data)

    def read(self, length=None):
        if length is not None:
            return bytes(length)
        else:
            return b''

    def close(self):
        pass

    def flush(self):
        pass

    def open(self, mode=None):
        return self

    @property
    def parent(self):
        return self

    def mkdir(self):
        return None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass


class BufferOverflow(Exception):
    """Raised when more data is pushed into Buffer than its capacity."""
    pass


class Buffer:
    """Fixed-capacity byte accumulator exposing its content as `view`."""

    def __init__(self, size: int = 16):
        self._size = size
        self._buf = bytearray(size)
        self._buflen = 0
        # NOTE: bytearray slicing copies, so `view` is a memoryview over a
        # snapshot, not over the live buffer.
        self.view = memoryview(self._buf[0:0])

    def add(self, data: Union[bytes, bytearray, memoryview]):
        """Append data; raises BufferOverflow when capacity is exceeded."""
        length = len(data)
        if length + self._buflen > self._size:
            raise BufferOverflow()
        self._buf[self._buflen:self._buflen + length] = data
        self._buflen += length
        self.view = memoryview(self._buf[0:self._buflen])

    def reset(self) -> None:
        """Empty the buffer."""
        self._buflen = 0
        self.view = memoryview(self._buf[0:0])

    def set(self, data: Union[bytes, bytearray, memoryview]) -> None:
        """Replace the whole content with data."""
        length = len(data)
        if length > self._size:
            raise BufferOverflow()
        self._buf[0:length] = data
        self._buflen = length
        self.view = memoryview(self._buf[0:length])

    def __len__(self) -> int:
        return self._buflen
#
# p7zr library
#
# Copyright (c) 2019 Hiroshi Miura
# Copyright (c) 2004-2015 by Joachim Bauch, mail@joachim-bauch.de
# 7-Zip Copyright (C) 1999-2010 Igor Pavlov
# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#

import binascii
from enum import Enum
from typing import Optional

# 7z archive signature magic bytes and the finish marker.
MAGIC_7Z = binascii.unhexlify('377abcaf271c')
FINISH_7Z = binascii.unhexlify('377abcaf271d')
# I/O chunk size used throughout the package.
# NOTE(review): the original defined READ_BLOCKSIZE twice with the same
# value; the redundant second assignment has been removed.
READ_BLOCKSIZE = 32248
QUEUELEN = READ_BLOCKSIZE * 2


class ByteEnum(bytes, Enum):
    """Enum whose members compare equal to their raw byte values."""
    pass


class Property(ByteEnum):
    """Hold 7zip property fixed values."""
    END = binascii.unhexlify('00')
    HEADER = binascii.unhexlify('01')
    ARCHIVE_PROPERTIES = binascii.unhexlify('02')
    ADDITIONAL_STREAMS_INFO = binascii.unhexlify('03')
    MAIN_STREAMS_INFO = binascii.unhexlify('04')
    FILES_INFO = binascii.unhexlify('05')
    PACK_INFO = binascii.unhexlify('06')
    UNPACK_INFO = binascii.unhexlify('07')
    SUBSTREAMS_INFO = binascii.unhexlify('08')
    SIZE = binascii.unhexlify('09')
    CRC = binascii.unhexlify('0a')
    FOLDER = binascii.unhexlify('0b')
    CODERS_UNPACK_SIZE = binascii.unhexlify('0c')
    NUM_UNPACK_STREAM = binascii.unhexlify('0d')
    EMPTY_STREAM = binascii.unhexlify('0e')
    EMPTY_FILE = binascii.unhexlify('0f')
    ANTI = binascii.unhexlify('10')
    NAME = binascii.unhexlify('11')
    CREATION_TIME = binascii.unhexlify('12')
    LAST_ACCESS_TIME = binascii.unhexlify('13')
    LAST_WRITE_TIME = binascii.unhexlify('14')
    ATTRIBUTES = binascii.unhexlify('15')
    COMMENT = binascii.unhexlify('16')
    ENCODED_HEADER = binascii.unhexlify('17')
    START_POS = binascii.unhexlify('18')
    DUMMY = binascii.unhexlify('19')


class CompressionMethod(ByteEnum):
    """Hold fixed values for method parameter."""
    COPY = binascii.unhexlify('00')
    DELTA = binascii.unhexlify('03')
    BCJ = binascii.unhexlify('04')
    PPC = binascii.unhexlify('05')
    IA64 = binascii.unhexlify('06')
    ARM = binascii.unhexlify('07')
    ARMT = binascii.unhexlify('08')
    SPARC = binascii.unhexlify('09')
    # SWAP = 02..
    SWAP2 = binascii.unhexlify('020302')
    SWAP4 = binascii.unhexlify('020304')
    # 7Z = 03..
    LZMA = binascii.unhexlify('030101')
    PPMD = binascii.unhexlify('030401')
    P7Z_BCJ = binascii.unhexlify('03030103')
    P7Z_BCJ2 = binascii.unhexlify('0303011B')
    BCJ_PPC = binascii.unhexlify('03030205')
    BCJ_IA64 = binascii.unhexlify('03030401')
    BCJ_ARM = binascii.unhexlify('03030501')
    BCJ_ARMT = binascii.unhexlify('03030701')
    BCJ_SPARC = binascii.unhexlify('03030805')
    LZMA2 = binascii.unhexlify('21')
    # MISC : 04..
    MISC_ZIP = binascii.unhexlify('0401')
    MISC_BZIP2 = binascii.unhexlify('040202')
    MISC_DEFLATE = binascii.unhexlify('040108')
    MISC_DEFLATE64 = binascii.unhexlify('040109')
    MISC_Z = binascii.unhexlify('0405')
    MISC_LZH = binascii.unhexlify('0406')
    NSIS_DEFLATE = binascii.unhexlify('040901')
    NSIS_BZIP2 = binascii.unhexlify('040902')
    #
    MISC_ZSTD = binascii.unhexlify('04f71101')
    MISC_BROTLI = binascii.unhexlify('04f71102')
    MISC_LZ4 = binascii.unhexlify('04f71104')
    MISC_LZS = binascii.unhexlify('04f71105')
    MISC_LIZARD = binascii.unhexlify('04f71106')
    # CRYPTO 06..
    CRYPT_ZIPCRYPT = binascii.unhexlify('06f10101')
    CRYPT_RAR29AES = binascii.unhexlify('06f10303')
    CRYPT_AES256_SHA256 = binascii.unhexlify('06f10701')


class SupportedMethods:
    """Hold list of methods which python3 can support."""
    formats = [{'name': "7z", 'magic': MAGIC_7Z}]
    codecs = [{'id': CompressionMethod.LZMA, 'name': "LZMA"},
              {'id': CompressionMethod.LZMA2, 'name': "LZMA2"},
              {'id': CompressionMethod.DELTA, 'name': "DELTA"},
              {'id': CompressionMethod.P7Z_BCJ, 'name': "BCJ"},
              {'id': CompressionMethod.BCJ_PPC, 'name': 'PPC'},
              {'id': CompressionMethod.BCJ_IA64, 'name': 'IA64'},
              {'id': CompressionMethod.BCJ_ARM, 'name': "ARM"},
              {'id': CompressionMethod.BCJ_ARMT, 'name': "ARMT"},
              {'id': CompressionMethod.BCJ_SPARC, 'name': 'SPARC'}
              ]


class ArchivePassword:
    """Borg/monostate holder for the archive password.

    Every instance shares ``_shared_state``, so setting the password on one
    instance makes it visible through all of them.
    """

    _shared_state = {
        '_password': None,
    }

    def __init__(self, password: Optional[str] = None):
        # Borg pattern: all instances alias the same attribute dict.
        self.__dict__ = self._shared_state
        if password is not None:
            self._password = password

    def set(self, password):
        """Store *password* in the shared state."""
        self._password = password

    def get(self):
        """Return the stored password, or '' when none has been set."""
        if self._password is not None:
            return self._password
        return ''

    def __str__(self):
        # Same contract as get(); deduplicated instead of repeating the logic.
        return self.get()
#!/usr/bin/python -u
#
# p7zr library
#
# Copyright (c) 2019,2020 Hiroshi Miura
# Copyright (c) 2004-2015 by Joachim Bauch, mail@joachim-bauch.de
# 7-Zip Copyright (C) 1999-2010 Igor Pavlov
# LZMA SDK Copyright (C) 1999-2010 Igor Pavlov
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
"""Read 7zip format archives."""
import collections.abc
import datetime
import errno
import functools
import io
import operator
import os
import queue
import stat
import sys
import threading
from io import BytesIO
from typing import IO, Any, BinaryIO, Dict, List, Optional, Tuple, Union

from py7zr.archiveinfo import Folder, Header, SignatureHeader
from py7zr.callbacks import ExtractCallback
from py7zr.compression import SevenZipCompressor, Worker, get_methods_names
from py7zr.exceptions import Bad7zFile, CrcError, DecompressionError, InternalError
from py7zr.helpers import ArchiveTimestamp, MemIO, calculate_crc32, filetime_to_dt
from py7zr.properties import MAGIC_7Z, READ_BLOCKSIZE, ArchivePassword

if sys.version_info < (3, 6):
    # Backport packages for old interpreters.
    import contextlib2 as contextlib
    import pathlib2 as pathlib
else:
    import contextlib
    import pathlib

if sys.platform.startswith('win'):
    import _winapi

# Bit 15 of the 7z attribute word signals that the high 16 bits carry a
# unix st_mode value (7-Zip's unix extension).
FILE_ATTRIBUTE_UNIX_EXTENSION = 0x8000
# Mask of attribute bits that are meaningful on Windows.
FILE_ATTRIBUTE_WINDOWS_MASK = 0x04fff


class ArchiveFile:
    """Represent each files metadata inside archive file.
    It holds file properties; filename, permissions, and type whether
    it is directory, link or normal file.

    Instances of the :class:`ArchiveFile` class are returned by iterating :attr:`files_list` of
    :class:`SevenZipFile` objects.
    Each object stores information about a single member of the 7z archive. Most of users use :meth:`extractall()`.

    The class also hold an archive parameter where file is exist in
    archive file folder(container)."""

    def __init__(self, id: int, file_info: Dict[str, Any]) -> None:
        self.id = id                     # position of the file inside the archive
        self._file_info = file_info      # raw property dict parsed from the header

    def file_properties(self) -> Dict[str, Any]:
        """Return file properties as a hash object. Following keys are included: ‘readonly’, ‘is_directory’,
        ‘posix_mode’, ‘archivable’, ‘emptystream’, ‘filename’, ‘creationtime’, ‘lastaccesstime’,
        ‘lastwritetime’, ‘attributes’
        """
        properties = self._file_info
        if properties is not None:
            # Enrich the raw dict with the derived flags below.
            properties['readonly'] = self.readonly
            properties['posix_mode'] = self.posix_mode
            properties['archivable'] = self.archivable
            properties['is_directory'] = self.is_directory
        return properties

    def _get_property(self, key: str) -> Any:
        """Return the raw property *key*, or None when absent."""
        try:
            return self._file_info[key]
        except KeyError:
            return None

    @property
    def origin(self) -> pathlib.Path:
        return self._get_property('origin')

    @property
    def folder(self) -> Folder:
        return self._get_property('folder')

    @property
    def filename(self) -> str:
        """return filename of archive file."""
        return self._get_property('filename')

    @property
    def emptystream(self) -> bool:
        """True if file is empty(0-byte file), otherwise False"""
        return self._get_property('emptystream')

    @property
    def uncompressed(self) -> List[int]:
        return self._get_property('uncompressed')

    @property
    def uncompressed_size(self) -> int:
        """Uncompressed file size.

        ``sum`` replaces ``functools.reduce(operator.add, ...)``: the result
        is identical for non-empty lists and it returns 0 instead of raising
        ``TypeError`` on an empty one.
        """
        return sum(self.uncompressed)

    @property
    def compressed(self) -> Optional[int]:
        """Compressed size"""
        return self._get_property('compressed')

    @property
    def crc32(self) -> Optional[int]:
        """CRC of archived file(optional)"""
        return self._get_property('digest')

    def _test_attribute(self, target_bit: int) -> bool:
        """True when every bit of *target_bit* is set in 'attributes'."""
        attributes = self._get_property('attributes')
        if attributes is None:
            return False
        return attributes & target_bit == target_bit

    @property
    def archivable(self) -> bool:
        """File has a Windows `archive` flag."""
        return self._test_attribute(stat.FILE_ATTRIBUTE_ARCHIVE)  # type: ignore  # noqa

    @property
    def is_directory(self) -> bool:
        """True if file is a directory, otherwise False."""
        return self._test_attribute(stat.FILE_ATTRIBUTE_DIRECTORY)  # type: ignore  # noqa

    @property
    def readonly(self) -> bool:
        """True if file is readonly, otherwise False."""
        return self._test_attribute(stat.FILE_ATTRIBUTE_READONLY)  # type: ignore  # noqa

    def _get_unix_extension(self) -> Optional[int]:
        """Return the unix st_mode stored in the high 16 attribute bits, if any."""
        attributes = self._get_property('attributes')
        if self._test_attribute(FILE_ATTRIBUTE_UNIX_EXTENSION):
            return attributes >> 16
        return None

    @property
    def is_symlink(self) -> bool:
        """True if file is a symbolic link, otherwise False."""
        e = self._get_unix_extension()
        if e is not None:
            return stat.S_ISLNK(e)
        # No unix extension: fall back to the Windows reparse-point flag.
        return self._test_attribute(stat.FILE_ATTRIBUTE_REPARSE_POINT)  # type: ignore  # noqa

    @property
    def is_junction(self) -> bool:
        """True if file is a junction/reparse point on windows, otherwise False."""
        return self._test_attribute(stat.FILE_ATTRIBUTE_REPARSE_POINT |  # type: ignore  # noqa
                                    stat.FILE_ATTRIBUTE_DIRECTORY)  # type: ignore  # noqa

    @property
    def is_socket(self) -> bool:
        """True if file is a socket, otherwise False."""
        e = self._get_unix_extension()
        if e is not None:
            return stat.S_ISSOCK(e)
        return False

    @property
    def lastwritetime(self) -> Optional[ArchiveTimestamp]:
        """Return last written timestamp of a file."""
        return self._get_property('lastwritetime')

    @property
    def posix_mode(self) -> Optional[int]:
        """
        posix mode when a member has a unix extension property, or None
        :return: Return file stat mode can be set by os.chmod()
        """
        e = self._get_unix_extension()
        if e is not None:
            return stat.S_IMODE(e)
        return None

    @property
    def st_fmt(self) -> Optional[int]:
        """
        :return: Return the portion of the file mode that describes the file type
        """
        e = self._get_unix_extension()
        if e is not None:
            return stat.S_IFMT(e)
        return None
class ArchiveFileList(collections.abc.Iterable):
    """Iteratable container of ArchiveFile.

    Stores raw file-info dicts and materializes :class:`ArchiveFile`
    wrappers on access; ``offset`` shifts the ids reported for members that
    belong to a folder starting mid-archive.
    """

    def __init__(self, offset: int = 0):
        self.files_list = []  # type: List[dict]
        self.index = 0
        self.offset = offset

    def append(self, file_info: Dict[str, Any]) -> None:
        """Add one raw file-info dict to the container."""
        self.files_list.append(file_info)

    def __len__(self) -> int:
        return len(self.files_list)

    def __iter__(self) -> 'ArchiveFileListIterator':
        return ArchiveFileListIterator(self)

    def __getitem__(self, index):
        # Reject out-of-range and negative indices explicitly.  The original
        # guard used ``index > len(...)``, which let ``index == len`` fall
        # through to the raw list access; ``>=`` raises the IndexError here.
        if index >= len(self.files_list):
            raise IndexError
        if index < 0:
            raise IndexError
        return ArchiveFile(index + self.offset, self.files_list[index])


class ArchiveFileListIterator(collections.abc.Iterator):
    """Iterator over an ArchiveFileList, yielding ArchiveFile objects."""

    def __init__(self, archive_file_list):
        self._archive_file_list = archive_file_list
        self._index = 0

    def __next__(self) -> 'ArchiveFile':
        if self._index == len(self._archive_file_list):
            raise StopIteration
        res = self._archive_file_list[self._index]
        self._index += 1
        return res


# ------------------
# Exported Classes
# ------------------
class ArchiveInfo:
    """Hold archive information"""

    def __init__(self, filename, size, header_size, method_names, solid, blocks, uncompressed):
        self.filename = filename            # archive file name
        self.size = size                    # archive size in bytes
        self.header_size = header_size      # size of the archive header
        self.method_names = method_names    # comma-joined codec names
        self.solid = solid                  # True when any folder holds >1 stream
        self.blocks = blocks                # number of folders (blocks)
        self.uncompressed = uncompressed    # total uncompressed size


class FileInfo:
    """Hold archived file information."""

    def __init__(self, filename, compressed, uncompressed, archivable, is_directory, creationtime, crc32):
        self.filename = filename
        self.compressed = compressed
        self.uncompressed = uncompressed
        self.archivable = archivable
        self.is_directory = is_directory
        self.creationtime = creationtime
        self.crc32 = crc32
SevenZipFile(contextlib.AbstractContextManager): + """The SevenZipFile Class provides an interface to 7z archives.""" + + def __init__(self, file: Union[BinaryIO, str, pathlib.Path], mode: str = 'r', + *, filters: Optional[str] = None, dereference=False, password: Optional[str] = None) -> None: + if mode not in ('r', 'w', 'x', 'a'): + raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'") + if password is not None: + if mode not in ('r'): + raise NotImplementedError("It has not been implemented to create archive with password.") + ArchivePassword(password) + self.password_protected = True + else: + self.password_protected = False + # Check if we were passed a file-like object or not + if isinstance(file, str): + self._filePassed = False # type: bool + self.filename = file # type: str + if mode == 'r': + self.fp = open(file, 'rb') # type: BinaryIO + elif mode == 'w': + self.fp = open(file, 'w+b') + elif mode == 'x': + self.fp = open(file, 'x+b') + elif mode == 'a': + self.fp = open(file, 'r+b') + else: + raise ValueError("File open error.") + self.mode = mode + elif isinstance(file, pathlib.Path): + self._filePassed = False + self.filename = str(file) + if mode == 'r': + self.fp = file.open(mode='rb') # type: ignore # noqa # typeshed issue: 2911 + elif mode == 'w': + self.fp = file.open(mode='w+b') # type: ignore # noqa + elif mode == 'x': + self.fp = file.open(mode='x+b') # type: ignore # noqa + elif mode == 'a': + self.fp = file.open(mode='r+b') # type: ignore # noqa + else: + raise ValueError("File open error.") + self.mode = mode + elif isinstance(file, io.IOBase): + self._filePassed = True + self.fp = file + self.filename = getattr(file, 'name', None) + self.mode = mode # type: ignore #noqa + else: + raise TypeError("invalid file: {}".format(type(file))) + self._fileRefCnt = 1 + try: + if mode == "r": + self._real_get_contents(self.fp) + self._reset_worker() + elif mode in 'w': + # FIXME: check filters here + self.folder = self._create_folder(filters) 
+ self.files = ArchiveFileList() + self._prepare_write() + self._reset_worker() + elif mode in 'x': + raise NotImplementedError + elif mode == 'a': + raise NotImplementedError + else: + raise ValueError("Mode must be 'r', 'w', 'x', or 'a'") + except Exception as e: + self._fpclose() + raise e + self.encoded_header_mode = False + self._dict = {} # type: Dict[str, IO[Any]] + self.dereference = dereference + self.reporterd = None # type: Optional[threading.Thread] + self.q = queue.Queue() # type: queue.Queue[Any] + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.close() + + def _create_folder(self, filters): + folder = Folder() + folder.compressor = SevenZipCompressor(filters) + folder.coders = folder.compressor.coders + folder.solid = True + folder.digestdefined = False + folder.bindpairs = [] + folder.totalin = 1 + folder.totalout = 1 + return folder + + def _fpclose(self) -> None: + assert self._fileRefCnt > 0 + self._fileRefCnt -= 1 + if not self._fileRefCnt and not self._filePassed: + self.fp.close() + + def _real_get_contents(self, fp: BinaryIO) -> None: + if not self._check_7zfile(fp): + raise Bad7zFile('not a 7z file') + self.sig_header = SignatureHeader.retrieve(self.fp) + self.afterheader = self.fp.tell() + buffer = self._read_header_data() + header = Header.retrieve(self.fp, buffer, self.afterheader) + if header is None: + return + self.header = header + buffer.close() + self.files = ArchiveFileList() + if getattr(self.header, 'files_info', None) is not None: + self._filelist_retrieve() + + def _read_header_data(self) -> BytesIO: + self.fp.seek(self.sig_header.nextheaderofs, os.SEEK_CUR) + buffer = io.BytesIO(self.fp.read(self.sig_header.nextheadersize)) + if self.sig_header.nextheadercrc != calculate_crc32(buffer.getvalue()): + raise Bad7zFile('invalid header data') + return buffer + + class ParseStatus: + def __init__(self, src_pos=0): + self.src_pos = src_pos + self.folder = 0 # 7zip folder where target 
stored + self.outstreams = 0 # output stream count + self.input = 0 # unpack stream count in each folder + self.stream = 0 # target input stream position + + def _gen_filename(self) -> str: + # compressed file is stored without a name, generate one + try: + basefilename = self.filename + except AttributeError: + # 7z archive file doesn't have a name + return 'contents' + else: + if basefilename is not None: + fn, ext = os.path.splitext(os.path.basename(basefilename)) + return fn + else: + return 'contents' + + def _get_fileinfo_sizes(self, pstat, subinfo, packinfo, folder, packsizes, unpacksizes, file_in_solid, numinstreams): + if pstat.input == 0: + folder.solid = subinfo.num_unpackstreams_folders[pstat.folder] > 1 + maxsize = (folder.solid and packinfo.packsizes[pstat.stream]) or None + uncompressed = unpacksizes[pstat.outstreams] + if not isinstance(uncompressed, (list, tuple)): + uncompressed = [uncompressed] * len(folder.coders) + if file_in_solid > 0: + compressed = None + elif pstat.stream < len(packsizes): # file is compressed + compressed = packsizes[pstat.stream] + else: # file is not compressed + compressed = uncompressed + packsize = packsizes[pstat.stream:pstat.stream + numinstreams] + return maxsize, compressed, uncompressed, packsize, folder.solid + + def _filelist_retrieve(self) -> None: + # Initialize references for convenience + if hasattr(self.header, 'main_streams') and self.header.main_streams is not None: + folders = self.header.main_streams.unpackinfo.folders + packinfo = self.header.main_streams.packinfo + subinfo = self.header.main_streams.substreamsinfo + packsizes = packinfo.packsizes + unpacksizes = subinfo.unpacksizes if subinfo.unpacksizes is not None else [x.unpacksizes for x in folders] + else: + subinfo = None + folders = None + packinfo = None + packsizes = [] + unpacksizes = [0] + + pstat = self.ParseStatus() + pstat.src_pos = self.afterheader + file_in_solid = 0 + + for file_id, file_info in 
enumerate(self.header.files_info.files): + if not file_info['emptystream'] and folders is not None: + folder = folders[pstat.folder] + numinstreams = max([coder.get('numinstreams', 1) for coder in folder.coders]) + (maxsize, compressed, uncompressed, + packsize, solid) = self._get_fileinfo_sizes(pstat, subinfo, packinfo, folder, packsizes, + unpacksizes, file_in_solid, numinstreams) + pstat.input += 1 + folder.solid = solid + file_info['folder'] = folder + file_info['maxsize'] = maxsize + file_info['compressed'] = compressed + file_info['uncompressed'] = uncompressed + file_info['packsizes'] = packsize + if subinfo.digestsdefined[pstat.outstreams]: + file_info['digest'] = subinfo.digests[pstat.outstreams] + if folder is None: + pstat.src_pos += file_info['compressed'] + else: + if folder.solid: + file_in_solid += 1 + pstat.outstreams += 1 + if folder.files is None: + folder.files = ArchiveFileList(offset=file_id) + folder.files.append(file_info) + if pstat.input >= subinfo.num_unpackstreams_folders[pstat.folder]: + file_in_solid = 0 + pstat.src_pos += sum(packinfo.packsizes[pstat.stream:pstat.stream + numinstreams]) + pstat.folder += 1 + pstat.stream += numinstreams + pstat.input = 0 + else: + file_info['folder'] = None + file_info['maxsize'] = 0 + file_info['compressed'] = 0 + file_info['uncompressed'] = [0] + file_info['packsizes'] = [0] + + if 'filename' not in file_info: + file_info['filename'] = self._gen_filename() + self.files.append(file_info) + + def _num_files(self) -> int: + if getattr(self.header, 'files_info', None) is not None: + return len(self.header.files_info.files) + return 0 + + def _set_file_property(self, outfilename: pathlib.Path, properties: Dict[str, Any]) -> None: + # creation time + creationtime = ArchiveTimestamp(properties['lastwritetime']).totimestamp() + if creationtime is not None: + os.utime(str(outfilename), times=(creationtime, creationtime)) + if os.name == 'posix': + st_mode = properties['posix_mode'] + if st_mode is not None: + 
outfilename.chmod(st_mode) + return + # fallback: only set readonly if specified + if properties['readonly'] and not properties['is_directory']: + ro_mask = 0o777 ^ (stat.S_IWRITE | stat.S_IWGRP | stat.S_IWOTH) + outfilename.chmod(outfilename.stat().st_mode & ro_mask) + + def _reset_decompressor(self) -> None: + if self.header.main_streams is not None and self.header.main_streams.unpackinfo.numfolders > 0: + for i, folder in enumerate(self.header.main_streams.unpackinfo.folders): + folder.decompressor = None + + def _reset_worker(self) -> None: + """Seek to where archive data start in archive and recreate new worker.""" + self.fp.seek(self.afterheader) + self.worker = Worker(self.files, self.afterheader, self.header) + + def set_encoded_header_mode(self, mode: bool) -> None: + self.encoded_header_mode = mode + + @staticmethod + def _check_7zfile(fp: Union[BinaryIO, io.BufferedReader]) -> bool: + result = MAGIC_7Z == fp.read(len(MAGIC_7Z))[:len(MAGIC_7Z)] + fp.seek(-len(MAGIC_7Z), 1) + return result + + def _get_method_names(self) -> str: + methods_names = [] # type: List[str] + for folder in self.header.main_streams.unpackinfo.folders: + methods_names += get_methods_names(folder.coders) + return ', '.join(x for x in methods_names) + + def _test_digest_raw(self, pos: int, size: int, crc: int) -> bool: + self.fp.seek(pos) + remaining_size = size + digest = None + while remaining_size > 0: + block = min(READ_BLOCKSIZE, remaining_size) + digest = calculate_crc32(self.fp.read(block), digest) + remaining_size -= block + return digest == crc + + def _prepare_write(self) -> None: + self.sig_header = SignatureHeader() + self.sig_header._write_skelton(self.fp) + self.afterheader = self.fp.tell() + self.folder.totalin = 1 + self.folder.totalout = 1 + self.folder.bindpairs = [] + self.folder.unpacksizes = [] + self.header = Header.build_header([self.folder]) + + def _write_archive(self): + self.worker.archive(self.fp, self.folder, deref=self.dereference) + # Write header and 
update signature header + (header_pos, header_len, header_crc) = self.header.write(self.fp, self.afterheader, + encoded=self.encoded_header_mode) + self.sig_header.nextheaderofs = header_pos - self.afterheader + self.sig_header.calccrc(header_len, header_crc) + self.sig_header.write(self.fp) + return + + def _is_solid(self): + for f in self.header.main_streams.substreamsinfo.num_unpackstreams_folders: + if f > 1: + return True + return False + + def _var_release(self): + self._dict = None + self.files = None + self.folder = None + self.header = None + self.worker = None + self.sig_header = None + + @staticmethod + def _make_file_info(target: pathlib.Path, arcname: Optional[str] = None, dereference=False) -> Dict[str, Any]: + f = {} # type: Dict[str, Any] + f['origin'] = target + if arcname is not None: + f['filename'] = pathlib.Path(arcname).as_posix() + else: + f['filename'] = target.as_posix() + if os.name == 'nt': + fstat = target.lstat() + if target.is_symlink(): + if dereference: + fstat = target.stat() + if stat.S_ISDIR(fstat.st_mode): + f['emptystream'] = True + f['attributes'] = fstat.st_file_attributes & FILE_ATTRIBUTE_WINDOWS_MASK # type: ignore # noqa + else: + f['emptystream'] = False + f['attributes'] = stat.FILE_ATTRIBUTE_ARCHIVE # type: ignore # noqa + f['uncompressed'] = fstat.st_size + else: + f['emptystream'] = False + f['attributes'] = fstat.st_file_attributes & FILE_ATTRIBUTE_WINDOWS_MASK # type: ignore # noqa + # f['attributes'] |= stat.FILE_ATTRIBUTE_REPARSE_POINT # type: ignore # noqa + elif target.is_dir(): + f['emptystream'] = True + f['attributes'] = fstat.st_file_attributes & FILE_ATTRIBUTE_WINDOWS_MASK # type: ignore # noqa + elif target.is_file(): + f['emptystream'] = False + f['attributes'] = stat.FILE_ATTRIBUTE_ARCHIVE # type: ignore # noqa + f['uncompressed'] = fstat.st_size + else: + fstat = target.lstat() + if target.is_symlink(): + if dereference: + fstat = target.stat() + if stat.S_ISDIR(fstat.st_mode): + f['emptystream'] = True 
+ f['attributes'] = stat.FILE_ATTRIBUTE_DIRECTORY # type: ignore # noqa + f['attributes'] |= FILE_ATTRIBUTE_UNIX_EXTENSION | (stat.S_IFDIR << 16) + f['attributes'] |= (stat.S_IMODE(fstat.st_mode) << 16) + else: + f['emptystream'] = False + f['attributes'] = stat.FILE_ATTRIBUTE_ARCHIVE # type: ignore # noqa + f['attributes'] |= FILE_ATTRIBUTE_UNIX_EXTENSION | (stat.S_IMODE(fstat.st_mode) << 16) + else: + f['emptystream'] = False + f['attributes'] = stat.FILE_ATTRIBUTE_ARCHIVE | stat.FILE_ATTRIBUTE_REPARSE_POINT # type: ignore # noqa + f['attributes'] |= FILE_ATTRIBUTE_UNIX_EXTENSION | (stat.S_IFLNK << 16) + f['attributes'] |= (stat.S_IMODE(fstat.st_mode) << 16) + elif target.is_dir(): + f['emptystream'] = True + f['attributes'] = stat.FILE_ATTRIBUTE_DIRECTORY # type: ignore # noqa + f['attributes'] |= FILE_ATTRIBUTE_UNIX_EXTENSION | (stat.S_IFDIR << 16) + f['attributes'] |= (stat.S_IMODE(fstat.st_mode) << 16) + elif target.is_file(): + f['emptystream'] = False + f['uncompressed'] = fstat.st_size + f['attributes'] = stat.FILE_ATTRIBUTE_ARCHIVE # type: ignore # noqa + f['attributes'] |= FILE_ATTRIBUTE_UNIX_EXTENSION | (stat.S_IMODE(fstat.st_mode) << 16) + + f['creationtime'] = fstat.st_ctime + f['lastwritetime'] = fstat.st_mtime + f['lastaccesstime'] = fstat.st_atime + return f + + # -------------------------------------------------------------------------- + # The public methods which SevenZipFile provides: + def getnames(self) -> List[str]: + """Return the members of the archive as a list of their names. It has + the same order as the list returned by getmembers(). 
+ """ + return list(map(lambda x: x.filename, self.files)) + + def archiveinfo(self) -> ArchiveInfo: + fstat = os.stat(self.filename) + uncompressed = 0 + for f in self.files: + uncompressed += f.uncompressed_size + return ArchiveInfo(self.filename, fstat.st_size, self.header.size, self._get_method_names(), + self._is_solid(), len(self.header.main_streams.unpackinfo.folders), + uncompressed) + + def list(self) -> List[FileInfo]: + """Returns contents information """ + alist = [] # type: List[FileInfo] + creationtime = None # type: Optional[datetime.datetime] + for f in self.files: + if f.lastwritetime is not None: + creationtime = filetime_to_dt(f.lastwritetime) + alist.append(FileInfo(f.filename, f.compressed, f.uncompressed_size, f.archivable, f.is_directory, + creationtime, f.crc32)) + return alist + + def readall(self) -> Optional[Dict[str, IO[Any]]]: + return self._extract(path=None, return_dict=True) + + def extractall(self, path: Optional[Any] = None, callback: Optional[ExtractCallback] = None) -> None: + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. 
+ """ + self._extract(path=path, return_dict=False, callback=callback) + + def read(self, targets: Optional[List[str]] = None) -> Optional[Dict[str, IO[Any]]]: + return self._extract(path=None, targets=targets, return_dict=True) + + def extract(self, path: Optional[Any] = None, targets: Optional[List[str]] = None) -> None: + self._extract(path, targets, return_dict=False) + + def _extract(self, path: Optional[Any] = None, targets: Optional[List[str]] = None, + return_dict: bool = False, callback: Optional[ExtractCallback] = None) -> Optional[Dict[str, IO[Any]]]: + if callback is not None and not isinstance(callback, ExtractCallback): + raise ValueError('Callback specified is not a subclass of py7zr.callbacks.ExtractCallback class') + elif callback is not None: + self.reporterd = threading.Thread(target=self.reporter, args=(callback,), daemon=True) + self.reporterd.start() + target_junction = [] # type: List[pathlib.Path] + target_sym = [] # type: List[pathlib.Path] + target_files = [] # type: List[Tuple[pathlib.Path, Dict[str, Any]]] + target_dirs = [] # type: List[pathlib.Path] + if path is not None: + if isinstance(path, str): + path = pathlib.Path(path) + try: + if not path.exists(): + path.mkdir(parents=True) + else: + pass + except OSError as e: + if e.errno == errno.EEXIST and path.is_dir(): + pass + else: + raise e + fnames = [] # type: List[str] # check duplicated filename in one archive? + self.q.put(('pre', None, None)) + for f in self.files: + # TODO: sanity check + # check whether f.filename with invalid characters: '../' + if f.filename.startswith('../'): + raise Bad7zFile + # When archive has a multiple files which have same name + # To guarantee order of archive, multi-thread decompression becomes off. + # Currently always overwrite by latter archives. + # TODO: provide option to select overwrite or skip. 
+ if f.filename not in fnames: + outname = f.filename + else: + i = 0 + while True: + outname = f.filename + '_%d' % i + if outname not in fnames: + break + fnames.append(outname) + if path is not None: + outfilename = path.joinpath(outname) + else: + outfilename = pathlib.Path(outname) + if os.name == 'nt': + if outfilename.is_absolute(): + # hack for microsoft windows path length limit < 255 + outfilename = pathlib.WindowsPath('\\\\?\\' + str(outfilename)) + if targets is not None and f.filename not in targets: + self.worker.register_filelike(f.id, None) + continue + if f.is_directory: + if not outfilename.exists(): + target_dirs.append(outfilename) + target_files.append((outfilename, f.file_properties())) + else: + pass + elif f.is_socket: + pass + elif return_dict: + fname = outfilename.as_posix() + _buf = io.BytesIO() + self._dict[fname] = _buf + self.worker.register_filelike(f.id, MemIO(_buf)) + elif f.is_symlink: + target_sym.append(outfilename) + try: + if outfilename.exists(): + outfilename.unlink() + except OSError as ose: + if ose.errno not in [errno.ENOENT]: + raise + self.worker.register_filelike(f.id, outfilename) + elif f.is_junction: + target_junction.append(outfilename) + self.worker.register_filelike(f.id, outfilename) + else: + self.worker.register_filelike(f.id, outfilename) + target_files.append((outfilename, f.file_properties())) + for target_dir in sorted(target_dirs): + try: + target_dir.mkdir() + except FileExistsError: + if target_dir.is_dir(): + pass + elif target_dir.is_file(): + raise DecompressionError("Directory {} is existed as a normal file.".format(str(target_dir))) + else: + raise DecompressionError("Directory {} making fails on unknown condition.".format(str(target_dir))) + + try: + if callback is not None: + self.worker.extract(self.fp, parallel=(not self.password_protected and not self._filePassed), q=self.q) + else: + self.worker.extract(self.fp, parallel=(not self.password_protected and not self._filePassed)) + except 
CrcError as ce: + raise Bad7zFile("CRC32 error on archived file {}.".format(str(ce))) + + self.q.put(('post', None, None)) + if return_dict: + return self._dict + else: + # create symbolic links on target path as a working directory. + # if path is None, work on current working directory. + for t in target_sym: + sym_dst = t.resolve() + with sym_dst.open('rb') as b: + sym_src = b.read().decode(encoding='utf-8') # symlink target name stored in utf-8 + sym_dst.unlink() # unlink after close(). + sym_dst.symlink_to(pathlib.Path(sym_src)) + # create junction point only on windows platform + if sys.platform.startswith('win'): + for t in target_junction: + junction_dst = t.resolve() + with junction_dst.open('rb') as b: + junction_target = pathlib.Path(b.read().decode(encoding='utf-8')) + junction_dst.unlink() + _winapi.CreateJunction(junction_target, str(junction_dst)) # type: ignore # noqa + # set file properties + for o, p in target_files: + self._set_file_property(o, p) + return None + + def reporter(self, callback: ExtractCallback): + while True: + try: + item = self.q.get(timeout=1) # type: Optional[Tuple[str, str, str]] + except queue.Empty: + pass + else: + if item is None: + break + elif item[0] == 's': + callback.report_start(item[1], item[2]) + elif item[0] == 'e': + callback.report_end(item[1], item[2]) + elif item[0] == 'pre': + callback.report_start_preparation() + elif item[0] == 'post': + callback.report_postprocess() + elif item[0] == 'w': + callback.report_warning(item[1]) + else: + pass + self.q.task_done() + + def writeall(self, path: Union[pathlib.Path, str], arcname: Optional[str] = None): + """Write files in target path into archive.""" + if isinstance(path, str): + path = pathlib.Path(path) + if not path.exists(): + raise ValueError("specified path does not exist.") + if path.is_dir() or path.is_file(): + self._writeall(path, arcname) + else: + raise ValueError("specified path is not a directory or a file") + + def _writeall(self, path, arcname): + 
try: + if path.is_symlink() and not self.dereference: + self.write(path, arcname) + elif path.is_file(): + self.write(path, arcname) + elif path.is_dir(): + if not path.samefile('.'): + self.write(path, arcname) + for nm in sorted(os.listdir(str(path))): + arc = os.path.join(arcname, nm) if arcname is not None else None + self._writeall(path.joinpath(nm), arc) + else: + return # pathlib ignores ELOOP and return False for is_*(). + except OSError as ose: + if self.dereference and ose.errno in [errno.ELOOP]: + return # ignore ELOOP here, this resulted to stop looped symlink reference. + elif self.dereference and sys.platform == 'win32' and ose.errno in [errno.ENOENT]: + return # ignore ENOENT which is happened when a case of ELOOP on windows. + else: + raise + + def write(self, file: Union[pathlib.Path, str], arcname: Optional[str] = None): + """Write single target file into archive(Not implemented yet).""" + if isinstance(file, str): + path = pathlib.Path(file) + elif isinstance(file, pathlib.Path): + path = file + else: + raise ValueError("Unsupported file type.") + file_info = self._make_file_info(path, arcname, self.dereference) + self.files.append(file_info) + + def close(self): + """Flush all the data into archive and close it. + When close py7zr start reading target and writing actual archive file. 
+ """ + if 'w' in self.mode: + self._write_archive() + if 'r' in self.mode: + if self.reporterd is not None: + self.q.put_nowait(None) + self.reporterd.join(1) + if self.reporterd.is_alive(): + raise InternalError("Progress report thread terminate error.") + self.reporterd = None + self._fpclose() + self._var_release() + + def reset(self) -> None: + """When read mode, it reset file pointer, decompress worker and decompressor""" + if self.mode == 'r': + self._reset_worker() + self._reset_decompressor() + + def test(self) -> Optional[bool]: + self._reset_worker() + crcs = self.header.main_streams.packinfo.crcs # type: Optional[List[int]] + if crcs is None or len(crcs) == 0: + return None + # check packed stream's crc + assert len(crcs) == len(self.header.main_streams.packinfo.packpositions) + for i, p in enumerate(self.header.main_streams.packinfo.packpositions): + if not self._test_digest_raw(p, self.header.main_streams.packinfo.packsizes[i], crcs[i]): + return False + return True + + def testzip(self) -> Optional[str]: + self._reset_worker() + for f in self.files: + self.worker.register_filelike(f.id, None) + try: + self.worker.extract(self.fp, parallel=(not self.password_protected)) # TODO: print progress + except CrcError as crce: + return str(crce) + else: + return None + + +# -------------------- +# exported functions +# -------------------- +def is_7zfile(file: Union[BinaryIO, str, pathlib.Path]) -> bool: + """Quickly see if a file is a 7Z file by checking the magic number. + The file argument may be a filename or file-like object too. 
+ """ + result = False + try: + if isinstance(file, io.IOBase) and hasattr(file, "read"): + result = SevenZipFile._check_7zfile(file) # type: ignore # noqa + elif isinstance(file, str): + with open(file, 'rb') as fp: + result = SevenZipFile._check_7zfile(fp) + elif isinstance(file, pathlib.Path) or isinstance(file, pathlib.PosixPath) or \ + isinstance(file, pathlib.WindowsPath): + with file.open(mode='rb') as fp: # type: ignore # noqa + result = SevenZipFile._check_7zfile(fp) + else: + raise TypeError('invalid type: file should be str, pathlib.Path or BinaryIO, but {}'.format(type(file))) + except OSError: + pass + return result + + +def unpack_7zarchive(archive, path, extra=None): + """Function for registering with shutil.register_unpack_format()""" + arc = SevenZipFile(archive) + arc.extractall(path) + arc.close() + + +def pack_7zarchive(base_name, base_dir, owner=None, group=None, dry_run=None, logger=None): + """Function for registering with shutil.register_archive_format()""" + target_name = '{}.7z'.format(base_name) + archive = SevenZipFile(target_name, mode='w') + archive.writeall(path=base_dir) + archive.close() diff --git a/py7zr/win32compat.py b/py7zr/win32compat.py new file mode 100644 index 0000000..dc72bfd --- /dev/null +++ b/py7zr/win32compat.py @@ -0,0 +1,174 @@ +import pathlib +import stat +import sys +from logging import getLogger +from typing import Union + +if sys.platform == "win32": + import ctypes + from ctypes.wintypes import BOOL, DWORD, HANDLE, LPCWSTR, LPDWORD, LPVOID, LPWSTR + + _stdcall_libraries = {} + _stdcall_libraries['kernel32'] = ctypes.WinDLL('kernel32') + CloseHandle = _stdcall_libraries['kernel32'].CloseHandle + CreateFileW = _stdcall_libraries['kernel32'].CreateFileW + DeviceIoControl = _stdcall_libraries['kernel32'].DeviceIoControl + GetFileAttributesW = _stdcall_libraries['kernel32'].GetFileAttributesW + OPEN_EXISTING = 3 + GENERIC_READ = 2147483648 + FILE_FLAG_OPEN_REPARSE_POINT = 0x00200000 + FSCTL_GET_REPARSE_POINT = 
0x000900A8 + FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 + IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 + IO_REPARSE_TAG_SYMLINK = 0xA000000C + MAXIMUM_REPARSE_DATA_BUFFER_SIZE = 16 * 1024 + + def _check_bit(val: int, flag: int) -> bool: + return bool(val & flag == flag) + + class SymbolicLinkReparseBuffer(ctypes.Structure): + """ Implementing the below in Python: + + typedef struct _REPARSE_DATA_BUFFER { + ULONG ReparseTag; + USHORT ReparseDataLength; + USHORT Reserved; + union { + struct { + USHORT SubstituteNameOffset; + USHORT SubstituteNameLength; + USHORT PrintNameOffset; + USHORT PrintNameLength; + ULONG Flags; + WCHAR PathBuffer[1]; + } SymbolicLinkReparseBuffer; + struct { + USHORT SubstituteNameOffset; + USHORT SubstituteNameLength; + USHORT PrintNameOffset; + USHORT PrintNameLength; + WCHAR PathBuffer[1]; + } MountPointReparseBuffer; + struct { + UCHAR DataBuffer[1]; + } GenericReparseBuffer; + } DUMMYUNIONNAME; + } REPARSE_DATA_BUFFER, *PREPARSE_DATA_BUFFER; + """ + # See https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/content/ntifs/ns-ntifs-_reparse_data_buffer + _fields_ = [ + ('flags', ctypes.c_ulong), + ('path_buffer', ctypes.c_byte * (MAXIMUM_REPARSE_DATA_BUFFER_SIZE - 20)) + ] + + class MountReparseBuffer(ctypes.Structure): + _fields_ = [ + ('path_buffer', ctypes.c_byte * (MAXIMUM_REPARSE_DATA_BUFFER_SIZE - 16)), + ] + + class ReparseBufferField(ctypes.Union): + _fields_ = [ + ('symlink', SymbolicLinkReparseBuffer), + ('mount', MountReparseBuffer) + ] + + class ReparseBuffer(ctypes.Structure): + _anonymous_ = ("u",) + _fields_ = [ + ('reparse_tag', ctypes.c_ulong), + ('reparse_data_length', ctypes.c_ushort), + ('reserved', ctypes.c_ushort), + ('substitute_name_offset', ctypes.c_ushort), + ('substitute_name_length', ctypes.c_ushort), + ('print_name_offset', ctypes.c_ushort), + ('print_name_length', ctypes.c_ushort), + ('u', ReparseBufferField) + ] + + def is_reparse_point(path: Union[str, pathlib.Path]) -> bool: + GetFileAttributesW.argtypes = 
[LPCWSTR] + GetFileAttributesW.restype = DWORD + return _check_bit(GetFileAttributesW(str(path)), stat.FILE_ATTRIBUTE_REPARSE_POINT) + + def readlink(path: Union[str, pathlib.Path]) -> Union[str, pathlib.WindowsPath]: + # FILE_FLAG_OPEN_REPARSE_POINT alone is not enough if 'path' + # is a symbolic link to a directory or a NTFS junction. + # We need to set FILE_FLAG_BACKUP_SEMANTICS as well. + # See https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-createfilea + + # description from _winapi.c:601 + # /* REPARSE_DATA_BUFFER usage is heavily under-documented, especially for + # junction points. Here's what I've learned along the way: + # - A junction point has two components: a print name and a substitute + # name. They both describe the link target, but the substitute name is + # the physical target and the print name is shown in directory listings. + # - The print name must be a native name, prefixed with "\??\". + # - Both names are stored after each other in the same buffer (the + # PathBuffer) and both must be NUL-terminated. + # - There are four members defining their respective offset and length + # inside PathBuffer: SubstituteNameOffset, SubstituteNameLength, + # PrintNameOffset and PrintNameLength. 
+ # - The total size we need to allocate for the REPARSE_DATA_BUFFER, thus, + # is the sum of: + # - the fixed header size (REPARSE_DATA_BUFFER_HEADER_SIZE) + # - the size of the MountPointReparseBuffer member without the PathBuffer + # - the size of the prefix ("\??\") in bytes + # - the size of the print name in bytes + # - the size of the substitute name in bytes + # - the size of two NUL terminators in bytes */ + + target_is_path = isinstance(path, pathlib.Path) + if target_is_path: + target = str(path) + else: + target = path + CreateFileW.argtypes = [LPWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE] + CreateFileW.restype = HANDLE + DeviceIoControl.argtypes = [HANDLE, DWORD, LPVOID, DWORD, LPVOID, DWORD, LPDWORD, LPVOID] + DeviceIoControl.restype = BOOL + handle = HANDLE(CreateFileW(target, GENERIC_READ, 0, None, OPEN_EXISTING, + FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OPEN_REPARSE_POINT, 0)) + buf = ReparseBuffer() + ret = DWORD(0) + status = DeviceIoControl(handle, FSCTL_GET_REPARSE_POINT, None, 0, ctypes.byref(buf), + MAXIMUM_REPARSE_DATA_BUFFER_SIZE, ctypes.byref(ret), None) + CloseHandle(handle) + if not status: + logger = getLogger(__file__) + logger.error("Failed IOCTL access to REPARSE_POINT {})".format(target)) + raise ValueError("not a symbolic link or access permission violation") + + if buf.reparse_tag == IO_REPARSE_TAG_SYMLINK: + offset = buf.substitute_name_offset + ending = offset + buf.substitute_name_length + rpath = bytearray(buf.symlink.path_buffer)[offset:ending].decode('UTF-16-LE') + elif buf.reparse_tag == IO_REPARSE_TAG_MOUNT_POINT: + offset = buf.substitute_name_offset + ending = offset + buf.substitute_name_length + rpath = bytearray(buf.mount.path_buffer)[offset:ending].decode('UTF-16-LE') + else: + raise ValueError("not a symbolic link") + # on posixmodule.c:7859 in py38, we do that + # ``` + # else if (rdb->ReparseTag == IO_REPARSE_TAG_MOUNT_POINT) + # { + # name = (wchar_t *)((char*)rdb->MountPointReparseBuffer.PathBuffer + + # 
rdb->MountPointReparseBuffer.SubstituteNameOffset); + # nameLen = rdb->MountPointReparseBuffer.SubstituteNameLength / sizeof(wchar_t); + # } + # else + # { + # PyErr_SetString(PyExc_ValueError, "not a symbolic link"); + # } + # if (nameLen > 4 && wcsncmp(name, L"\\??\\", 4) == 0) { + # /* Our buffer is mutable, so this is okay */ + # name[1] = L'\\'; + # } + # ``` + # so substitute prefix here. + if rpath.startswith('\\??\\'): + rpath = '\\\\' + rpath[2:] + if target_is_path: + return pathlib.WindowsPath(rpath) + else: + return rpath diff --git a/requests/__init__.py b/requests/__init__.py new file mode 100644 index 0000000..626247c --- /dev/null +++ b/requests/__init__.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- + +# __ +# /__) _ _ _ _ _/ _ +# / ( (- (/ (/ (- _) / _) +# / + +""" +Requests HTTP Library +~~~~~~~~~~~~~~~~~~~~~ + +Requests is an HTTP library, written in Python, for human beings. +Basic GET usage: + + >>> import requests + >>> r = requests.get('https://www.python.org') + >>> r.status_code + 200 + >>> b'Python is a programming language' in r.content + True + +... or POST: + + >>> payload = dict(key1='value1', key2='value2') + >>> r = requests.post('https://httpbin.org/post', data=payload) + >>> print(r.text) + { + ... + "form": { + "key1": "value1", + "key2": "value2" + }, + ... + } + +The other HTTP methods are supported - see `requests.api`. Full documentation +is at . + +:copyright: (c) 2017 by Kenneth Reitz. +:license: Apache 2.0, see LICENSE for more details. +""" + +import urllib3 +import chardet +import warnings +from .exceptions import RequestsDependencyWarning + + +def check_compatibility(urllib3_version, chardet_version): + urllib3_version = urllib3_version.split('.') + assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git. + + # Sometimes, urllib3 only reports its version as 16.1. + if len(urllib3_version) == 2: + urllib3_version.append('0') + + # Check urllib3 for compatibility. 
+ major, minor, patch = urllib3_version # noqa: F811 + major, minor, patch = int(major), int(minor), int(patch) + # urllib3 >= 1.21.1, <= 1.25 + assert major == 1 + assert minor >= 21 + assert minor <= 25 + + # Check chardet for compatibility. + major, minor, patch = chardet_version.split('.')[:3] + major, minor, patch = int(major), int(minor), int(patch) + # chardet >= 3.0.2, < 3.1.0 + assert major == 3 + assert minor < 1 + assert patch >= 2 + + +def _check_cryptography(cryptography_version): + # cryptography < 1.3.4 + try: + cryptography_version = list(map(int, cryptography_version.split('.'))) + except ValueError: + return + + if cryptography_version < [1, 3, 4]: + warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version) + warnings.warn(warning, RequestsDependencyWarning) + +# Check imported dependencies for compatibility. +try: + check_compatibility(urllib3.__version__, chardet.__version__) +except (AssertionError, ValueError): + warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported " + "version!".format(urllib3.__version__, chardet.__version__), + RequestsDependencyWarning) + +# Attempt to enable urllib3's SNI support, if possible +try: + from urllib3.contrib import pyopenssl + pyopenssl.inject_into_urllib3() + + # Check cryptography version + from cryptography import __version__ as cryptography_version + _check_cryptography(cryptography_version) +except ImportError: + pass + +# urllib3's DependencyWarnings should be silenced. +from urllib3.exceptions import DependencyWarning +warnings.simplefilter('ignore', DependencyWarning) + +from .__version__ import __title__, __description__, __url__, __version__ +from .__version__ import __build__, __author__, __author_email__, __license__ +from .__version__ import __copyright__, __cake__ + +from . import utils +from . 
import packages +from .models import Request, Response, PreparedRequest +from .api import request, get, head, post, patch, put, delete, options +from .sessions import session, Session +from .status_codes import codes +from .exceptions import ( + RequestException, Timeout, URLRequired, + TooManyRedirects, HTTPError, ConnectionError, + FileModeWarning, ConnectTimeout, ReadTimeout +) + +# Set default logging handler to avoid "No handler found" warnings. +import logging +from logging import NullHandler + +logging.getLogger(__name__).addHandler(NullHandler()) + +# FileModeWarnings go off per the default. +warnings.simplefilter('default', FileModeWarning, append=True) diff --git a/requests/__pycache__/__init__.cpython-38.pyc b/requests/__pycache__/__init__.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e8d5e982e478a652a6cc26beb806980274709ec GIT binary patch literal 3369 zcma)8OH&-l5$?x4ng#(vNCG`}OGvvg60`ulM*=<3?%J$Yfku|Lf^e&uDh;ja>TXxn z2n`E981K>l0gk@-?1L|k@DK38r@s1PMfex&!FFa(4 zt^vP4|Kkt8d^KVif5*nr-vBfoz+e5-Gz?}?gPF{VOlszDi(2{HrZ%%#DJqFFEt>`_ zv&wG`T47b@z`MFV#w*->=Wy$tvuBycD_c~w@m{dZtoAWf&HPr<$=Y^ z2VniZ5nHc~?OB+~jgE<7It;eM`y&T126ThbsBq|U&{g^IJu~+{4&KMW^PhU2dmg}c zC^{j=>3BZZNp_5nqko8eIC8`(dP z^Ypy&Bxm#WbB3GjvAOXiZ$E zm&Fx&Ma+_5iN;jx(sVy+P}{q9GLqL`SPA|06WI*cPH5- zud_4k?C)$CWxX(J=YpRs!*JL5uR2_5MZRt{o|DyZLk6k9_feq2Ci!ZmK{_gE@w)30LY9}8Nhs1p zY9fnp`f5p*4s~9O>zV0Ity5LMdUb1S%S-clyhOI9YlkB|r32kjeluYlm5cN9{l1Oq zUStI0C#g(YG7thHBT*2yIuOt_hA+Kl62nq7j%&yccU{l(2rTCLcP|>BVorl@lmrYy zWMNa!w0U=7Y5IK-b@;;cEP>`?(Of(d-q8i6i8`_(T-in#<8rQI5BOk#4wi~(4p*4U zFxE5D)BJ8H>O((sK(o_3;$u%y`$u%Uqtli0=G z*W_M5lZOK&4;V}W#>xwmWzTi%%_Qy0u(he{WTshzTrS)q8(s2q9>)*^S>vJJ_S||D zHhHXgojgo~Cd9hv&Cll1tbX%&<=Kmsyb?tM;WDm+FoH__h&1@@Bha2vGqcJ5Ie5ZT z9y1;{yRQNnLuoYOlpOxbAeG?%82;*S06WG$9Kr*07bIr2%(ks2wD}uTTRX;{$?Ucz zJ+J}t#DlnFtlPWRuDx5*r5$U}1hjK3?U}Y=8NI$8i~!f zWatvyU2qHF^F7?UFO1iD`QRPfvfGAiu=0L+2e^CJ)Rq0}j;WpeCd1@Zo=L30JR=e> 
z_m9}zzW!arNU^shc}tqIn`-2@bhArle%P%M5p+p2=qNa{QL@F7t=Ov#WThxnI~H-SvASmq#&58;wnFz)fYVhqrhuaL7|Km?pHoEc|IbmHbTJ@a5eaHZ54Y% z2qGJ8B+0iy++E`g+D)yp0sOS+71PM$1c_#gF7Jo0@(RvRa$ahA zCgl(q$+HNbAs`XZK6T|^1rk1Yrciq#jNidZfKGIRwSeSChmM{G`8n8SgWu#|Hu?JL zSYhZL3F8u44Jr5liul_w(p<_fK_#aVW)Nx!ml3WY%p%MocmSCpucADUaz(&aMoJ=! z$wh=~sJ7uMme;X)1K}pZEd(qFc^lylLLDG;iYid@F6!=~Zs^E2m6A)SUq-l(@BrZ< z!XpHvK=}k=1>q?`R*jNY3ocXn6}ASTDpx@PDTIQL1A#}(4HzpDdxSNR{bf!a&OKOL zC*sRUvuYKfbkQ;K8M2*8XUHhph&4(gfJM=N$iBpOwuH?u=yYS z4+O7z5w9NoH}+^R9zFNu8|!M@7xMVNFZq)9lGE`xMiAeBetG+?gV1G5ZU$&*AEtW@ zi6eq|hje(CbomNd;XTshebVOxGT=ipUpr8f~pEA}cN$;k*kEj_1DAH9ne5 zj`2Hf@ljK;+9hGL^xn9=ot@>@#0tiJDY!0NB6S#KN%ka3C3@6FVf;eWweTfA5z>dS zQHlNOwPIT#(e^yV2Zj};z*&-phlkqCtvoM1&eAN!x9rV5=;+6XPx#lz_g^u9S6lI7 zM5&iv2}&3DDdoZyRvIsL4Z@Jp#wq}-l+J{8*D5&98mTzwF{P}5J}uz2e^^vZ{&gu? zh}yM>k0~vdwE>NQvMDTlMz$^MPr7<@xR;+gxGA49#j5S%xKVIo=8NcQrFo;ovwr)t>yUb#yW= literal 0 HcmV?d00001 diff --git a/requests/__pycache__/_internal_utils.cpython-38.pyc b/requests/__pycache__/_internal_utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6ae5c0c0b526d42686545de6dde76b5c2f9858c GIT binary patch literal 1303 zcmZ8h&5qkP5GE!0Cu-JM1U>Z-@E8M)Q3N>|MbX73Z384hkljUF*Qi2^vk|)zsgjhp zHk=%iJV^XJ8x9(q*j)>f;+|yupGUn0>S!R<%X>` z?D4)vdRfT4U;=7Tf@#Iz7iXXXj5>!)?}Y|f0c8!wvKzL+aBeYgRdY8@|Dt%wY1|H_ z&Famq_M=rJOD9!kooQpg47kmu%p;8JheycIW1ImcM|Q-~mytQ#k!O+m#6^bf$m@7V z-%#{Zw3E)>MTipJ{{FRGgJPUv1*x7gz4{4q_bD^*lp9fkwM?Hgx2eJJxyB$S9Ie`b z1?9L$2_sU5BVe`9BNe*96b7uJPu`^gm22T^aYRbM-nlHmKuDt?$FkgPX6&?(Sk8Lj zEjv0nzITr`Dm3MEi7;acb15y%!olgFO{JB}I<9gs9vI`%YKNgDw8KyhJjz|0?YM8I zjdkPR5_1*f|Nh~4zC5!?)AE9syjmXB^&Rf`^1jY_xjcocUe>a{87h_K&Gl#7)!N+N zE_=cKXMCW%S-oiwoz6N~Jhh0!#FOXil0iy`Q9`dqX=MCv?lsX5Z;x>XuIFggxaS2oJjXM3mIk!vHEvX%TGs4uGXF4<(=0C_znHNHjdg4VeL%?IHRCm) zv}2Dpu0SQ-VoxyPla9~1@q(GNmDXiAU

Y)2|**2WE(t!9NYsPL3>6<#l-17;VhR zZ(cj=Zgkgc4Z?>hZO471f#z;G@ZpDh05W`h;gwy--Hwydghmv3p%ZGZ;qJpJ4Q)(E x?I_EH&awJmh6!%Sz1S0%eL$|j4Yd?s923mDIygoQ^l*{&QM%( z|8!?o61PjoFrCCH;>P_!TA+EL(x7efP!t6kr0CAcXZMJF>&z%e!-7a zMe!6z@l;Q%sgB0~x})P?uNn1>lc{H&tV;KdTCQ$7Cf75yk-FtrT+gCD>g2hetBuvi zopG+4wTXJcDR6xR^+{)v>lW%$&K|ChqCV|Rb3Kpxea?McA47e{xu5IfsPA?5a(x2z zea=3v7f^q|d64UqsPA|7bA1Z+1I_`|_juE_hw2YI4|Dy#nq5EW9IPL54)OI&?UDLn z=WzXqbEN*L^Jx93bF_ZUIaV(^#rk8;W8CNd+Q;h0o#R~JTRTxNIVG;|t36&n>71-T z;XF})(s>f|cmVUAsXygBRX^pNsz2>KU4O=ThWkEH`*{6X=UG+h6oADCYoDmkI&KIZ@+io4aJ%LywcDN_3H4)vX z$I$%l^8D(}O5t#fYZUL!iGU0e3+t!u4n>jW4wbZfN}w@!9eZi!Q! zYYAWXDhY>{TASh+K$q|fXbFnS*XQ3MJut9Hx|yK@pWsOpzT#kkB>ElwE6OSqfv3M= zI)*pu6Q&n(8dbvxdd*Iknt{H z=GE2BfrTzq8Dp3@bcydaj@wXTx7VtbHM>FtjiqCvp=j0uG)wf}_H79%g-^@xVdrRI zFJD};E4~P;t5t}8KVG>M>F@K1dOHY#Iv|IH_#oIOTIc8Nm3G5}rpA6i{iZ$Qh0&u1 zmNnPdq<-QAw#PZ!|qvZh8ODCxjHD&_oQabiKoDDQ7#@4WhK!@xp+IHtVDBU(E6fW z{BX(gBuJU>NhmbD=ZJq<@D)0Re#WPpyM)jOaD3t?18d_dwId{*RVzF95 z^S~!n2|H-+R|%vJJj2U)*&7+h@H6;k{oJa-s)7{6h-1bIVgxE94@F_&zUqy6<4_)> z-h@{`Ti%=Wrtm*TRyqF1y~Ey&cRzYeczeBlXeoFPcn{)#GPZ)JyobDpalOYo!gj9e zOtS*I55oD;Xp{`QSjo6wHxz})bERFah0vQe-L&JoEW2RM)T%2_m84|8EV+Ut(+$7j z3%7Pcayy7qthyDS$6QGaUKnEmsGW^ok_Irk@rqvGk=?Nu|_i*Ho-M6`5?!h7?$xVEkagmk8>GsRHr5T>?V0rZo390nUbB?#7rB_ z5J2Jf*mABZq~1J&l6_&--fXrp4J_scY_N9C6^Y`qi3a#-7~;WNCrV~ANnuHa2fZer zLXg1dqdLxvyV2Y)%*jFUe6)JpSXPNF+YQXMn&r$&ndJADX zv*YdKp3H~64hbb+4d6-6&L(EonKNgSi6;1ur$cuF`?+L2K`CaJW5vD>Zhr?ic9~0k z^5s|`Q9k!n|InaRZ?+rZv7>8Y*qVL(@uRSJQ7HD53(J z`-GIZe=#Esg~Be}<@-@5pKNgxPf@TsObrEo*iAekm`0)KrdrVQYF5o^;~Gl*cc%L` zJ#`s&Sr4=WybA<})gbWyhWVzluAEc0$#>8f2L_QjK)Bh1U+@45+K?|yZK+)~RNll^ zc*?f^o7zodNfD3ZHjtfzM-Bld;6iGplW-$Z@3T5c$4Ay_GQ%p(dh5&^1UZW$3kOzH zI>!eW0$^|@Ls%Q7(_lci7Mv;d2IFyXsOSmgQ1wh3uJMy#mh?bu)3c^{d9FX^SPWi(0HEm1V(nGzgfC%+%kl@W}5G9CnJ%e8sznrJ0 zq^T{6i-1)$#bx|SLd&p~7UcqoJ@Fa3Ge}=WLsCNtqUxHpV}-hc|NI{6Ue5(Ru3zktY}*^kz#l+`X)T-HK;m0GcKV&`uf zUE{a4Un3?pJRKxuxHCZM1$e?$gg`pb3~s8aFN0U&ct))1$sv&eKDaDRFOjLFOvt8y 
zaREg^Jjz}IM6d@9NunG&l5}G#&HXzqinK`^3jPp$pa#MmD{O4U_D$M3F;4o3!akiy zuKh_N}m6Wf>5fwHGLcXK(nMx0?bci@4Dy4+;92kPah(WTD(}*5I_$cDmIx zG5RbQjc5es7ORdR%EgNoFM-h5tAdzVgcKcQ1VJnsjgq_^611az8dtdJB_VOwc;-XA z!i@GC=*eLnWlYLbQ-v_PXQY^ZmY9Yn3}s^S$04Fu{SK#Z+SXmr(86*`TCMo(~P-^_G1VkraTC8X0z<5XAK zF}r}Vl?_LD9Eq9UC>@NrYgAV<cgo*^R-!4NPFtH3^L|ERJj zuS4<^SEbdWWR=>;wkXEIkqqNF%JW~$d&JNtKHxKq>Bs#U=wZATl|-y1|>}DDMBJ&@BZzhKPdXs$6Oh15t+j%wTc3 zvoOyw4l?MFg!MpELAx4~y}_~P`MH5$uM8ej;F_; zA4Q}!gUF!Rc$D?;RFT?_au+ViJYp7dt?q`=2nC4|I=UG|nQGuwMPydoGBpO#Xk05( zvsFb355l;Cdou)Fj-(|-CT3QqS}`lV1cOYWNC#<(Ptt5o(*Tnn1sib43c_<}4szFi zh@Jt(Y?b_!X29DtG($DvIAkGlvw9ZpgQ=OZ=V_=GzIncyMi1j&R+Imo`}<{XAN&fb ziR3fjVZDLYP}zo=gX|g%Bv0E$K!7|9zSgVoHn-t*l#LMbcirH6mOPJg4%fML6E%~} zp>-=9-PXh_RNKjW27(HD*=mpkw5SGos;`fe*FxSJ{&mPx1SZM_Hjy&!_h2HOMNk$w zG$elFyjPkn>-h#r6T+CW)L`c%H7HW4C*o5AvB->LMIYL)Nn~USk8-!r^#A>SO%j_* zJrPi*lx_(chO&M3`=tL8FF*#VLuvk`C`cC(+f;O8P#7m&)4Zhg7ci&T7)qV9D=}VE znB+IOVLMoiaf`#9PEj8V{8!LSTyx~-fqpI<9KJZfY2=y^=gkvP4@F3HuWci8 z0h{I=I1K(fU4<0GP`BX6uj{aD3~(V?49X684(`lSEj<)PZs zfqf3daE5X&jh&we!cC~czBDBg8)UFj@SnM=N%;pvJ7Ny#gFoaa%%|Z9a=udpMh2IrPSRTsV7)6GIcQH7RGJBu)fr%&!NI$tpeg4(3V`H< zlM@fkp{#2=I!6KX6cv@NH>c%6jLx^tRl&)~!clt4MQWSVXE^eJV-Bz@`<&|ewh)M) z)HdUM4<}ubxgxI-yBE8Aycq-si9Gn`1O8&~9722wfRegkIrOGT33jr#5lBZQ0(BAc zi8dfY7T9-%^!&0MRt$=o+!Wf1A#8I#_jy7w`AbUHf=||&?3YEnf&mFMf{rO3uj7Iw z(0VL`AazjRR@Ze%9=&-8-i^8e-wM7~mgN&mrFDJBfSe(DlkrFzK@zQJx8PriYu#*c zjpPwQL5s<;$LHa_Tw381euPU960v8hIP304lIw&uihW7BDn$@7R5Y@Vyd`os#2dY{ zEx03^?Hg;hE7Ph>t+a#4!?tTRVkjF)G(0i|2PPsmF4>pROWdvoFh7yMiiI7g_h@zm zg3Lzj9S&oF=RrW!#(qT#j6*R4a`HojNQdQP3hfsVB8}1eGusnAl=moiSf0)eNlkfJ z5UeNcq)M`k#1p54Q<>Mt4T*UH36`Qov_Puo#VSfjDVBDxp&rXH3A$(dQ`7~=5lLe0 zrx7EPg~%%C$u;r2xQy2s z*uvucEhJ2w>+cDgl0C<=@79y0lb~NP59Nka)WZB-BmTpg2_W(8F9SLNX?N&<^2 z9SWsx;0-}y5yyIyV15dX_#?4V`y|oN63PER!%z@{F7`0lzxT0Ik`U)Iu(~_U&t488 zoa`|YsdQZFSh2^Qi%g$Iy^s7K0}bt>TFjyx&tjFRmSX!#evm@@`V3Gg*&C@sA;nx( zwlq>I98qUQA`X#Z+cmZ|awtzDmxh5VXBj99|4q98Zsvce#O@u8_d(0`c*4{CT 
zo(f=r5*x1wyKdkt1%-{kk9;hp?%u#@EP%q%V;KPr`9=(G&R9qt#Y81a zIW9E>tuS^d{v#8aHataBXU}Dd%b%FTaq*_8cKF0QOdpPQ9|cv#X?(UwrJL9bUk?gl zj%J5M#&mj388aY!N%U&>Oue(OPbQUk(h1?8A3WMiUTPk{dHjOE2Z48{;C)G3DSKDj z%Ha)+lA(m!h9zE5l`Xv4(kUR3gy;sc%-ZYf%gWOV>?ZS7^K*)PBLm}&F55V<|t;fKGCHJ53R<;L2v$I1&G{u=3Ca*_}glu z5KeXx@!r<(hRQjNnj(vFeNQ;;S#POZ6P>6#v3{R~L?{0!Gf&WVX4sOS!Kw;dlii7( z`@4niwj)*3N8D>2f_!tQ`-%h7>aEPyI7ZE+eGk%HUyt|yWvrm_HumLE_{jR_Q};qwEeO?*&hcmQ)Lr7ElDbY+5B7T`GIc1A~l$BXn8KE{-n%*YcDimx+@(3nnRKOf4 z%|qkLMF9KcumkidJOQyTh#v#sQ5z^#b?Ct zQ2jM3rl=tEUc64l4i$H)_%;AlwmAJ`Iz;V+;+u}tk0(JE{faKD+dm3;w3oo zdo=nyHz7vD=M=w7S0i*}kOPU)baGJi@})}`%gYyDTDWv|8E9zZ6()Ao#V_*+G)Rvj zvLL@>=YijQi1ao8PNdiT1`>euaw87P0(=tTk4BbpwuAG5f?Vt<&*x7m8UUYxgKUCK zNhCG+aCek}GYk*GMw=L=;;U3pWKEFCC=OCFNyQ^5a445;TloNfEpeoryzwdDRiv~4 z-sB+LRQ8&n!H1~$bri+vJ_qiDSVC}ER`n(AiEj)+iQ`xk2h2^938acFxlYXBlrnU-pE8WRx`T|=U?fP=L(KK}mH z%4HEV9@qBr)5kR&Kk5AdZ$IWlzrEUwyo0{eTBp!&@RNUB#5;oVdo(lR1*}QTQE`@v zc`81SA}aI~-O>eTK`c`9WPkJ7i+CSY9zu3#V3~@~P(d+w$tSdZl8c_F+71=WHhkoU zU&=X0Z9Ny}6kQ&tVgbc6egXOH$N=vH!tNP55TX#`#-~i(%;%@gBjzddi1napnFq{q zbB4zi=Xo(vt{Idl7A69M_-81bJ+XEy5z8lMfi$|U)9pfXJ7Evz2`8U4x5H|UlMEtD zWB!=NO!eO^k+|CA{3!#ki&>{vP!fo$btBQ z%t3yQZas_wa-=tFn_3T2afk|HY<^2uCRs=@;+QogkRWH(xi3Ko$#hX&DRCH)4{b%G zc$E$DWXb41A~-*!g6K>9DT*j7li2*Gl|1O)qYkbSbwuWA`QFzV+O>d0s9_?f0?t9S f@q@pVxKs}#iJ4cA>%@iNG<96(gIcPg7Y_V8SD?UI literal 0 HcmV?d00001 diff --git a/requests/__pycache__/api.cpython-38.pyc b/requests/__pycache__/api.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7277cc1c658cc8b567d30c22652066b4a8c23a49 GIT binary patch literal 6718 zcmeHM&u<&Y6<+?ZB-3`B-${EK6a^>-6zRHY4}>d3k(AnrA*mLH298?oaAzovwcOp# z48>Aaddj7j^w=JXB0*2S^j~Q6Z}8efn*X5aso$GfQlzXHj^O|(_A1;M}QX7(uI<{g}79~Tka4@5x}<3hi9zgP%FNt8bc?w7=? 
zVnHl^Qn+6hOX4zmE{Te`g5L%4nz$;i;mM+SUA!Q!qqQX75HE_C(7G&M7O!AbMO@kq zYHyzWwW8&32GUs5;F+rYe&(oD+C61hKNW*mGS$yw*_Vkm%=RR^m%qikn##6FNoB5e=Dl|5-xTCs{`Vq}Z0WjyN1n{{TSWOwhZZ*J{w zvTmv|ot8{U%T;U|m9GjgL9H-eGSV28CMGJPV{(+yAX9#T)BF?{8+=yyJow`eKQ5h? z9u&L5X>eK)g})R}i+?J9TKHv941N;)BDfd)E!cIx9|ocbGnDv$E(gIn#9KXxutJ^) zD3%DcSSyOTF|9CPF}syFRzs|t;b>)C^FHfiovCQ$ziusLs6-CWiwIfTc_1U0Dhz+V z*$%@x8rwVVJKI~k{^s`P+J2}YZ0gMAR3J4};y?mm z=@ElbRdP60<(xg|6&Kv{C!D?R=XPf3s#vbX>PSvC@D`pZHc_aPijmVFj(Mye4@{cO z>9NM%-`(EADz)OVI^i9J#LfsFPjY8I^;A8{h16eXbJF&9yS>A1`(e{*h6-Ut0DJ5@ zqBK2H@=@XBx^?&LeXh^1;K@Xj%^wv^pn=b!br@C?-j~%Dn|f!(d&Pty07l^b-P{m| zs*l64<`cmnwhDN(OeRq(2tq(z%7q6k!7L1&X!jmuMm%91iIBay;=1sBxk@9P9(AV& zG^kPR{P?j$>Z}^2iAA?@yZ#pG-i1F8@*m<(ZJZWe~|myX)x?bWI9+WpJ~O- zgk>P-@;qp!lM2~Y37LRAp1;88g$bGu)v-+63SIIF!}%E@mNS7!c2P2MI0@ojE*C=i zh-bNy)437ccsh+=GU!~+-~-#6j}>(I6hUZ(^(k21b>{jzhfLLX@+r_97@d^7RPZ=X zhX+~;r7_!l5WSnGvE)gEZ92O(g)+{5C=|`SMl`7?G*J(lJ5&mqz63O>Xh2~v6mi&K z8?wt0Mb27b*wzC%trJewNA&1m@>xYQo&NTs^pQt-pJy4F6vJ#9r7@YG07{jIssx%O zVKnDnxJh+KbKc!VE`#$pqxrG;I_2wOPenb%v;p_94)Z#V4Z2ZhF?cAMmo#+`Wdxei zL!oL|`|d7_q_(Q7B5q~wYF~~nn4%9*`uve`+nX1Q5Mb+TtTRXiu#9C6jC`zpJtx3i zlK1D6oHcxsSZ-vpUu&>CUDl>zUT4cy5>`d(JCdVn4X-PmJ5EbG(5Q!J zr6$)HT*$W&2tK!a^k{-t3t-Ht8#jKn>bh&i@q#CB;}VF| zR;{FoWR4eiJ?9)R=9AXSx`YRsXyWhKS1NO_?g=^e2yK1Er`--&@GDcX+ae{6$a_O#0ALpZq9OH&WVxd zY3=_lmE3=8|1IyM7TeC*EQ8AilT8e_h{W9NsE?K(N$fZ%i=Y}jE5MVu@%3|wwmmKZ zqvHk7wjUP96(cQ5JSA)AT|kiBsjtyxkuHQSf1wSJzKY8`I6Ww?TrIesh;~bpgD^bB z#P@M?T6h!_r-e@oz(V=ts~iXk%Zto2M#J;t4SfZ+Xpa{?xiI<#+&(ONdtYQ~ioI=| z-qe?2YU+7AXE^#v1f`T13#K)*(pi!#S6cyNT% zP$~|tYP$dWvi&-qP7o!4shLt;;{TY%)82P*dg2c`&k>wT*}!Ut7;^Q)yu`bXi_r51 z2A$xXpoi|C5a4Vg`|L3^Ph405WKAEzE;?(6}PtN z2mh~(7Y`0Z8XX*r1Gj5*^xcthM-w%%2$%RZ#jhrJI59JRbJU(#{dnn?|6|K4Impnf U<;vC4(v8aXrE5#qEAHR_074Md`2YX_ literal 0 HcmV?d00001 diff --git a/requests/__pycache__/auth.cpython-38.pyc b/requests/__pycache__/auth.cpython-38.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..0bd9ce9623a799c5693e43b9ccea99afe6221c0e GIT binary patch literal 8322 zcmdT}U2GdycAkF@MlIzRjf#P{>Sr2e#grwcG2PELtcukbtBf^rm7J(E{CuNODTsW= zA8!;ZMUglCiN<7Q68UUpidmIuHpX(FOO=@qB$j7|&m~syW_Q)f9LnRYh;q@JLwO$M z2{wuHq&JW936!VUG|JPWeSs+tq|!|1w=C{`*7m|ETz1>hmi6PqKg(L%s)cqVU~S*C zt3fkzYt7J(wmh3Ud(Eg;b)#C)w71+Q^F1Eg8v(cP4`#HUfnid)Bq!PRx2|~2YQVf_ zGO9{yVz#;8a(U<_Y8dgvT6aV2g4ddx=sxQ`iMU&hoN5qk*Sye)0;leBv=otRV$DaM zGweMf=5bgJ1GYSudAxJlZMFRV<}N&0di?nD5_Z;D!lLOmSZAuy_M;kxMi)dAb0fE^ z(1!az74Hn5TX@31KoUu@6v?|vcs^3tv=!*~nr>Z!E^#8_FI=_i}q!ZBFVc zF&$*4bMn3ywK-15wcmHcTGd{`3E3%+mOFo24w_48!ET2hZ@LXHv>(@e-);uBA2c^T zZm)ZG*lx7~9(l~JHHYVDe|PyJ{_K0cheIMcc;xX&h(WMk_o|V-6SQA{AZ4AY@+ z{LX3KE!k0k?ev$0V65*%ShYU}3y>f+J(t@XZS36>-5&)tX192-?t6`J87FQ+#JtUb z?~D*ZOFa%a`~N^hV)&5Q=B192p3Kik1+JCyiB$`sPm$ZKdfY%=Vm@|x6DklU8s!p$ zB-4$O$=hCwd#SGdFzv#18lXixEiW-rndT;{IEiqg)q`3y(TVjfp=|v#Ps{=1k_@RZ zI_yjQ1cp4nck9~2PtYxVxb6CG;x9_Wh+t7dz{Z z_?6DX0e&20XyWD8PBQL5scTh-%A~C)u}pXoiDX%_CTptl--@REr;#z0v}_E@swG>f zvlLC9{@T#^D;QIyfro!Y=uJG~JQ7a=97_bnfMJEHfJlvL&NfW?%uXBY`yXr%B zAbW0PyO4j{on33RAa4LP0cW8N3<1R08$4)?U;?d4kHCZ+G7&C=UHK2NHr7(gos+bf zy#?3;ZnndSlnVUwUC1*@^He#AJ{&COu~p6kzON)&==mFo%*9cp$pU6iOvjC!W0O%XLO=RF*6D$Z$NE|(NOixxhI zWMAd4QGQr)h%tjhs5NVmhTY9?{jat>hr)oajh-bYYkqN>GADn~=hXXc`FYvE``4 zvE}I8ZqX>x=af(2jj{_~(QSJj8xT5{D-B01+fLlo>H3C9@3KJbUOd{YHkd=;64kt%kN42}?Qn^W0iR5kV|l!@tiZUBUhyK)IF%7qtRU>l(3{9 zlKO14#bNxydPsDi?(8J*q*ab6@)9OI#9P5u66OF;NahEjnxS;&Mrk|%HZofVKgtiq z^1o$~ROW#KSoe`I(UQRSZ(6vP3G<(T}BD5LgLc19yT6GH~tF0hR^38U`xpMK6xH`j-+FbQ^8Pv4!#q z^28eCdtesqe|=aYZvY-ezfDIe!;CMb*kH!C$=6t>Yc|c;AerczvB@&qO6cQlZY#XQ zvdsEi>Sp8Yms0a4YR6FfG?neH620Z7 zWb>dT6BFI$hB_;CC)f$dc5y>Ne)0<`p1>Sacu(Rz9j83WZYxh^{(a(sCe z$7-5vf$AKKy+ZH0g*j#-_(qf`&xoA_w`SwnSWENR0ZLK6SJ;zAYQ}p-_UfKY?a@SU zGM?R&dsBO$i=FyII-9wapV-Aeg6{cv3Ok#l{nKt^{iz)OG|tCUphb(PiI!cZJKww> z>+Ch`T?sYzTCDe`nY|}(8&4H}hMh*wnRpKM(;GVWeS*fvC&W07c$(7gWZ4c~Y;5#v3aEL4nsafXU!$YuC#X43E7LA%Bzrwx 
zm?fL_jn1o<-Tz(V}k?5(;shVrEgzbtM1``F4cviL_tn-*@lmv|XjPPW4_YS1jG z`ExY>!qf~V>1067(&ejf{VFLkuo{+lIZb0}2SL_f9wF=PZ*xe8=1D2Ldg=17nldc$ ze^Z7Ig0jE*B`F)sbIfWcGnUjMBu@t@1z(3^M1)boT7Isd3$L9Eg(Q`fL=F>~K~u80 zTDQc`QW+y_ATh7Q@n2`|jeV4UhDZ8#YBCu9T)yyJey?MSm1*%$F%_qqu%n&3_|6kf z-t05-NS@KI6U~J||6E?|6b2(O2>+cMgILwOXU>IZ7Ht7V{k*`Ve!k6X!yaKPXoj9B zcJhmMice{=xCoEGk6gnJsmiBCd(j5Iy?ciCgXNmlch7~3cJ*MXPPT92oeN7dT$m*{ zi2@Z)S)wC&1}BV8B+*)Kw3X<9M!s`kr12YQ;*C}l+L#%iH6s7__=eav3!hbwI46Jr~3!p&5>4=M6^8jl~2{#W!(y)Tg4 zIdz1??IQW%)Q(z}FK5&?ojJv7XkOgqf^IFxu+Vv?Dw-yDHlB z-YnILeM!AJZSdM}Y^@aK?vA!Y4%1_87H|i&Qug0)hXmo5iT1=*J>gfpT zOZiSPG@sVoZA7&W+&|J4!Y3OVZX-I2#uR{-2y+(g+6E%x9`o3;HJoE$mJK4U%Mc|o z$*@Q5DNd&LA4pQ+=@jXU2hG?I1bF3Xwe;4hg zQ^^?lMC}kB7UH1->_`Cl3QYyA7vGJb>jB%L0H(LiuTt{{HNypc?D0h5UXrN>jaJR~ z66?bc%6IRt-dcIE%Cpo-4cqJdCgsoviU2oGulfPL32D`!wZlpH5ZCN~1WHuyK2F9F z2nR}egIfM$bmHX0@)mWsFjEtHgs)PxsV0NHSPLV3KS&IG6hcKQlRAVpjrlzqV}v3` z4et@Lc8wND6z5np&Fbq3Jjr!VDIjGlO z>SAYqC^b)NQeF$WSX zB&{^gX@jY7h|6vCI8urD0$uQ_!b1pC$W8%K6JA?yN1lj(CwcB|;*8*nQ{bGF707>| z5+QDTWy#66rvqlCM{lxKnQI0QYtL(CV&G19=^F*cK!2{ylh%r_0t@?S$| zzDUUhN{(!h0)%hU2I&lkao3q6XP?Grk8Oy~UTnxh=9>myML6;zElI=inc(2Q0C3J>E5yF+rCt`BRr z=7-`V7)&vd7btV`*4^^jYI)6B`}E$b^S!(GKXyL3|Iups*4^8yx0AV%){j5=XzfGi z-pc(o@y!YDt@ywxK7xqIEvK-fAn4GE4>xOBft_r)zeb?dfi-dsa6G3FH_2 z2`~AF9Q=tn>LvLFPxj1?s0CegJ^H<>*L8Mqz*&r&+v^YC6UKga#lMlp;$=MkQB0DZ zGnvR#Cv|$BCJguF-a4g+IKuRbT_p*VqxWn&ZeB+zPWZ7wAAD9K1<$|y7o5*m%0q2s zqY(&0L&if`AvnSciBN*XN;Epa@eyh!&mff#q!vw3)io#LS0ae~#Fj$mbF5wNlsi<$2ch-h=E=okLr*XAKg;}k>9?A>Z993s T+v1B+?BegmG`jyFog}{j#I(X_ literal 0 HcmV?d00001 diff --git a/requests/__pycache__/compat.cpython-38.pyc b/requests/__pycache__/compat.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95459c110eb9bba8fafaadf9808e6223bb7c421d GIT binary patch literal 1638 zcmZuw%W@nw6z$i{^z`)1`2C0-(@9Loka#e5sG=x}f^t}d!X%YP0jepwdb%~XY^l|j zG=}klZ{RoBux87a1s|X-8~g(-SaH=oPKMy_>Z5b7q}%68_r7em0}Gzt{`%#+zw4Iu zr!i(9AB<1olYiKjm08Hj?98Dyvdhsy4t7pl`}J>d0=uw2(b{?}H% 
z=~=jW8VF=t<}GJ@?0^gW~Q+N|1im%WcRN@YIXn^C;~N^u7imnz}kUHWiV{VV0$_%5gi z1g%E>eHcOe$VNMv^W5sqU7Q61pN+9pvY&ERBr5pvzfTanI3_Y;JR4IS9VdB4v5W{S zxKL5Gg$xNLY7!ACAyPEN>U)gy=;=fq^E}#%U{5eL_WQx5DXyyEDrB$uRxd5+9nh4E*!# z;G@Ay3D+_>O=!XfpB2TUL?weSd799{3(Sf^L5dxcO9h3ubG~~qJQoiy23O7gnp7>^ zFDBamxN7oGOjpV;06gD!{6^gk-OzFD(C*lsdfk5DZUXn=3!OFBv#U3-1E+59+Gfto Mg!YnC&G@kPUzSGBY5)KL literal 0 HcmV?d00001 diff --git a/requests/__pycache__/cookies.cpython-38.pyc b/requests/__pycache__/cookies.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..196e4690f61b615b47d94485cf26d7acc0861ff5 GIT binary patch literal 18817 zcmds9U2NpmbtXBS8EQ1rYJa_JuWe@IjWv#T>^Pf0W#iah$8j95;&|maR@WI(LrNNH zW;lCE?OFqEn`GUlMUkKhTJ$eXwQU2`E!rlJLD3)u`qDnOeIn4Oyd-F!n)aba5cm7e zB{@GD*>#bJq9gDQ$+`FP-gC}9Ki@sni!(D71E0VB(O-VyXP-2TAM#@Ox7&87xKcC?oEO`rPI;x=aaNqp)XG$+vQp_xuS}ap zzbY-NE7g~b?pfOiX0|J;Z5sTOUuIXTFB@I!^TsC)zvP!+GyHOJDP>vV}8}2!S`|het*_K@|wAF0yXFSqo{c_IEj|0aDB`_j_cz=dF!rV`nJ7t z+CSl+M5{CYDgQ2f-|e6F&*1wW-}UeD&!U&J{=NQvxO1;>T{UX&>i^CGig(>+=m%~bx@&>!t+nNJCkotE-M-ygt6!F$p7qqK zd$Sd9y7?n2gQxApt#;(LI=xWEk-M(Kj=LGCV8M-~=ccEE^_{kEpVq2ZehhXQOFuSI z*34us4jWyJ>qgLsV%6$y)QZVeK8nQIQSF|mq9Cc@Y^STwOIJeNYA3T-c47|yir4F* zX_GwsxhRP2VAJPn!LY?-mblLH`-UfT6P`SOZ-D*O(7Ti`n zs4uvFfF3%nZs6YB3}P@IgG;M6y)H#73gQKK1HA3xnSpfahaIogjofgZw=$`6y{<3U zy8Y0MLe<)6b-i|8GhExk0A0*L1)hH)?6!C7dhYF3&`p1cdSN$eQLxuTg+aC3Xafn6 zA@I4~;3h=X!>l9sMhisU2;v5AHqx8(wN)9DP^qT^Hs%03E<{a*J=OB!fKPro+>uUk za}A=n>)!M*2!NR5HC{KDYNf<#sH8ePc2z{%qyRBVN>LMI4iY=w?FBU}v9W5_lX5Cw zuN&$Zs=jgc^5V4@Baq_Sw%7JL*PiJ0p7LVv+H+ykYhPOqI=ySXR__AX7JKdXg&U9b z*KVkb{cD++4o>uxqk3;QaT<+Qw-q-U=kbWBgu*aO`0Jk-68m~qsY!K^eR$482*H+| z`c*^G9@I>A1YfV4Y7SpAeq9{LW#W*Hoa;lliiEgh56mhYcwkQXSu~CLNOQ{qwF}vU zZK120=3{60uu?aO+hJ3kO0lqZY^&>;izJHkN6Y7o(qkx8G9KwKQoQUk1`_P?t%;=Ps#H&UONrX(o;GQw4 zQNL>4GUKBD^|pD-ic5ReRvA)kZ82gd87_v}Zacr6slng~~B{nSEW>SK2@H$aage_DMz;)$qbf6My1(B7+ zCQYPP6!_x|tCA)#R_zAX&HK@5L{b}tS<|sjm{rR%Z7fckf2RGj{^@aYW=%)vI3mb4 zy7Mvb*R7>x{q#wEJkp99JKYHmV3fHoAU$Gm 
z?Y8-4IG2#wLjQ`^aj?KHChB%Ww;gP>;#LO+!KDvHiz61c1EU!FA9X$7cU!T$<~6t7 z*1EeJ?zk`nz8iLfdjDgWwQ=2A<&@80RoYgm)VZXZ%<6q zo_Psgc*;6xDiI~U@-(lC7zP&Rn4-Mvkhv}8qcZ#N9+s{{>L0%D#tevhA9|jsLXS`) zhJ1j}4h>;s>5bJp@aSB84rp*#uLlo%e06x)Q|anuZ;k4j;Y#o9!CY~Z=RIaNK0oyA zk$I2Rdk2qKXAXI|dN+Q!HuS)`VO~>V*WlzFr#C@$)MzA?Mxzt@J8hm<8;$EbUOWAz z+-Ue=v(Zo|_%NzHZ7yZv^g=Z4=^Cg77POD*B8v~8NE|wLfr@w42YK&9EI7O&krm;k zr=g}$a8l6&C>+zCc8X3x{$^QL3XaWh%dDh_v}mI8$cLixIF4u@#s4fqffkxds+vsV z=LY>B9QPQ#J+r+0{FNYz;J!}^9_m;{t?qgVTeq4z+x2?==B5{a+ zFnvDShlbeBn$`c6E8aGgqdQCh(YdR+VwM>*I!LF+=x9-#gKX%F=wJe=5C_5Q_uAn` zxD&YuU)65v_h$`dYV2?6b+{|3290XkWGbs!e7KW1TaSms8zqn`b74BG=zc%*& z_K88;<}h?tC|l5s5arPE`xB zqvxhEeUx`q6NyWEN&~J%7Yo-7Z+5ewILfd9S4YRAZSn15+=q#c! zxk*lqwQm3qLi-zNF3yEfu9)_lmStOxRW%izl@Tc};0BM&Y~Z2*x5-)fB#>o`vb<;Q zTX&580)rPQPpiKr^m$&vi~#-sR529Or%X)OyQ?v&6e){{FNj07nsNeBJygaLJvj}+ zEW&*Bf1%~s)~&)VXzP6&Jr$rz3i$OJqaKib z`^7E*Ap%@_7Vg@vOVho;lhl!SLmCoTA2$Pvhzv2ji&6?}Q>UrQrPgc*o|>-#l#1NX z%y(d8^E9=Ij-k> zIqa?jgNwz-@Bi^K!B!U_s~g`WbSKa+RFxOIoghe+mTCc|Yk6)2(~FoAAqDWhfjVnz5 z5a8P+0xhLuDqbI;Q~@{niGGA)gi`M(&JSo7kgi`6P`B(m_%8WVeg)rUf7-9&+wo`o zS$t3VNBlW_R|L{E4u^F8ivsDID@VR!teXPqIxBN227eTgu!~-f`S0@2;rqDMdAI+- zYtG6EIX~~e2j?gK5BLxI^BCVL|AT(be+XxH`S0}?(B`y%!LQ@{jQ_Cz2)^(3-{(Jy z?|Z%iL$-1j67Zqq=mE>%eHU~ZL-pHU6n&H{AzNc31_ujmCr+I@R`DT9qNQ>yI9+4Q z0uU1@4hBZey$KTtuL@!A(0XOGn<_(eiM1NRnzewwiX2Mse%O65PUT{kQ7`rfT3u+~ zDD0q1fo&tV17A`5hq+zNb~D(o^py6U-=`nB)8p=g%*Cbqj=cmtJ)KBKG>C7&SUr{q zozvpnX}5C>9B1(S+PffM{9xp6?L;x%N_lQ`WbZ^Q0L+#&NXL)W=LdE72`H|d8nVZd z#`8BrwJlwR;6RFfuZ6E_Ym)T^4*(&^jTYm60b4;NLCRNGbA7i8lM~c8^y*q&eeR{_ zhkg8(j+fxJ1E`V2%nI@%&!qaOtx1m*``_iWMI$&+p zg)h9gD3k+L~hiFdlUH z3w!pR!b|W%Z6h|Z4>zy(;r6VO@iOjyJ_`qa_!JmHw5#0h7th zSV~AfWQ3SG1vB%tAW}-&X%@4^5|I1j4Pr)(ZrJd55aBhk45P&Pbg--Oo}z}s#fZHZ zbR1Z+F{>q>#@~?Kq`OhJ%sF$`?4Qh5N-j4v1qb|Ug2JC>V`2p2G&pLWU^zLq4~XoJ z{)SuN+h4W0Zz{gF6&K)tUjkqZNd1=Godrw`?`$1T!q>FtT>`&N_-Zz^U9tb20asy& z{tt7N?BQo=KyXA%xf_9ChxT5Go>U-0+(N;sRwQIH2ZM-!|dnRpH&4TjuAmKXctl{kq>AFpIthAjc@<7&gv!6l?)}Z2siMuolpM^!h}x 
z>1jv#vdIwz@k;XBSTev1JS=MHA9f^ABA-HwBMwd+bZ|YoW^DcB*D4YG;1L{nqj0?>oCSjZ?d7PP( z;VpRlKgVSe~k*U8kG5M_lAxOx4#Nw|WA}G2N zMdp7iDJjk0N=_6(1;sU`)>sUDQz9E!w)trP71sj^IeU;qgQ_EhdOI0;Z9tks$dXIR z0gAjMF&U-CxR5BH(_d#XX=|#apZy;sLMkPv50XGv8ks&TbBR#;vKU>flVe()K&8MI zj3|{cX#rt5T5a7A5f(0>3-Wk3hK)oPjMmL=P9h!bKqA)8S~z5c->aJ?YX^O?0$7qzkJ;VXDe@+ICaAAl8 zT9AfyIxMS5=q89dQ9_GC45WFb0$T3hbzszWsWU=t$|iYqnatwO0ALa`H+gphO9Dm+ zsHCi_d)Nf$mB1dj5*7dGi^zjwR+ZlIBU&U%oy3z^Bh7h918>s%^;C|;Q+OVa#0}ba z>}cxB6HQf>-C~nNQfOv5O~T`bWt&x4cDQl(9h~lPlb`FXA96SyJ7MBQvfS-{10kwJ-TVc5=_ zEiFH-wn)Tn7GsubE_-q&BKMJG^ht~oMdSq1Zy+S+4hqS5!vl_gzC4IfwhRF0nMZgL zh$1pTww>tQo-ND7!ZIPA=tLKB2g_xfckHbKgSsvIl5xGdM z9+(5iFUpzfP6Gr2<(vICpTeAV_}^%N22}2i1{?*Q_cxsKDzJ^VZz)={ys2RGa&`u< zHUmKl#+EZtO*pBL%0a*{ol&~#?xzT+0&;D7QLvDA#!L|Ee!)b6YcX{)b}>SXEj1Gi z21hq3-#Ii;GZjN|;la2%m>#0Y6Xbz7ZRP&G2iN~l&5@-~!skn$x;jN?kfftydo1FA z8fF0^NzH1Wf{d9nZ1Wc73frE<;@rb`jxaRf*7 z912VqD1vOtUgQ}v3jrM|eU9wx{M;s}e+Rk>8L&m<6GC$_qi`MI(YFLVf*mO?B^AIb zNI~bSe8W7~x2~N_jGCjUGqg(P<`>foeW}9?SlABWFcE7|v8h*3V3&?UF6ue4iLR(G z@Op>EK&i3nOQZz%)(rCjKxq95o(Ncb}tLD z&26+h+Si=x&SeDH2Lb4Rrtr%XFZO(*flxPCEpW6nsGJ7O+THg6eKSG(vcOEw60KC? 
znv3dO0;yIo5Jes(Q+Xw!Nn$s{-tK@(A$&#Lda|!YRa{Nb92yU-14JfQoU$*R)Jj9)eD11gmGLr!F7x6f z+#mct2}UdmvwVuXc-&QnWXJGVwI)TOQhP+=sS|REe-#%#knvpVpOX!HMxg`udO^0~ zwR_FYdMA1eci?7ghg+hV7h;bziqWfq&+VA*rY$O8f@>sFpVd%uo3%< zYtYc@KMnX-`FaD+1sxErv%b?6-`UJH(f(2xFq}Ub) zS~~OX4p1M)`pgr;P898e^~S|~+f;9C4ov7Q{8f_a9kkGLBnlYJncZ0166>HxOf z*xE1L0U;Vt7c_{9$wlf{Q83E=HD1u1CsQypaK~X4lHv<^L1YgH7PyA1k64FOKv{FpwWTRQ3KZE zL2-fCC!wwEw#iFwyOQqM(C0F5K#u}d;hmqGjpo+aRE4SE;taK?Lzo3~oUn(&p^VH8 zjV8V8gz>*89Y3Q;E!)rY1-xA-P|%e56I85qya7qrLPUS@949}gih{6bm3h0MxPo$42c6$IvwjEU^L_+&4Va$Dc z(+_t{=1RgHeR^}hAirilJ;EL8m?fDYq>xY0ljW-iIwH|Ifk~Ni2j6~@ znoQ=PZF;+@%3-ZPBfm)(1DX$nqJRbLQg)f_KW^3a|rw+X^pwG#!+0Cw#w_M_cc=OSpOf=L~v~v zb({keijUDWTZrpu@A9rT)0{;uJyIFvAJ22Ej9+uLL*9bI%EwrU!j4-LO{F0!wNS zlzZiH$ljg5dS$qwjm!&qlp&eAm*)I}`1%ATi| zkw`1aEy~BWwmvq&aK&I=VpT<%g%2<8xwsk(*OL;ahy0tQyyp3mB2fT(Ij*u9Q+|)@ z}l+vyebGHQI@=$^wp8+|Fih!-pEeXO*7 zz#+Vc^@SCN9jh{~OJ|JO(ciaCpLZP8z)0}=CH_AFUeq-G3eKj_7+B3^sjF9^ftNE4 zf98zQ`#xWH|LP1@*a|HApT3+G>U&p)$x{>tL{ ztL`VSEUQ1n?@te;Ui~hv6Nj&H@Fj5dO$x}&*6*fAFgUA^GGTxe` zc&OJ|FsV)ANjQNeo$n(PO=eiJ0aOh@)B*QT{Q{c(3#IcSPK*LGi7oRi+=|oG==hs` z)5eQMCAb+Z@ph_|yh6UbS^0_dusj0hGPq>kk%;IEIMMqyYBqJJAR5T`UV^H}|3=sY z5Cs8+C<2gr01DoUKFpUR;41>8+UNV|$Ryu`_}v!uejRrqFt&FYqt>1avCG^kPjM*f zl7~gZ4&@Q1VPFa;Xw@k0@}f&@nXX&j47f~44~QZuBA7oxLnkAhz!Z*P3Qwf44eqlW z98>L;@2kv^D;ev{kyBYgnikv+QwZ6O%irC?&(`5;2ekn6#|ZyG(}pATS#z$h_> z%eI#CiOAih5$!_Xt$^by4l(Q-@G4%ZNcM#Bb?Z7}dC_k*Q-mA&K34saiM~SR5;sVh z(E8XEdX?OS5|X|84{9oaG!1593H|Gr5WZl8@-dubtq>Z}#nLVtK@m+7pSu?>*5F3E@bjrGDUqx+Qnq5)XdpM@h=($NwA4a_{0 za~aCHm!_X$RlG7Q@GPK5x;59Y>3<2c2i{;?_#dBuOzFvpq~`OIOf{9I!(j_ZZNRc# z&hXetO8~@L9Ms74?%T_4`gpx~vP|6(>~~H6g$8$EAKeFCo(wUCgW>QsgRkS&oOcr5~! zA|Fx7o1_v(yjxARXc?*Y4AA zpM44U)L*gqE{fy`{&j^0d>isFZsea1@HG(bCu+}w%2d&HmQ+qf=~G{0+iARu4WCF1 z#mW(Xr_+C|_jXvgYW!YuiHmpo#ed zo(%{iXR@Z8OZq+8$GAFV_t(Zk!XLS+Q(IcI?DV*%pFPdaM~+j44u? 
zq3lR-_o7)~?|Rh-=w1c|`VM^!-tJBN3cYFnXGqBmva2nZ1}Xjzhn(M>IWuR*ud3CG zh2x+9`o|yNOk37>B$7t~VjZXcYsRvK)v`oJWZg_R+sb9E-&i6i@?TpbFZ1tmtpapG z6rqc<0G)*{i86HA=pyu#s6ba_5zkA|=fpJhw9#egs+fVEG3BSAXGIOVCW~_FUA|R; z<-DlFQism9&Oy(KdFXlb#x(Q=u>iebbQOA0T!g+T3vvcun1$t%Sb}9Kw$xx*7Avr< z#Fq20TozYgxe{CIuv`@k7ADW5-Z|)NVikJT)H4r#UEF}afj4rf=K?G@#VuHF!Lk6$ zA}qJX9a!#|cP>J&iM!BujlKl^iMR)SPcEU3W$62&0o{;s<9oXD)yGGr((E{zb$!uyC2RX$z#UJspd*=<0rL;o zcKo5+?CeX8%D*Wh)-vJLlVXONUN1WtYk%|mHWmzK*WgL*X36J z8!Kohkd`V&p^P*0oiW%m6C#;bR0CI!#&|T z-XR={&f?U6z~waiHxtj_X8)8s&7E3y##nM9`)w8~dy2cG zDYbcOz0T^*0A)n(UDoK#Yf~b~2ZdAXUG{bH6z~2eld(jW+Be*M>N$bKU1umqP8~Ou zoj5^nB%Ol&1uoM?wD}}k9-&52KMPoB8b-3n1pu&Rg z_oPzP5nd*5jS>DV~k#;W_Y|SZ9A;kb`-U^pSJBceeT9jO13Th zwr!hw!fM>2wsc+CsHdY#mF|eZI0!SHrOAioOg?Wc9|bf8&N>yNmy!W(Iyk4YFb@|u zJVs-J!STKt$7bWj`@Etyt2$lg=rT{2Gahqic<4hbI5pjrGv>o-X~Nkp-}4YP82M&= z4p%nVndu`uweUkUi+I3rj%Y}yrm4JC{@XzWvbe!|q*5?kM4dMh45U*tly`QXMFb1z zB#0CMmr&uI1OVyO0HvnWm3}`M{v8wBXjWnYjDo@3sF(?AiwErv&0hh{%uyJfIIhdu z^nmUALC2JPh(Yym@2k;ZOHA~QxS!+d%yjZ&uWU;$#^p_=I+`zE{BUF}!3p;gP9dGz zDKj6!4*&eUzKQ4dKn;v%d`eXm-7N4;vQo0^2b8nau~e}m<9UR#8L|XIPW0RGbi%D1 z*+ugMNncSCAOF!NFQbXSOf;Ev>L#B{ID5EG?TxsZa0K)G021>*_XbR0x@C@MnusaZ z4+1oRcm2L+ax;ZaV?qbfB=UKJmUMdBSKH6Xl@3zu@a_gXMhX)QMuzaD=%WYZ!7e|N zjE~}a64zyLtS7igr{=0Peg8R}v@Hb)kRFF~uz&)715sp~L~#YRK1xuKPEApL?r4pe z+-Y}Ymk&Rs-Xz~>quCwVIBc*^5cC+b0sZPV@>@50YZBg7;5-VQ?Nh0%1gJJ?(d8$3H>b z8?s46*Ff+xK}0$=Q8gxdf+v~iCnxU22{$tIDzvP zxI}`JbZXAZHl@f2Z+WY)1HTJj1Q=;(#|8V8j3P^VnlBLW&k^Y(n%{1)1KK%LQ29OS!ELnW zkZ%L}$AG+N#_R|X=>wkaI;K>h*}mg(HDIpv4uj4l`nzB~`GB4*sp&`CL;XksRiLj2 zqjs!gS?_dvzM@r<7(Zshm)g6H#+U_kfPp37_ON0=DXbPXc1TQd?{MN$>JzY@en3x_ z7i(9|Hm+6gE7x`Qf1X%{n{8WIrJf%DlO!xX{hb*=W{0V0E2U^Jqi8>&X#K2cMX6|^ zrf5~9Xi*YYzN9=9y-3Rd#ppuGPEk%#*XcrYqM`{XoN5kwacLUpibk2Dzzs{Wt4!d8 zb+`~KCr*3B(F5ueMNRv@ooQJ9Jlb>rDu)vK0WSICbgovb<>SApdMUcA=c7MTbcf$! 
Itxzxg5544(7XSbN literal 0 HcmV?d00001 diff --git a/requests/__pycache__/help.cpython-38.pyc b/requests/__pycache__/help.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8800341474014065395482212f5cfe782c59e453 GIT binary patch literal 2655 zcmZ`*UvJz*5VzNV&$(Rww2ef8;sWjUL~=rTAotd4T`FXddryT{J z-~aykn_s3CGz(HQn(o)UnyP%I#ICNenO%oJRuz1y=GCChLYwK;?2_vb)Y{-r)U-W^Xvkh-X;83cK$%>*;E>}QT=X02N82SNi2de z4&xoSJ=k$MOA{{KE{jseuVszqbJ(uZQ1e>K!&vy+gSaEYBwl^47V;_I-(x%jk?+H_ zN;*tZ7H3)1!G)8D0HZ^@1ugplh)|xY&y;;-47&i|F2b0=SdDuiAs}f{O=vy6XGm&F zYHe!^Fh}w>)ps@im>N$j<5IFio~fYW8fY*FCd zL-aL@;Ox+SspH!^+OFSp>*dt?qFXOLK&f0<-!C1%*9|jwa^L|Xh%b7nkFoZ>);Zoaks(o+eP?*Tj5Ducn(_{fU^vg z?SY|ir-*6RO$HGK+b&IFCe?|g(3A{>j9Vvx=)mb@-5_O?V0ID+JKBPS%9xu*fq+pz zYf2TlRR$r~aR(#&O4gLW+}IA|Aj*~8ES$`pV;6I4E8dNhhjD}CIz}_M$^bT;+@6@o zFQjsN{^{(1aw){+l3dG%Sw5Ez)8SFMgzSHp6LBi;V(mh{kwkaW@{^h(roPpQP6LFlBS;pB52)7IziOB$NFh24b!DC z&O{JJOM7pR+I#%QsKwcX0USwo%pe#v(_ubyW|J4On6Cm+rfg!Xj%vees^$w*tE#$6 z6!Xy53b{jc!e51%r%<7SOQA_r_yd^HLV*iKol^A)*+(S2q$@%LB)m}~A)%!B4{(s& zSA>25r~r6j^vqD*C%{>f(-2>&30ibvb1v+jNiCq2p0fZLfpVBTC92e>#(@TF4LP1Z z8k?flbM`H&jV-{N3jc{(V`~AdQUhk&h#+HI%;0}xeN4vM*ch9jV>U!&aS&NXSHEB} zuzqfp=g+GU6J7(d$}v_AA_w_Q39*wSACW7MIKpeg z;WN18B_O#|X!8M!y_rDPvj;f61~|RBc_+69Jc`2hJGs^A2AndHS37)|ie!fe zX?K|GA&rCFp5#SiuCQ?l4ZVhjtd+H8&Y>>xfcfi4&>8-Q{IfLUX=rO0gb=I+ zN3Rj0k{4COgbHyqNzRz(ozm7yE^gMEJA;H`zatzZ;mPL_mi!g*d zFLJqw*BTzSiv*fl>Eo@YNYZP#Os1JkR}AWrPYCm1{CEqJlc#Hc*q0ZkkMQ1;p)p;p d0_w@P=t&h$dRDCg_QC0*OtUZB@7ac}{s)jr;tK!( literal 0 HcmV?d00001 diff --git a/requests/__pycache__/hooks.cpython-38.pyc b/requests/__pycache__/hooks.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b581d29922452e00004a5c45903eb914434912a9 GIT binary patch literal 975 zcmY*YJ#P~+7`E>#si8<9Bo>6sf|N>)Iv|Fss;HWwQma;wcBxV>?%7GKTyj_24N7!j zX=fJx0E{e1`~?1nR~G&N3lp#1R88O0`<(Cb>*xGFzB^f6B?#izpC2Fo1_=Fj56b}@ 
zdJCyv0C04QIPT+9%mW@?qtk##JcfJ7lQwG9W6%)yYre+{$#!7ZBb7>?Hy+V2N{X&d-F{-E9h*6V}oe7gUI|mIvu}Mg`tAs9# zu@>F#CZQ0=AbQKuz8DL|jNr7ViVP<}>`EiZHl%(6AW?lRB4c^ySd7X}Da&;^)`sQz`eb8vK2fh{om;=}2HuipIkjYq%gkiO zsN8m2(g{F<0=Q-`x#+RW+@v?fTe*9S-frHlXkZ)f9Ubkr@Ah~zib2J$-dq3Jsk`P7 zR&~HEbmK+1w>oR~A@u=3h08=DXU$hI=^J(XMmk0N)Ms-vf1R%VNyblon$k`x4Bir4*T|&mDSo!LU#o1%uU$^z*RH1V>C$dz*vPuKH}Lb)LEOnsnH zEEgMt<-x{Kd8jd59+vcMeWWp39z{Io=IdjPUFBU8FVx2yyUV*JK2V=%>?!Y&c(J~> zv9G+ZvA?{(@j&?jNgJ#mXgpYcuyL?_u<=m&p@vg-8V{ErZX7BfYCKYYMDh&PA8i~i zA8tHWeoWqn>yI~%l#etf%ae_x<)e*Hl|R*(Do-_zm5((_rtwVqna0`j zSt&P)a-S=ollWNueB;^jv!>xayK(`~MLd`AT*mYH#dP^OcUSp}J6?X?-Ch2|=M8tl z-Sdv&?(wePwaYKyeXqL@@B8F^2Jidb2k`!YH-qml;{Aa8Al@JJUO-B!{1Q?Qx(^}c zAt~`kT9pqx%DcJl`GH?*v|O+57vAjuD->pzYrcbz>vhjrXf=ast?4_#vgfR+ zTBEA&IIZ~=Zy{h>HE`Biw>{-tiz_XBL^zt6G{a()cDd%&UA*t}nhR*L(pawtwY91W zDvK@EaH~NTAM;4?y~?_(Po~0wiym`cQmUoGk!xPnovk&z)_R~*247OGptaDd>({ZX zt%Yh(Yc*eD6_SPu$6s#x>uYN*6?pE8Yo3xE9|4*Td>nMW#VWc{S#GsfeI)F%sD*9qw8{ftt5cRh326YE)e03rCl{prSui zmeE-coyPmxoj4KMC$nLml|>_Xq2~Lw=F;_rWv@{U3s;)As`Z-t^0lks_=V;5=Bnpj z;$$I)Ox}` zR~DsDTMOY?y)3F;>jG=7!a= zR;&%XWBUhO6Tjvr;`dC`KzeE;)k&#~Omi(s};<$Lu- z^)xe^Yw9$9)ii<+40RSiA78sLedA@{Q~r(BYQ5UHaei&>BJjiw?)3VNS+B8nW39G! z40tKP>L0sxqCJ00J=MNJ@FZ&MMl_Bqt=$Q8><8`cDKq{pm<6l- z;7*fWic4>s^&BdaS^v9;aK-~OuokVmmKy6A*NwpLfH30CN#6MF8?rvB4p#r7@%qXQ zYXj?p(sq!(Yx&nu3af$GYaM%uD`q3tvs@|HUd^i;Sgj0lTLEi+t7C0uU94CW>x|Tt zq<+1VN>Wpcva&aC+&mTJIws-kJ&*(T$hEP$+4lby&;p9DD(rpiN#Ch8ovO1|^AE^u*TlQ;d#+RUQ5mtN(^7~LAE4Hx zx|7F(^|iX^mz)=|^OOSuq&yNH6s_sTnU)lE;sVb6df-&md@WF*Oi6tx(=(E)^3c~c z?ET)Jl#<+G3W&@NbCrrSTX#Fr}7p?XqJHl;$jSIj|$=?WK3G{vg*ra3l zLjj@HeA{e}1!f13Ovpu=#WcXI1;{klrfPs&z$;*T?X<)Jxq#3P;5fCBM#(fw`Zt19 z2bc}9`A!;eobIHNYjz{k$+#)N@gj0&1mfQbkcK)lgk_|rn5zB)U^wTd1+oFv$eTgx z-|l2K^HFLhg6zec=Q;-J{x$RzirJDm*j7B&W& zk4353RpuE(y3O>BTobu(q;z~Ej~N+c>Gv`IPIjZv$!rXu{(%4^2!<9hN|b)SGq6!? 
z9>KT=+?=G$bp|$v?-@uPLt1Jzhg{8@ztlvIjbbp;$##mJLMQL$-%D)_b_Q2QJ2|)T zp5W%qY-f;o_~tJKV;!sj`a85S+!^W&FOr!XsQx4tt&5iDTkahPJQcP{!ebpLk9CaD zhYcmj@%OWd$piS#C)mHAaQEu$|4;VrEcnu>x<>VmGw*d#!;zSknQ|Bd*E%`nJQ-mc zXZzPF;da4g-C6S{6XbyTeCqdC2IbfN09>KwaQw+GhH3A)EK}M*2KI4HwQkj1&n>n0 zldxgEklt*`x2mJLjFNg58+1lpK=={&?MF5ObshnD

={^&DQpTs%%-BVj+u(H6bMEOD%>eLc6-otYjH_kmz8H@}$WIKKio=i~=!e!OU4HvuGBrVvI#C zofcuzR1x1(<_Pi>%pv3(vVeQ7F<@M#00&zsi(gTL)jssQ;#}##4EQ%t7A{w%5*8{I zEmRei=c>1j}ixX4OOhT|vj30HJsrWCV-}5TrWEuN`xrv6<=!OVWHaNOvrS*zT>ZX3pIe1JLTN2 zfnn9DoNedOVgHcqu0C##^TNUSd^k59o!;F~@$XC^JZlSrW< zWm%;Yp}E|%Ktj^q*OK0k2cm1=aq>34c zk!g`x6+yb&yf7V#8yTF(8`$^m2eu+eglQRem}04|eH-j+?`M!*Ajq%_fbN1h254_T z^n3JjVJkQ!9K}i0*Pcig)zl0AD)@`*n1aAsEvP9I^JyAC0{-H<@tWpEQ|;PCFYXxD zcTIV>YOQs@XVt;Q$*Lzf`kEK4D<~;u)w)N1k;)3@aJ)ANCV+s;m`M2qB-=&HvzKQk zZKY(|l-Fq8^7On0NZC4B!{AFJsFUUSCZ|iZXlV=RA_HX6et7$2Z7DS~nO4iJy~bdL z!EFY280@sSiNC&q$0uZh=NnK=S-{ix$BvxC;~z!P-P735dD%>mF||}#PPLN=(kX6I!V zg06d$ug}eKMq@q$91!_HkuIPj0Ya?TlbQlhY-g=rUGVy5iW-h-hoLZ)T$JCnx6Tpl zJee!Xc*1g`2V`mzOdN4U(Nbt3M=zDcwcN?s5T!vp8_+bA{y=AfO0a2%DnMts+H~un zI~d^+Zj~t!a3}RD^i7{uLizo0qnrysOEvHLDymonzsVPYmtFwCw8aq63)h19`|Cp$ zP=L%=7ghz|`LfQC+(8v)y zC`G&X$m5^CH2e6J^Z0TQtohT&kMnoF)-1Ku(s3w3ACI}_sESgA501p?XSVEq=ZmQD z^|R9O%)$UrSNu1K2z6Q)@c7iMK#nw4bE>#xK+BPaB$<-;1M*HCmM!m)O!3`@#v~)} zXXHHtxiT;BSLHnq$g>zA{>A;XH<%v9Ku0z36mDNgmv^^-9j)qq&P^SIOfUHO$`T z^~b*EnTg!7G|xJ@{u^LWSJ(}#>_HYahQ9mXNGs)j%b_)*_W-Ar9 zwNR<3w@|IR%YaSwG%pa!)mLo<{{WBl0Xi`03f=((5;FKfVPDRcoC`@>6iGOxw~^4t zDOs)!z9cqwW}fi|#}2E_WQI z2i@K71X6}vM@)BSd03n5$|KrTR~~g!fTf4kJLpLD+vq%X8CsdzSPy8^_ylZJ#E?x1 zE~V@b* z62?Zg39+_*=MReTXv(#X_y|#oXx#k>Q4b09dkNrmsJXFO6GFzi{1Q3EBPt3E8)}(0( zu_oc_7@I(pl-v=6;Md9A10h8mq&Z5pJGm$Z6*0(Z;hrry(jcK$lp91_q<@WaPeq;4 zwrGe7HAost(;O|~6V{ht<)qaiHe-}1GY>^bsG=!sI1sW{MHJAnyt@@002loaH42hg|=*=2zXc>LE8L}V9c$&Ojkq}Qwn`2~BHS4Tl; z?akCZFmZr-vjTG%QlNB4ToYvz%oho>3HFP`*#rXy_|Q9}Ah>kv_DInduSU?idY6H0 z65)dWIA8x5gDunlr;&Drqa8!Uh^#I9u?V!f~B$vfU$Tes#!z1hUOl57vOB`@$E&OVH#E~4<`N|sM|7OE2eXc}>X(X$;5XY%;Rxcw6^H(m; zoY#&G#h0~HLF~~0br!g7;|(V&^u}OKks8>Aw#E<2oB}d{2Jo6??gcB0iFj=5 zMC@F+Psc?x$^9e?JB8_i#Z~JECt#JeT%f%TvpF8v>JY3zu%$I&AL1{$&CLufL9qIF z%+-v#hWG3}0~RhIMIUbfgFJbH_Wq#25;w9NHtlH}8Q8RP8+lCb6f|c8nA#sVjSX<4 z?IGB%3M+$OGu}2|H9l>?=2dvJ@ESB~GTpZSO*g$U01MPmFbumdti|Zv0IW^}RLgeK 
z{)a))%_M5t(N3E7E#GPn0a=fAV0qg}qtydwbr)LwLA0PnH_P(6mF9RSjWGb7!?HDj z`s7|lfMF(fsM)?aZ4HnM?@?;pk% zal}=}-W+#}=n?JYMcN0E`fjHfr52HzjZ*(oCmW?^k(!HAf3}m0QgcYnN2wn|{?qkB zCWM+A*2~CR_+88ytZwk0FgABndrAA;`<5znatJf-AmqHkMQg%BPfoW7?U4 zj!GPd5~Fs>dC61D)ivL_(^`jStO`9E%R@x}(us}FwVmEt4lAjGyCSKey^ro3uw_FW zf%-uFYaF=Mc=b|f9#y9R?;o4z)KmDC-P%qc_8&X!Pwv$!XfYmf&#EZ|VK#CB5_?jZ zsnebX<4v8OVqpP>4HfuQ$!iKp#ik;PQM*_;RaHy03qfC14HU7QA`({n82mVcuQL!d zA!ZTs|LSoBkgm@#E;2C1q%b4%JC~jSrAXP9xgY4|fVLw6-N4?IDZnZ-gnzUg z<)B~J_WO)EVxt7GGRgav2UeyWwD6R&hhTHsYqlTQimqb34K}8a_>dV(7&6XDtnKKS z>NK&`J=)3-i8UV(4#vxZ<^#BfdW{tmc+0+Po-uBYQ&$h7*4`L17W^MPrHBA=54f1A^bPz?27pDT~1%{Q-b4-`4mNt!5*PoPpl~TN;M` zDh+(B`|&Hlx2esMdmxaNQPkp~ma$;hB5foeMmTp|gn`kh9!q^E*zE$p0E=hUe-0)( zz`9twJ#O}l(JZ)Rj`p~D#Me5+lOw=kd%=jC_qfuaDb14|;_GLTzK^WBj2`17R`pNI zmAb4k=?+Oe`dI!pD-BFr3@A|FdJU$-Cb?Zq3%Zr{FrOqAx> zT-qzgZO2CSV>GI_i*XCHbQm}g4lJ}9aAJCcBY@w~dSlJ+L61aOp$)lM%yioP{wmVc zD+~@ZIK(V@)~30hq4V%8s=>oazpFgIXTjfy$I;(l5pu9$_R5R8K=#G!I+AILi*C4U zrWIUar-@%Ct_4W)dBL-=@PRhyfOq%*}%`Sh^Qn*5hZLi4e((e zj+4Oga0Ugk*X8=TGW9iN`}^F}R}nF?&=*-nTifFcV8?Ri*bg!=qtPZ`Al|e_QpA^I zHZY%9>_?D4V~z>`y4TDA9~NxbC#{5>Zo>%#us)h zfh)(R1}MWOjm`5YVlaurp8XutfE2(6pj1vh2NECU5>QXK&qkmQ6@M?_MW!W(?xU^) zg9kN5z@Wx_FQz$VkC^-I?R@WjR-bggkNG80N#<|`FEWQYu)36NIEyyWB#4JP*s&qQ z3DXNVFF1__K$+^IW`$h~4pB&Eo3`x(H zcATXn11P13;dnGyb{!sO!i+8m75pW__G1XbyiS8rQY%P?_+^*@sYHxtzRawHdbJ{s zYwJdM4_S^S7-s;iE*z|>G`TG;VY)*j-M9#3U|F#rtbGP` zg*RcU6GjytLgInYt346L95_@{tZeaU6spyRxh@x&JJ z`%D{f3o=DsnjG6=j-4l?5 zK8MH0EN`=xxZ+bne-e0^6x@NO0<0%RcMw*Ov^(Su<2U1uxTE;Zx?}Dx{N~(ocQ=0X z`gl{hppP}dej>-2U_X&#OyxmuNFQIq>4;&(9rt0ew~Qcu$bCfOqliE19+vnR;*YtH zOMI96S@)>>DcE4fz1{AlJGE%L$J`R$Cy?v7dqQ&ULHr5#q{R2SPr6T`SNo9mY4?<* z?RTGcPb2LC_cQJ^eh;`)0I@S+E>`ommuG=0Vzdya(j=k}!X$_w_HZF4S!yH^_Sq#s!zrS`6pt{mOCX+GVhT*`2;pu` zmvF=&2yG6LV?umbzKw`c+@85CD!|E6^&wgdM=>1Db>de}E`{j|oaO@c%N${m0mmBJ zs(M?C_y34#zrx@jGZ2g=Bh8VJ2RJSk<{9FI8`KT|gn53I!P^YB?D)5k_A+<;enhD3 z#Xmcf&ZQCJmti4SOtnYjl}*eb+h&n7DOih=D7BrUm(~sL&~%uuKuZb6qf&W?9l%4~ 
z`dCjx(dWLFn2irb3SsUbm>KtA{_NU_crvMulMAtG_%9>tmT{B+0l5VK^Ho$9x7|BP z{k5;@7*uRF?I3s;N^HM~5L$$<8jE0#UF)43-mQxO0x}>oeHF0ZSv=JB$b{E0Pp^*= zw+E;U{G@T${d_GMg@rTZ<0yTdeV;=i=FVV- zETcWyGnL?(xqIM-+v8f9lmub;r>qQa6?&^-ZP=I(t8}+LiAkgI-tAWSV{x+vq?C-+ z-o0b9eMF8BLUvlfWf~FaWw3}OaFEb>8VK74M9#izgTUqZ#<}I2#9<_4TsBl{M*2Rh zzKwb?RaC%)Szil^lPQrC!n6!tG%xC}Bg0m3z<^L9nxQ|#0a1lwKxH*#7tOvQb@TPD zErA2&^pHq7`h7fx6V#}kJU}RAf6JV1*^_rs`X|^2)`u<83zj?Bnb>l&_qhDvfwB8O zFsrLb-s&DY&2^og>p`XBEEh3XK*3L)I1w4I9sCJ9%pxL+J8}{c$D_2HsjdV}tQ&KT zU3?DKbKVPXdtNhP(@?WWe5ddWbnknyzbdpCx8;!30!}G`Srk>IJ}jjPg-)R(Tv8cX z)t>gJ$o)kPIxs;?LuiofG`-y-1xMo@mXzpbzSXK%Dt|Fq*IeJa>aW`%YKL`I{|-y6zRO^zmImN% z-)go!)v8o}s;4m=qtv7s2t3KKtv#YP*_4D}Vpf`Tgu9$NUxyA`E7`Gl@c+Y9shE04 z%5z}zDoAnZqqk6;2QlPKS%g$}>aOUy{#MdWt8Y&9fwXOXNHIGr&xM}_+=s+*XrZ-^Gr$y269||GdZV8)PlBQlR2nT9 zZlKNOq0l4qcbsV|x~AtM7aE*#@LP49M2{=>=X%abg*l#ipsxzZv9PzK5aH0ZOBb(P zyL4f;a((vv?3JCS3n$6_q|R?8yD8o7Zu*`K+vd%V=j}l6yzSTXwgRQr3eM0*^LK9N z`D-;33ILltoS=xt;lobHA)ns9@rX<6BQyfdUNS)S1V=e4a(g8yT)%@O^Knhd*mfNJ zZXR9Vmi3pDX|US|<8?0Wn04EP?0|JA&Gu|eN~^I+DA9w&x)bv47^=8}>NCXgxItAQ z{(wpeu(+fsZ~!KcaiKD}8~|HF{j2-+dNgi9 z`yfCO^)Gscus<2Xwrq-f{c@kmmCQ1EMUKp;^7pI8S7D*neD;R@uDN2UBY}xueZ&f9 zPw&DTpn2$BoTq_8@OgYQXzTW$4{$Cv$i54{JUW(@V=27DA_P9u*houli=CACSmP|N zdLzhpGVj7~fM<3wgX!w;U?f+B$UzOr6EiYj*wu3TgH|Ov&Z|88t!)C&=fjm@KwMH$9adv{gXv!2 z966X9==Nyxar|0aV2$ijbGso4bEBnnXQm%S%0lHAhSigY$UCW=~VX<~QC3eIr1 zIOp{d4>rjJ;p>u0VpS&&jX-HgAV)u4k9RPzaXtOr3cv$nyp0|Pu6NBMjuG=gLmfv! 
zO{-F{ugUcQx8X~Q<7GKq>4(L zOT2-GYuLk$U%_<%`o@vS%YZ9G5KE5lYEBobU7gR3G|ybEdE5@sZQ@q7CRc|fc%W;f zVdfZ`y4>vowqNpxeIN$rC-Li33jz5R?&NaW!%w#z#<{t)e-y;J!UniWIY4;uRpaKj zz^Bnf{0ba-AUeZ{EY3oa8xPXnH0j18`O?VuW|9w96PkU+-KRfa7Ww{ak`F#|JLP)~ z9$M_%X8v6RW?%Hl|Am|1ihB0buJx{2DMUgg{3irF=&CgoO?t%0!oqSHLl%Kk{SU^7 zrxdvkMX6TpWM?zX5Hs z1tVb*QYn47_#8+T;UWMdqSbyp!K}SZcURhv>ziqpx~=NN*8mr6Ol&mJr+^^E%N~dd zs?K4bUis|2)|r2Z7|$QK>S_<8HcF|#BZ=It%0W~;hyY4T(bMki}Q{kK>vH4o}PvLt7OGi!_vO^ksp9D~WJ2(MilOko!7Dj<4i zVrGGqB>tli!FNKnPhUEi>u7|7CW1$D-?IR;O*v^p!_s50>!4U1{)Q!Cly6jFmj`N*&K8z~L*cN{9@<=mrUj0{@K5OMv11>SA;Pk6H&F93tST?<4r=hxmgc zN0vUVZ!s#!4~OlC_F8K#Hq$4C!aTH}wFUtI$3Zj&P``=V;EE-saG)Rdynf9K?m3}8 zQZkI%0E`1DVB~NDo<2ALF8JbDkJ3ty_X3{wuKrCYCAaZqiKgon)J7N+Bl`2?uhrfF z&?cTN+SqRSQ%Fzot_<}Q&FmG3wQd@B5gEAk2FBrg=GtW!Z%|S5)D<*iRQ60}+pvm> z?vAvv7lUI@H#Na8U2dP81#XWQQ**6YiWu2zElgxhL@rTBi?({^e{v>fG0j8(T3J}U zwhlk^M&zF%+N^K$<7XMjVvSvRac1_?%xvYtb1%<)p>q97`O;2|uJDp4INwB}MvC_m zS*G>Fls%4FpMXm~4>(2Ii>UK`lZ>JU{X;I`5wA3CFqs%y%T`Q3nq*%36HWdR#Ow{L zJr0XKa$w8j3aa<)4N#WmW1vnre;^J>^N|CR!hirX%gtoq55$C=Y{{8z#>ubP3ntW+<3B*+8=UJ*DhahPM&(=6cuCyv$_|cE@5!!e@W}IaClYs z7WOrcT>>7H`k;6^CX)4PvvnJmXM80~%QLuDYVS~!9xo9MYmWyrCC;5&xK$#uxj9ve zGtJFWy1_LGss`Q+!_ih?Sdz)Mvh$(~WDedn(PeFB#Ar$Tkembt#m|Te(#NG{x=uw0m6(d^%|iy9m*ne1FZA-gj$LFJdZZCi6Y3EG{w+BU$}T1a#a* zKeSK&*vr}x|Dv$qdi95}{iD?nGkR#cvVH)NGp8$De|m))+5I@x!plm)ul$gxE@Ka+ zGiDD2a1Qz)fb1k0mJ4`%vY&*2O{_WiShI%9so>uFEZsEeht#pbTsUAdQpkZ7U!zke zZYxDzo;SiFVmP=u9qL6>?bE%h3*`VcS&>Ht=-H~MZ+ho9Szca>(_DpN(yNPMGFDst zB6_DDLV)cfJm6nLJT&V%wBDu%J zEwwKtF+{igibp^5O2c2$*6S_% zjVL@N84bz;2H@loH-8uem}zfaD+^m?R^O_Y*i}v!3E{iWY?U)3swi<#g!%F z4s)VlOKn`F9Twhsc{PPyL>64^2E=}KxD#MSxC?+zqH9M%I&t+tLP{7FJiZ!h_;l@zEL%IYiR=o{mr#)BuZ^RPf1vscK0gLsZ)T1+GtDWnj z!O`j&9vh(Mx#=N4ItK0`Y7dw2z{$7!!;5QijR|fx$0jaZQJ%_K%Ub@!HcV^jtq0dCw`VIi%8%2my zGTiRyBqs9aKg8EBP~MzH1dc^%TGr*CnZ$mOO6QK#-7&{MJ6FIzxbPN+@X*zWad`?) 
zk359WVv|JQgRxU)&QK>}#!sC@iwbA_r1JtJV^yYkgb^ez!lN%Z>L&@r3EH`pf|;lp^xcyrHp*e}FoQ!39zhUh=oo>cG>V?-O88IXI)A_pX2+bz;!x4`9WEo|p7-NMFJ z;TRO5IBNsldDn9BUs%h3LJ&?|yma~e%U5SBiDBpJl^3qeDjaDu)P4peU~+w#+;=2L z`Ncy`V5MghCSV%o&gh%f&SEzj{zU|-EJW6H4muIfuteo|Illy+0liROWE8yh$^=Y_Q0(dEs{}Q<5#3>D(5g?d^0f2OW zy2Yuahq+2rrdrvp+n2Jn++UBD`f#26Q2PshIjWV7+F#(u%mGpH_NW}Onpa~x>;ri} Baex2- literal 0 HcmV?d00001 diff --git a/requests/__pycache__/sessions.cpython-38.pyc b/requests/__pycache__/sessions.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9870630f8c6b409c8294a93d247bbc84fba43e7 GIT binary patch literal 19522 zcmeHve~cX0ec#ON?Cjj`-ri6C5JgEt>Ibp45kDX39HhT;@RQn>9AN$RNi(X<5$)CF4i7its+(&mr+F(^=@gTOX&+qT=_ucE`<9Q9gfA-zq{p=@&ru`>A z4F7CAyoAf&(>2Z2Dw?aiMoX{g@@`ZNdCycbxMx~sJ6p->tYfyUcCL~`KI>X7yFFGJ zllfdL-!4=N?P8_a9_9K-?WWLyXwEbA+v9?oj+NUa~WZ8J@bo=qj zjR&v$ChlKQ~ut~R}T-~?T#UF+1=J*U=jot`KBh95Y-7X*#Y zI_lR&*Z0%zoU`?AceCO7=bhSCuyLLZzlq24T>d=*)M8nW#)3xMbG=qj!$aYwa6RF< z3ypg49)L?j*+g}<<~Qn9wovs0QO-tVs@Ya!HL@ChwYU9jl-m-mX1CFaY-D;h;d@c; z6SZCs!z!E6jQXHi6V)}*ZC72iA074H3`DISRO3NbgKiZyqN&^QYF(3q{}_tK+n!kW zl8WU_l#A;`6Ssti?mRasIJ$8Cm8-A6wp_h^ec{IK>(`d6uif~>jpectjl14jZL1Yj zH_)z+$A;gl1@#U2jEPR7|4uDv-19h14m=Z0pls9IuHLJ)sx2&aG?6yq)v#1(Ch70i z&D+aMnCs+nxBH1&XZyD2HU#zzRc>_feZyUzPfxP%A(e@&(t%7toUc)ZsNLz%kLoxL8CTpQ4F;uEd)(M{&=V_ zX#w5@`mQ0)Bd0OvUy^lBsEKX#zu|2=_ZzL2v+6k#_|ak)d$ZvQXUpfZtYcx{n7w1{=sQMexEhw;4E0|$?wU*5yZYk08o=F{yA>>(*y0&SE69beXY7D*kutXp|#CM^wOQ6s6-@OGm_e5)30gvyjc7u%s zKmlw3(0CynNe$=n5`-iZ0w3ylb?_ZPBEYX++wwi<{>A`36ajT!e9`&b8R7Z8E~wL) zdFN-}*pYxMTaqj3tO~ET>8DIgcIAM`fiSkV=M>aNcG3oDbOsJB^r6~yt96aM@ToZTG zopfz?47@+<=G_8HEVt;63aPTfG$$&cI3OKvye zDAb#n2~yC;hqN6~Jn?8~>}9z1crt;knO#T{E)cbx?LU7Tn*(Y^l5sm;{W;`bNKcv{3et+*-EzeDx;cX0Iy(aSSiTN05|lvA~MCE;v7V zd#>Z%mzg|Eq}=94c^1a`Vtu35S@-Y`4MnBoDQ>=)NJFxllxHaNI2B766sCGornry3 z{U?xUAdj|Q)XnUy4kF3RFf&DzOzWlissGfW0CJ$wR~DD+oPF{mpc3ylarv(T^VYSU zJPBq9xnZXefJin0Db37IF)X}ehy~ypWQd*dFppi9vPGDu2K5WZL-?j@_4B|@Xid-> 
z&YD_a2V+5gEwh;yLTL_oTWA(ZDc;s~O=%FpI7+Qu5aniRGb?^2m~gXulYDxhgS=aB z8$XZx-qZtV9${f`8jz)nZ(?jZ$F@^)?Y)^`HY_!dppC*hDFH`bz`KcX0x&xU=>4Ox zh-;#GG%ULLxACS^vOfkEOy1yxc5MzH}U|f3hWbjDyQCyF$8JjuiNZbsLFC*`x&d}^2djPIS{Go?E8e-`}%&wyjv zUO64hc=H^vcaO{_jFY7xx#)q-6FEzdaw1m29L27c7{g;0T?Opde zo~Q+&y4|&uIQyi+w-XIeka{h?LiyRHs86j%3jY10*WavrP{my5b2}*&_vgyGcmbdE zjf)q*r_V=J$?P>zTW^DpmNVZg#d^bq*SyYpu+c9j&&%6A?|W0rqSo=(JaHk>HX=)@ zRpK*fR8){aOtOHZUacMFs0?!_Q8q(@Q$;?9r6oG%g5o|;z6;R2j#K&3H#<#sztqyVswnB1e zK>-366yzd;vRYIU-s)BZw66^c-F2e^+6%UPYzlZDNmVorQ4-`0!caPtXd1E=^lJi%e=tf(#Esh?QGW;v-f->f_NwPpPfRYYM7r9LZ7_lL9JZQAuiFAi`7v zyB+a3TLUldxaC<%(#3VzJwzsP2RZQulg~1#GFd?qm9`rmW*UoCepIR{{js`vAAH&e zlfgy_a_eXcoEqew1e74^yQ8y4zjOuRv=8{j~Op8oxB(>DIZ zDompF58y)whC3prj3~ahF$Ito4Js^zydBd8A%fU&4rB%-swoAQ2L_&DuzL|CuxW}H zf^2B+>K%wK5Jj>iePR&|!wR!X`IS=sdp|>|eAS~mm@FH`2dIoSsE@UdG#E+#D-A%D zyp;&(P`js=DP?d6zZfcLVXEEhHe9C#(VY#~(K0`T4mRf~E0shQD#u={)d0;4pl2xA z#ZTL#iUCaSL06`z3LqZp9rBQXnG3e620&rZ=b$eK*zGF~fuoDYM?|N^a!vx21Xi(( z^^J1z1Ypn-TfVe^2!}6<394}nkdzisD1Lzn$?Bk)$fnrCO8Y=l?4aKFarwkyEejws zj%Tb4^|}73Lxq%K+#P|7aWp{UBZdDzLSNFtF3ov2YCHOqQu@J^(|95R^wPYi|std9rL5LM`C% zuLAC1FzL?*d9cRpLolYmN>JD-Dl=(NfT;6zCF;1AdZUD#ups^bBFG;E5TwH5uEBOL zYf|*h)qW46=`gei?}7&_*cQ}tUs^**=(ZiOgtf-{7MLc;nzJeke2C1nzm33WY|kRs zbMDn(!U5m_AYn=YxYqzpLkK_^hWhgwS_dBkMyzh9aJhK1dh7PhPk-h-zf-6I(3Fnc2ZPZi z^fhfM89r>YY1#1l0sXp|qvU650dH{uFO1+tqEblwN}EZw`JtxgMF9C=UK*X!ii^O3 zt~3R1zpIO11b9XnaQyzvAzhV?#l`aY0DYtkAo;GS<3p)#h*QjcnaN{pEDuvjtTBkw zEU-WBZA;cKvUo&u1LRym!BFdxp%{n7RvT5wR z-xcnD91qdm6hPKLhm2O7l&mBV3XBh-G&QRbu9p~c?1xwH&zHL_GcBEa;Olr`piUKKVfPej=@m|NW_D|O+rIRq1u zVE-+CGpcF|A7%60y-8#=)5^@k`W9bC|M4HVM`|c#3wq=t)Fwh-3wqe-w#r*hlP#pqr@Q`}vMN zW0z$eBG1T=Oy?zB{{GB!!7k*Q^yRrZTBm54qE%|W2z!;XN@ZcUnv&M4X?I3i ztLV+5wdy(P!O|;Z%5zo8$3Clq`>6XEY82h)UB^8Ii`TgOf_vJ19J!MF5%&rAN#rKn zr`$8BHR+yp%eYUu=iH}ppLWl?7jU0(=iH09&$^e~58-~qea3wj_oMEI-Jin!n0pig z%zTuKcc%7pSc@$}xy>UhLeiN<-+Uzjc0N$|JwS znY_zHDpLbJYKmo?rQ&J@=D1=a!{qNEA>D#bD;_}}OjI(oQEEj3)95sUYPCs{brKb| 
ztZiCZD`VrrpJ`dV@=~kmPsU(3mwa|$g}KUdg}K!96Nod&C^GchX)dKj(O4DkK{&;# z)dw5_xsj&#rw0ARZ$~;}EnXkRC9S!%lF)v|MV1}HQqjiCGHhzN0f?Yf4$(rALC)QK zlXL&|p`M3ks9ug3%&ALJZk99r`^b=C_H0VaH0F_M!d>_%4dpP889^3*7Ga~QK_)d| z>Z0D2gM(0O+TwO-`nM@D`?rHJl;>$Q4hp*(^{i$QCF6Ug2ke7pzX@EExTb<>q%-&q zW;s`X8}NqbBX~ZtYaq|IbCiGscx{@;@N{e!y4%}%n4_}P)oNeDe6PV%0__AoVW;NI zOV{1}%B@icwsU!3!Ic$?mNA~g14)C96hpy>m6D|_|BZm^nfF2Mt~eVc7;&UP+MM!u zrRre%^=)OZazMAFOBAC>{B5#V7)yLFmJ`y(k}1W2v@$GjO8SwIvHAV|47Cp7J=WOZ z00rfvOr7>XI^`=5%bCG!2GT|%*2a>HKeul&(}n=6^dOb12qcA&s7~TgKCaMyj_(Pe(U>x;eXJ&gqzYu)f?zA;my$d>d}LRGath6A zGIl`@A_y(yTIJv134ZLo3rmzVnQB+(81ZprqGcN zhF5ycVK9&Ls&c!g-Nw{!bh1BmDlz6C=#1-^lBoS__M>$r$>o8L2ucVXfC{;?a)wo? zG9f%5-MkBG!~7K$w!$UkkHFJLcvb^&86Zdi7I?xGfS_A*4?qwF)RC@f2XO*|3}=dn zkrpjPTuclAxsffCionGM*e?Cx;x(8HVGNafz*Y|Q6+1;nx_h;IA0|rLsJbw;flrg;$Jv^j!J;_~L$NC9bmi_I78CXO;;&3KNW zRd98U^bK(MB{Rb0xRDr_1mgK(c5C@47oK4rh>kb&X`tEX~-@gClc{gY*aW zPyW4cTB!AXOFHBF6Nv;E%aS9yTL#TiDg8Jusp-Olfv`^%762^?0qUW?1}*K&2-QKb z=69AbV$wsH3ln8w5Zp9iehL>qSho7H1fx6W?wnID(Rt@)EE~bau)am-18wi1perl! zbXP!eYnyOIq~P_#0Z?w}viKWVly?pB*Km_N@oq-JhEzr@HG=&+#webn1{Ra6s@X-o zUr5#;=B6KO?cYn*{^qUa8#fo1{=Bb$l#3@S{t0*BpE5a{Cem7wQ=+kdYGlPzh>zd? 
zSgZb@lU1jg{LlH4zaqZM_5Hx*HYq8eA6e^S0^NL7mq3pu_@l>&mn1DGkO4UXX;I_^ z9ucU6p~?6jsrY>pR4!R*dLjPIR{H;U)#re2Iz;%Cl=G7(pMMj(4J%`6C4rrloUEK3 z(9gk3@eRD%kJyNd^n>RKX}C14&`;rvFAn(YQWnM#qye14ByBVh%#lLb)-aU4{ue-& z$lO11kccw3USC#w{x7jb@5WxkRpeFaH<;T;-7~$dU?GKp{D709{6_|X`@ ze?e(t#p@>z1le0$@BtWze|^ZVXXT-`ttQZv;F?budk(MD2QT*%1agw65_Pgopf>Sekoa8^=f2wX`R;_Lyh@-Rv z>IKr5M_mDaz@`i$mhUon%B1h;busybXgM7|A47aB$hp~F9l_Pc9*$%%V4oordt)wx zzj5BF$pd0qvw&RDwGlLJ1>^AjAfUflg6M$nU@f*;7Uqs?J7d8_IOdMMt%r7XGMIAn zuvXh^Xpyx-yE%;rD5MC8KDv_+=pUL5^Y9Dhn@1VweD~{d8NOE~sBsMT?O5m8DTEn> zE?acm<1kx+2iZCX0@jOR5#{H?;@)wDJKJ)`#XTG0+;&*RSf=@%-|U}8P9(3JMFo{~MR)GAu0}A$j4Rr;3+<*Kt-7VNOI*1>EZN+C_`}*+blwt(m3<);^opU}yq@fVX zP`v}*NFwGEE*bS9kGI)7kpI+?wgIt+q)TN$ctTe|F9(MQ) zQU7S{FC!^GqI9=AEExERSNJF$L$ne^`A;G0O$9g$9)M7hQB>rk0#vqY>_(SUmT|#| z)VyjHD;_!}|9Mm#3;s3<=a#UHW3ZdELsC;N>nMwzim=L=N%0#w2nGsx)MXRfeXA z({Md8Zr3sf!Ws9ZPIp`K<3SH90oQQ(zl{XgpovZ#Re2nkPg?`yXo=~Ft_&?1Gnor> zdl?=al_w(YoiWrjftO>%w7Zu=6UT#W|5Ld0h-xRuHZ8P-*xOz!&Re7>K@R>ac$MK} zHjpQE8l%4%dD5ml@~k5%4)TcUMSUDkb#q@efpd2Mv#HhK+J;x(l$ORJE0T0OsiSl- z1WGHt-)+;*rw)7{f|V{@IBBDcGD=Q-1q-trP2ax$$=9zhEiYBCU9Ge`h_;e&yn+uD6jYHb7ge+aZ4WY#Pta8;VLvjuJ|I(_HCE2b6}}rq2^icKSh~vO zkoPKJSlm zkUTV!>3=@mj>KI^Q!Q-V=~3j^($5Ik7~fKNf+EJ)q(inkCBR7}&K&$2-c=RUpLW_m zx7Zzul1h4jIhj$ZGS*J^_PJAHhbPGx#3DC1%HpgCXi4&WNdoe?myrd7<67fQ z@rQi7Z=-%6W#8gP*~nj4Tn2j<1NN&1C|hTg5&H`Y9O5egoTLFW-c@@ zYj;n(>gf9Y+uk~yp*VZ($QZFW&@GuK&F33ZDad&J?14aJ8hHz?RI}+(6r#+5SQ*hz$BK;z!v#AXZlS@DV4{zdw zg!dAip{0KtHw7n>Pmue5hs#TnPVPI2)87ez$S|tI81Nz*l|V{vn&|xBMqYwpRAe+Z z|Ca>gM+FVrs>lDxob2|ukoylQWM;{J5XqnJtnB!}5^z`L&e8-h*v~H}`w7UTp=tCG zK048Ixa8W=M#T`fH>L^3TsNEq)OIt!B#+?75mZqQh*1mhZ#I#}9se3-5kWq`#?F47$#1a7 zT(#(#3GFD&{UCcn$%_n6QVB<)a2!u#iZ{1;6A6_al=`8Q0y#e}{fL0%?ECnX^g zgiG-rlRsi2r!+`7Iq|oVd=dhvPd1H^oSE1qqZ~7O{G=OyYFhU5Hcm&`6n)i?aw~ty zUDN(F|53ZZzx;Tgtn0%8#$%k|@hyP5`}LtDsmioP@&- z!rNkWmO*Suoa+VkKWykuiRl)?S6vU(h|WrPe%|@e+@-loNrbM0xHkTC92F z=bi>1I5?A0mrhasa~<`u|b&!8_v$u;`#ZM;ez3?r!*pX5Xk_Nj6+PAg;Rr+CTk_9rf?(+F4p9XH+)F&`fnk2 
z&vRfC_Tq&|oJaAp$pM3wa^DXF2x>^OlmAIT$AvH}@yAHZKP~<{zxnq_qKuFKU|}*w zAhOvPb4sZwz`fp5;nTi~Vf`AvBE63+wKOukP5v(cb!>8s74l5_ObSejOvag%kRZGt zE7`jO zkAW(AgcMbsBgHXL##Lf5eLSg=kfR=&Yqmqj~lZPaEqX}$9Z_saI!eWh#ZeWLhv=LO!2Ye P#N-zcRrm#se^34&?xe3_ literal 0 HcmV?d00001 diff --git a/requests/__pycache__/status_codes.cpython-38.pyc b/requests/__pycache__/status_codes.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8807ff24a6ce58e041959c0a7711c552a92f3df GIT binary patch literal 4230 zcmaJ^U2q&n5#HVV(djH%k}dxvV6uRXQLtn=HY6CMB*Y1U7?UbH6y?aQR@-x@jrMld zvvabfl6i5eNGb$K0to>JWUBZH#m@t86fc1n{^5BZ!wY`q-xLmC&)%OSCwq0@&h+&3 zboX@k%sw_cT6gfg_Sc_$ZhqWx{=%KX=RtJt!$1EyfSk}Fm%PMndFMRWp%Rs;LRA`~ z8VysOw$TWU(sml7ahjkVw38-j7wx7!w3lw9+vy$jPTEKBqC3c^chh_5y|kYW(EI2h zy`QG&PWk{HqG_6;!!%1r=q@@+$LNFfA-bFHp%2q>`Ust%kJ237OZUd({Cu*@*~-5$<$B77RDM<`468x`>3DmP`7iD*R}NZl8BX` zwp&8QQJ7!`oM zX-11MliH3RJ95uka6Nu-ULNkqx@&f}UOyvSqOjCo3Ri^RY9~5|)`g>BeMS?{X||wGYWIzuLzaHYsgBdO zKznC?G1k!%LL$hN)H0Gu&J)nFTbwqz7cGfsIg@d!r#v&#R&k)E3=)|(&17evfm|6pU4oLYUwHuw4Mj?XS`eBwj#glR zpa6l@3RCEvteJ`>v9G2vsnU?`l)W;X3lSg;g)$ZU!r(|Mb>lG(8}rOa5LW>+=CJpz>5(Nn9Jgk}P6M18=HpqwB}v@q3z1Rb|6h6Gld+NOjK ztI4c%YMA zX4^mq61Ll}4W^-^yUh}fB-h9QGvyLuyx6IebnNW{Fo-F0|%y;h`MsG2Nu zS9h$Jd3K;=cDK_UrO5_d&_b~c1HG0BGqHtk>iuCj=8ld;#GeY&rm$|!;hww^v+sIl zcNw6=t@xna4zZe{L>I0tjKc!k5c^QZR%KJ|yhr08?2|EfXDiN&3eW2l!2^d)wo~i| zS0&aZC_G|DZ=zy$HIdP>4XlY`-GD?UQbbU+X6pTRAyFz42sxyFl0aVsf*Ee79V%GH z+gVeEBzik%hB_1s?i$>!9*4S@i>_xTxJDw0Is1#V&D6Ri4DVS&We_SrrY4m4ch-!* znF40SG^B9F&xI+Y>*f?nT}OiD+UHDp-zn&FD>C4!|ghAvcH;iF($x5=C<uq8D3!cD#O|_ohU*OfW%wV% z4Td)W2vmCoMp3|R7n@h+8*&5}^&Y;mRPpC`{&|3QHr=bvN$31M^WG)rlDpxeedi@l zd%CpYZ8(=puCw87mdU;9=92|=vvSobMlMWVsF6pdC!MDqT_tCESgqh)-s4btbLb0B z!{v{VCu_Q_s;cU1KZ#N68oL=`1`C9~8TUpRcmv zbz$KEzC2F9$|ca7nKf>zp27qR*FXEf@%hJ)$NBtnn1rqQ`?Kt1sKfbF5?>edXK}@! 
z&*E$vxrTC;Os~wYFRrL#>+{_T+HVYQ(M+~xhEFs_ibF@u<*Z{FPbVVn4^s5N^Z#(Pk<(&`ljaRhln}I8W@ijTs687#IZVS)NzTi%V?D zs)7SN)``eX#l8!bUI1U=nTpk7nKj!BaNKdX=8m{^chaj3Rd3YX+utaA-VOB%#!gRF z)ysIZ3aTCHHqJW5!E5zZFLI0d2E5;MfT^9pg}0rEIUAtQgRML{Qt{lk`IWYemA6pd UZvL&tw#sC#pH&~Nd$sZZ1F6DC%>V!Z literal 0 HcmV?d00001 diff --git a/requests/__pycache__/structures.cpython-38.pyc b/requests/__pycache__/structures.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ace97f573a233c584e2928a77c7bf8cdc2736d3 GIT binary patch literal 4443 zcma)9-E!N;6?n4UwwOsK3YcTdt`k1{+`1l03x{rag4&xTLBdc$BY>RP+mm;TM>XiEBPT69=X1vTR zFBz|h@|n}A;JeCe_^yd6zH5Bx5o^_lpVy^$J`hT)w$gGC=z$cf{^!eAy?)=y+NOT7M)b*j9n^Sb{WKDN5o_i8QRMc8K1?{?W9}xYkiHI+Sh>l*A@23& z-O%f~zH(0l7UD@VnhL^#Bt7l*+U|Cw60!KO?B1Gt7UL zNCeX-e34fVg^%=7Up2{Q;B7<1LBe5g^C@6t94G~HIi5uH*pCJ#q&}86cgD*;EtX*w z59#}5a--cNlQ!k@!EA0R8^9N4Dt9QwzAH?1X(CJ_i_ zYJFnyo&Ze<34p*L=3+mLNilNyZeXLb6c?2nx_^gT{RD$%XKchgYlOmcRFgMC_QD#m zXZFb2w?_6~EL2VnA?5dZtj zoqOFU3eoEx`H|o6Zl@`g*6zb3@T2Zy(NDW+m{Q5n@aV?z-QnJ`{Ak!M3ib45T8!K2 zX;$;RFb=ineL!~9Ffglzzmhe4ZvnvD1$jWW7Z<(LKC^>ctzj6!;(Z&|jeaycOrK;y)-t)GI!C3sei!C*Dm!$t6(HpiOQp-Vh z5?rvyuDg2!je4&SO~YXlk%E)&lXEhNKgGZV`v-U#v8QWAutx~=Ew*ougHBknO$zOf z<1TMvMOHHcDDV{wh{tPQkne)ZxX?t~u6#HAaFGuTo4jxNF99zbBNuwRF~eb-jH+N@ zcEzcEyEu7g=r1Mjq~1|YLfkEFo!Rn+MiQJ^FRZwvokvWUM(n)&3VDP#D}Ose^O&Qf zlGQ_1bV8CCO42A>-A(kQDHI$V0@^zHBTT`+rd*a2^jPvWKVn1i#Bc$tFpE|0)%b+ft;c4o_y(xMEMs30v5kS7}G27RB?jy65#1K8Waj z+4HDv7|$SYc@jXt8zh}7r=&_K-==}mM7lIk$YhI#4`^7YVOoLA8+q9{lsK7kioN%p zdZkvbIpe=tZK;+!Jpj$zMZTg-QL&sm^EAF@`<6NPEG;~G{3NDohx)mimHh$(IW=I2eW}iE+updlzgH|ak zkpr``8sK0fe~JahgJW3B5Anl=l`VN*fb)Zbf2Teo|8#MiErD4TYj}OC@U$%<2p89- z^316rV{7?lKU~4CK^D8sAFJk8%QE5@q1gnk2d)K}=OyRhxw=45h)M3bX$@Z$>3?Ap z)F`z9F9%)1qJ7@Lio%|Yc7|(nMo)VSia>GE`y`8IrH|~1bop}-jk_fV zj7S2;A8?yJ75QMVmXHf&0~iuTyt77-L=n!ZbUGe4)aEdt^GaGKZd6Y5GGVZ0#7qa0 XmDM)$9N#h+rq;AnUAC_|*M9pSo3}5> literal 0 HcmV?d00001 diff --git a/requests/__pycache__/utils.cpython-38.pyc 
b/requests/__pycache__/utils.cpython-38.pyc new file mode 100644 index 0000000000000000000000000000000000000000..85ce455ffcd2f5318946f9014a0187c62ac53fcf GIT binary patch literal 22316 zcmc(HdypK*dEd${VJT;7o9iRR5h-VDTk2bjTcw<e!Tu<@yYrpil3-^MX&zJ;wS5S zihJsxDt=1(=W9>Z_ZIg`d9e0$eY7|#<)PZX`dD$SzQ4G?{!H!;CxygREKbWUL96Pa6Y4ss$)2xRnMy9IGO)M@oR&X?2~br$E#>YO@{^NZ?&n#B1fby2;5^A+D&d|6#mm*2I}drG~i zUPA3FYDryDFJoR;)s%V#HLt3gdR1LR%{6shO{39%tH`N>4 ztm11p7jb@Fy{Tr<;tlnddK>4WDydsIzv-vfW>gtH-jdQRN^h%*Qn*)AzM8}NmYP=! zIG2@k-5RU6zna(n?PWg*gTiuHtp)ja`u^qf(+kzWt2fnh&G%Zmd8exUz+=;DxZ=$% zH!5MZ*$BLFp&WW;?R(3Cue`g}aG~1puEj&=yLZdApvfk4%QbJVsXhN*=<7zg=2bAr za=pdD3iDiUg526%0McL(sxt*W) zL%i+1D((!iuu_^`X_bQ@%A@0Q!*tEhmIHu}4~sTc8=*h%>r%MVLZ?hMD799OMmu^t zm;A<^s%|#wej|*wuRo%%KKpXH)xwB))yFE$=2F#PEbG#oZq`c*V8B~Fb{>zt+z9+e zQ04m)I%95>zudS}u2t2IYgeL8aru&81^}bY7ppb@<)-pqE9(Yl`}mE9_U~33sytis z&o$M`q}EM6=Cm_~h9Bxmlqt#i{i^_a^;ZG@A&1|2 z{DMEip}i}#^ayyCRkK&^2DzG5b5`xwtj2cN3R6pYeaN=>_l8A$eBEjv@7QZ@$6idU z)B|VL2{X9Pb{yqC01w>>T3SuTHEC0`>c*qxFxor)qjggH3mrH3BFFByYXd6tz+QDb z?gi_u>D6>6t+Gpw{tfj2}HaOM*+NIa>DyQ;zI$EGSsD`@bO)AH7IJh>{Nq4Mp zaBWNudHd@^Q^Ql5K)KjHalJVg-UUs}yj-p5W`L`i*8uY7-C*YYverO?n+B0* zE(3Qo0PbD)!w~BeM1xm}bdwFPPqaxcUIE%`kl#c5L_2%3)~u9krz5*C-F9XQk-fj| z6!t%~pC236kKyrA2J55!`6ye83xk*It)>oTp`+|8Ex+-SzY=Y^^wQ)TrSmtgU7MVm zF5S33c`Zr-lcS+mmwml*vs_#DC-0&8b<-~$v_SY#R{O15x#CBT_M>#Y999-0`|?-@ zDAEKyjBx@ZBnZCL`a20NB<~~_$vZ_ZXfXVirB9&i!&lEw%-je7o0+9@tz4fu+iG1X zhvk_oeCL^IzuuZ@Ra*zDjUX)7Y6tEdYR}%$$J#RqD_buJPb;)mqQ~WNF%g#l7;po< zatKcgF5+O>j_ulcXUHD5bB<%@Z07?v#iN5?CY6axIS2Rg=ce-x&pA6|56RyM%K8*W z>LUqa0#pJ+61z|ct^1Z94($$5`MMQ4=B!W#@qPW8A1>>LH?5cbaj!ZDuIhV)s240W zmut$K^*zIed{xNjz4+hx1;4TskUQU7@q!qGy!kSEgLisl9Wqi6_mR&kF(4bGl!EbZN}j>`~q?VE0X~@rS3yuq0S_n zVmk_<)q&1*-_ct;)|&mmTE(@1|TP{NXDz6rI~js&v~E4b>F;QR1DxJ^FD^f zRn{tWJn$*b-@*KMo23P}o0Ym0>Ixlek^Jwf)VQd5r#*!Cbnu?3gcmNl;9tdre>I+Z z-3rrdnNI3~y_m%_z-<%SaPWa+8>nUPQ_T@QxEV7D057p>)Pgy)A}#Ueqh^J z-^P>cPe8Bj<{5y5jj^5At?qbFvo3@T=-3@+hjpJ?N8WHqu%=SYhZK)eI7OK`$f!m$ 
zvS-UsFs(#)@UV=Y^1({#)dsMc=m}*Z1ou3E_VeL_C=$>&0-&v^GEgwUL#5?xRgfd7 zwtcT@=!uV+09D=f!B5Af6|3Yqv%cT(nqZB15cHQ2>Irop-`iI$_ytjGn#-Z|8J!*V z=4$16X@@zn*;v6d>QZ^dt3mR5VbkDkr3rRl2{%l<;9XwVk-VdYL*rhs+#-|4lWH|m zyY}nNJEUO{CTAJ+O{+k*S*t~E;QLGOlh2H8(a+1;($T zB{dt7(}WbC0}BN|55f{*6uEqjC85M^r>O6;2aJXII-*G!Hr3Td2r?- zd&JH-Lv9{-oQ(57-TaW$ZcXRVYP;irZ|B^t_BQF`I{GF?xmY1Ul7B-ebSd{~;MZ5| zwz0yjI%uX2y#i7^wd#frw1XE{)9vkHYSCRwD;HFlegM2*%>eIr0q?V*s;dd5k{VQs zx{I{Wb)aMD-JQ&;Yg@M$Ztw26Dz%i;cTB5rfDC^%tJ15vFi#Bv+~8}S+}e=nCb>>_ zanssxC-(qcWv8_|(4Ocx9rwNs>Knie@}0c?*Bw}cJ1za2%5FF+yE@p(cLsM_sLk!P z=0N?gfpcP}dt9rN3P;ETK%t;z{&noHSo%*s4}FVN8o$q$aBJMkOWuli`Bq~C?Gp(H zCgpXhGF)zfX3EPk`IWtSD54Dy%CV6*qCFNw69O+z%Vo5Ain`@h>g0kzQ*HO1gP&df zLoA0i5v9vPrCQY~TPp7H2RR>TTJKT53bp`rhv3jehbW``3ei1-*+HGshgmtycLH;% zR?1;h2l`c1>SyH6SxA%W>~d%(KQ~e=_*9^So*C!mol7!Hdk#FVS%JD# zjdG$wLHjV`^GOb!tu^PXjZ$?ZbqvEsFp7AI)A|u%*WbiivHD(xuG-B_tw?Tm!N^P@s1AY4>8fk=5O(YtrPox687xz=(`nEm| zd1i-{Y^TA~Q#;{xSV~DbjdF%%Lnj?j$aT@;$Fl81(;Ai&+21^DOInA%u&+kB-sQsv zVVbjxV2og7yk+FVL;K|WAK>3Z`!qDlEI=S)yo|}|Ej)1f^$ZSCjs_Te0>@GqN;i3{ zUn7ZzWez0~FKGI&&?FFd4uH1H&VS%I`VQ*)z>-Bp8b1NcFQE`Zgs(wc196W~HIeHS zuV-M;tU3S{#JjWsAOcf{sS@9Z;|FFh zU8lb(YZUBhS(?4NSJWUXf6XM*Vjzu0jX2hl&RocEm7OeX{KO|^gH zno-rgMzbN*0NnP7l(EfTgNfX$1mj&W8>Nx#iXU`Yg&2n`N(EZHX#a)|CYi zx=2D#c_`Ye=YE3q{U^Zwc6;rL=R}M^erfv zn+LzbLb)+dHJ3{6L;E1qO)mM^(E9T0Tc|Kp>n=8{jVOz0a+yi$rKFrKR$CWYnIodm z3lQa?BE}6dFJ7oBjQ@D@V_Ab-+4yCnjDbXyX#qObdr=NU3pz!qu-=M>yDu%oz7hSl z#kV=3man8u@CTTA;Nf7UhaA@(fq=swgdGJRXc?5e1Lt8!{{-6fp(dFnb!$P*0t(ce zAz_ASM38zz&}*0q-8pKSPlrH<`}XH;Skhv0jHSw15dCx+D-NC7&>u=rR$zf~<)KW_ z_NYQ#K?6}j@Zz8|E-RF=vMkBE>Rs?|G@zM4H8DLJ&F~Sug5}D>c(UN-hK$kU-5K}% z!hB)e7&Q|U>&DIL)2BtEl$QLJ68szD$JzJhzENC{j(el18XbFUOvWRs^o;pKT0MHy zQRB-QT_0^4*zC`iD@)$KI_)rMaikxZp*62ohC^wh``|)h-C8nsGbO?Vfk=qJUZEms z;N9adVds0LOl<*vEm@HVS6~f;`;~SLlT|D^zC?#C(p__GV2Y&_b(Egy-6#P(JhdJ5{ znjeD08)={qdo~X9QMmJG8sz^wee~fnfohOg|9POp*uVzl)L%fY;HLgbmgr{?XF%k# ztNshT-hi8%H^A)^#LQtdw6-LeDV~8Zp{5TzQ>f@F4AfZXVKtK*-?w2;T!ez)z@~Qo 
zoC8hWQ4ruPxmL=82l5znDo0;xfD9TV(6=0g7Q7Ej0g46G4k!(OI)n4551L$zJ5m0|jd6B~lMUN0XA zd|bhSqT(^{(sv{h-mm%eR|mjffi{J;;By^TZnaUVErYFz*IXSi{4*$c-r4>tJO%zM z=}+Um4CA_No-A#ln;bAhcxDLG9yU324Y^?q-vy&P)T>I>V2^vR(w7eaV5*6w3_ZCC zBLM1qt+wKo=ioSy7lk>SKmp>Z9K;RoHZ@#lqW?@VnlW)ppaPdrSXvP0`7KXW6$xR8 zZ%N*QFAqPxU)ElIxfWJw4TaS@Cj(Jhfm#YVYW(vIl)yP5GE~EukEbyWIL7*qut~Nl z6zvKk5s_w~@Cgk-P zEHUb1*`SemtdX4)1|SptZ*LwJOEA8WuT#b{3tY@%&(KqsCb z6^LaRd|bJPf=lx+qIZ;mc7rvHTrTfMj%WTYK8`@E zs>>1X;iB5h5$0CG4>}Ii{5=XZ-f>QnzMVBjj1l>OQIVn#G~om&xrTehJmaQHgJ&$p zJOhRac#0eI4D`c@DH)L*BU64G0{JR|BOoU!dk`|?W?g_%RsExI5u&z$9wZFknfSr# zU;$z+v{u3em;qD^d$3h`r(%p1U#nT`eWP>D=BYh`?{n2)Vb3@SQlHubK6V%GtORw5 z?bKTKsWH+92au=JJ1zw_2;eRA)jm}a(%d&@v?53~j7X%9#~d9S_vEpx>pmEYqag!` zZ6Gzc2yQi+F<9txD-%vsY!ipvttpsMAEw8?x1;=K1Qf19gK#k|iVWUxXTsxvt3Nt5 z@=3S;uTUdLj+wwmP@_CsK%eN7tdWX`_Ng9%)L#a^gS@_ilNf0FFR{WEdeeV}wO{4o zQRaF~a2z?N+P6q>j5}D{NpOt1I3PL_Uf$M!4fTDlJ}RGGmj-Rd4tz*d79n~;P+hU> z!7=CP*M)?RtrHl?&ej)jomLqV6oP~)_FqGZ;Xj-QNTLW8GDMMY|JxfTXiWAU^H5w>SJ8B>n;PM4-%%q^4UH#P$#vJ5NEJWYNp%PZ`6N<4Ll zK+Zh0XK>mBGyMo!j5&t24#E&m94{LP2A+fS|Uc&1ymUX|hsw_^d-%2cG zKqofjp_?!&ABd#v2UV9G;`bK>hiGyti_xIzCPDltV=gv;BzjD+UhE^&V6m+KIzhy+ zx|OBF4=#bfiSmaqoSKN>ne3>bg{e5mC8W`0!5 z!e$HvBkRo#dCdv$0(pv&Hf9yXDHsnSlA}ANen@t!4eC9CHv>$+j+6cyJZ#Y0A1^B; z!H{J@fj2V%=)OtNksVk${ZlyhVHH_KmrYFLB1Gg-!FTcizH<6B6}%|SLx3~QmTR|r zjXP_|zJkM!*$D8gJOBoo^Aqs8Cnd_>b}a$FcmP0+261ACc^t}$)!q%DH({;`==Ai> zi00IThW+Ci@U?#iI(`xcHz6gukh@Jy9!c`hM5x8ObHTd^o>6a74mON09iBbd35aEh zK5GmDRShc30TXaM{I3SVvhRKB$dTj63*J6qVR7^*30gNBRK$(0+=TPJFxLL$$vV8c zh5gUN%LlC_JoV{1}U;-$KAP&G1^#ex}>jfFo{uqT9EpSNo~APtH7l zuU^B@$vZz{?5utT>#dpF5^cTULo+u{%-2lVOCRNpEDWO-{D%5jxte3LOs$4U7j8)S zG0MlYM>;_?(9-4kdYQ!xo{#fJ6y5dF3-J$$r#tW%D~$kG%K2kALw}&87{EQ9`xE_l zaIX)0$uqm!CCNer6kW*h*Hg$bicXKc(Fz^JxK>l(o4{J+wWPp1Z_!1}Ry^xNos7z$ zRdy8-tTn`yFw#J0K;_DMlHw$B;J`&q6Z7;5_EDkl16+Mhpy#)+#9oGGbb8Mk#2JMt*L_6a0In z=3EJ(Z6wcq2&rP5r}t5@Zt>5ERJ$=seT+^Gpa&DRU~(W`!A9*Fg&2e+MP=Ko2?`{1*|*@A zrl+rZm&D!{nbFS(4Pb+bUJ?EXM)(MUoaip?WHAm9^QJ(hMWcYnM2Yx 
zNu@|3g<>Nh1_`~A8Il1V2ixF+-A#UztpEoc0aCYPYSHEAuFh`c80F@Ri&dvOnpm9gGH zJ#q_N$c1$K2MJT4F%qjSa6u;QTLLd|6~>F(&lL0ovJuSc_vq-rxTJY^80yn;-)Y39 zRqUTQT<~HpV6@@*slp*d92_Zla}4yzH0aigvmQNG#&eP(81aM%iG@QD&68HIOl%k;i+CVS6Oy0Lvl5ej6>? z=gyuxe_`_C3zsgx_|lb^r(U`G>b2|BH*UW6`WwaaY(@EV^9$9*rCPnwY~9vDxP0gC zy_NQ%!$*!Dd-nLJpZm;#Lg}48TA^;$%@_I&T&#=15WE(Nn-GEb9q`&U`sh300Ew=( zn!?@GeOtWx-y|P?VCipyK3q^pN~P~39xGYP9w;gU`T!H)UzknnX>pLc|uBT?lBWiMM1138In%FmQG!7tEP!{!T#Y65D1DZ)y zk_ai@_Of^5+GY5~lEi+b6T|X@16%N%q+{?tPI`lpaEBrTw2?GKmT9f#&%=K~>YVvyM@=2!(Y?XwR0PjVtgLA! z5(tBhvQ_wE8q4)4RawwIp&@4GM#DFzu1z8V`sUBSlM0Pc@ zk?CC7h{RhHH}i5ANE5_fj%#J|z|#?UHVnRQh3wzuY(cqk(43T-0iPIkXaDWXB z{S9%Yo6GYHSi{6WbWnU)US*}is7F)q0l|)Ci5LN0;4hU?B?7~62t+j8_CPdPf+h@+ z@bcKJpR9U&+b2Ku)ZV8@_l@m;X8hp9iBr#?dGoDz+K-`jVy6O z?WI6~ZN!XtB64Ln4mQ5CjI>xt2Q|n0>Vfx<_ae8$!v{}#Qc^Vgt62qvnc37uj zI_i0cdx1H0MVJH6p#TfW-5SI1# zd5x6K$`VrbTmk+2sM-KV{RgPKOSn-Zx8M(!nm&jTdRyUaGb>%r)8!-*&Zi=U6FT z`p6ypd!a zLl!toR&WJpcgfXPS=-(bB5S!j2C_5_TyB2>aB&x@Z+xx`7a64imIO1M>4MqzO@Yo$ z?EUeAD!D++Jz%s@d{n9miRcEypqMwBC9~_~PFYvERcQlT@6xL#nF^9z)c=7+uHrNZ zFO9^2MrcGaeJdtaqq`3JKcfy9GaRo%Oj`OsvFQd)aue1Ap8l#TahC6KHH`cnhU6s$ z=8y~PA}e->_|{+x{x^)$$Hs|ulqD7X0{VmjS$k11jZNF{ zHS1Z(Up)m4kJ)$^%>7eVBabZXxGn4+(yr6~ZS08>c=l|`LDbQ}&>KZ9wldcJQBpB8 z+?M@OLxR_t5CMPXooaJAFmcLpZ>8$j)VMHXtP%ac0W5Y%W-M5LTbP}=!MzQ#%xv(d z%phJ+1W4m~P_i>*Iqs<@<5bw^IZqMHq+Fy>nhlQnAK}Wl1k9TlZ`LMxl@e4(rglaH z&6+X+(v3?w$n=YNtiR2Ll;~d$<~&SW?g*e6z}$cRzwn^GCFJsv2+!je><5~Wp}s4! 
zm&+!}_`9Jf(aen#bhssO(cC$r(unKX`gc`EW#2^!q?qGY7%<=b1ElguFKK_@)?e)~ zx#tkFbKy$C_2($Xffi8l^;fW+0At<0Wokg&x00I1Io3gryNg-<_u*U}gei^k13C|| z;SpncaIvBd6nE4HzYUkHlw^}4NBwq~hcLkAqAwq}$p8o08gt7brJrWSP`GJPw*MgY zf5ecx8d}XIY5&h!uUgk&hIhE-=V{JU4FNGXwXX0Vg2Eao8tBy{hkwt}BUP+rXy3;%_T8QB zAqB52u4lULdiiC7RQ>7^V-3<5Zau|=8 zrU(GS{n@J{NUVeRC$MWy8p1ST^d{*VI|K!maTF+q_=I=1Hx__p31H+f`wmY{iu5@U z9mUcS4247Eiiz}0MJd=wAf$r_+ZmK_2E(D;&}l9e|Ch!9`X^5**V` zkoE+wvyrWLP4x8M;Nadslahepk4oX$8V^?DU?mysNxm-$(8}mt#9g);5W7=(yD2MJ6Hm>_#lG9BfgIyecH~rNAqJITQL}HK!7>) z+o}Br_qX#)zTY|k?eUHeb2@(m6F+d4P-t&HTdDZ1@W3P!`01O^n2IP51FT^(Y@z?` z#LLK=iQlMa?F9=?;%>A#0ZSYf8YN7)VipVrS$nL$X3BBj0{3Y&bav8J54v<{I?x5r zM8RMeQ(r=X7)&7t6#KVUQ|)bVKf;_uRE{Zy?rIwP0k`h*(+e|3FQ7{jfnVG++5w!9 z?izBP9@tgtC~5Ym=7S{WYWlgOOWJHV|^OpG}Mkgr8w$V@~^G*R*kOj_JevMt@`$X=IR)HGi? zh&IKc2I|ThA18t0Y~%(5kIY|hEa`v1zQ4%B21c`0pqHeW$$&?RYEPrBm4hVZJ|hT| z5h&8zXsJ5G_^g5PP7Yx^?U19N0uny7EnVdBmvDHWZ`FsEtmsP9e~t^J%t6(rf0|kn zRXY3}>@3Y_q}oxf#(l{bKs{6MjvHkgwUTIuTDZg()6(Ob%Cg5=CS)c8gcvdr9K9f4 z=y0&_(9z$J@z@uEXZXpnRZ8XMDf~*(dhIu;+EU?1i|_T!VF1_PQJaz;oOj5&@2O$v z0Aw|r+mH9LnuG&Rz0dbU=kCd)V_FkQvjB@e4=eqTWkN391 z)wxOiM;s3IKJ$H(~jov0s z?Z?PgmK(HPc)4BJ*4%wcLyaj0QU~pfcWHa;qhxL(2G|e(=*gk;^gMpDwbkfk_nnyx zGgio!Rx>b$Qo&P*f~3HxxJMh&4{WByc1W<6>|90!gWdkQ7i14I;}%yS)8GVSmOeBB zo|sZ8+X>3Cf`Ggx5E;~A5r1YksuB1mmFhy0h}n($u>-fk#RHfucFIne!_<1r{zYUQ z!_Nx!c$rb}4YpB&LQn%m*5s4?fKtIcVDHdzpE3#ZW`8^$qpZmwqj7*nhO zJnSyPo+z_*l&jHADmRHeamEgn3EDa1j8eeCx-oLJ55OJA&!m6@!ZerU01Zx{y$Kc2 z-r{j!w!{NzpDk$yL6Fa}JP6=KF85q-gz^?D>AYb=w|KKJ6TBkEGyHVS4&XgNd;%i` zn=#hgGuPjIdq&SR-rE0E=k2H3n>SSStpgbg<^6g50?wpknf=9B4+lCC6v7Z>(LgF9 zr28&NcWCSI5FEocPpp#(9%6O&V|D22gU^9~+yu&yVTeyDVFmkwjAYGFd42~zKYk2` z(H~HbQBmSdc({DZamE62kE@jh_O0EfP=ZvwM+&v^QAAh9B(a!@gMhM4871 zoa1Pcr8zE)W^XVBsxD*w%r?m#rSn%N&rX#tO`g3ld98G9@`cIQOXuF0o{XM&)Sc_o z*Yq)V{umF$OXMiwNib3VE^9>R7Ih(oo!lES^l^#zB;p^MLsel5($5e}>Bg|K{HR98 zap^__SEku9^npGDeCo5%tDglBG(2h_QmHn9&Y`_yZY2F*CM?NOtjx9)8S03m&u@eurCgVkZ{dRQ 
zqbZ(zTLHfN0X@x6KrLdoHb{HAJq#kn)qRO%+@1juBQPi|HvZlSas+Uf`2n|QUbh-K zT*EB-JjMhOr`vNcBhg5{wBWU{As$&NCV2&JWJIw*i}(-?_t;~X8MXm}R+DeD$co_8 ze7&2#@%p#KxI7 zi!aOxDRswbKXXIAZAK5b`4SyY=IcElZVXzY7mmA))ryc+G+bhN{o68Iao>Ea;}A<9 z#{rh!2t^`x2P!0r$>7m|bmRAOHj)jZzMQs<`&(RKx@#<~b@ zgmAq;woq$kp%^iiVS~+xE@DlQZU_AfA8XX4PiEWI$p$Fl&5j$QosoEhEKxRT%0BTx z8J{01!F)|aAomzA>0~z_`68cSp0n(8(9BGZ^5Al@eJ!>wzq~@b7%V!FsJmfSyEz7- zuis>^wCp2|wj(v??2RkarSoS?=Wa}0xH4I~dUpC!v?-~-`pUKG>k!B=$jQv*17rFu zhq=i^gNHjj`~(kQar&ncE(ym8{9F_u3~cQKuiBq`hd^#Rrt zcsR%d)n8G*L_N?C_(UjA_@EjOWWmBm1kZZ#Q}l3gA>>Zt15Nl8=V>6Q6?_E;7g3Q% z{qd79^0Uvl{6gK16!L$ad}=6_8_f;n26CtJM{~KHm-|@mNgO|w-u_^T4lG}n$&ZN+D%l`&#A5iQ7 literal 0 HcmV?d00001 diff --git a/requests/__version__.py b/requests/__version__.py new file mode 100644 index 0000000..b9e7df4 --- /dev/null +++ b/requests/__version__.py @@ -0,0 +1,14 @@ +# .-. .-. .-. . . .-. .-. .-. .-. +# |( |- |.| | | |- `-. | `-. +# ' ' `-' `-`.`-' `-' `-' ' `-' + +__title__ = 'requests' +__description__ = 'Python HTTP for Humans.' +__url__ = 'https://requests.readthedocs.io' +__version__ = '2.23.0' +__build__ = 0x022300 +__author__ = 'Kenneth Reitz' +__author_email__ = 'me@kennethreitz.org' +__license__ = 'Apache 2.0' +__copyright__ = 'Copyright 2020 Kenneth Reitz' +__cake__ = u'\u2728 \U0001f370 \u2728' diff --git a/requests/_internal_utils.py b/requests/_internal_utils.py new file mode 100644 index 0000000..759d9a5 --- /dev/null +++ b/requests/_internal_utils.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + +""" +requests._internal_utils +~~~~~~~~~~~~~~ + +Provides utility functions that are consumed internally by Requests +which depend on extremely few external helpers (such as compat) +""" + +from .compat import is_py2, builtin_str, str + + +def to_native_string(string, encoding='ascii'): + """Given a string object, regardless of type, returns a representation of + that string in the native string type, encoding and decoding where + necessary. This assumes ASCII unless told otherwise. 
+ """ + if isinstance(string, builtin_str): + out = string + else: + if is_py2: + out = string.encode(encoding) + else: + out = string.decode(encoding) + + return out + + +def unicode_is_ascii(u_string): + """Determine if unicode string only contains ASCII characters. + + :param str u_string: unicode string to check. Must be unicode + and not Python 2 `str`. + :rtype: bool + """ + assert isinstance(u_string, str) + try: + u_string.encode('ascii') + return True + except UnicodeEncodeError: + return False diff --git a/requests/adapters.py b/requests/adapters.py new file mode 100644 index 0000000..fa4d9b3 --- /dev/null +++ b/requests/adapters.py @@ -0,0 +1,533 @@ +# -*- coding: utf-8 -*- + +""" +requests.adapters +~~~~~~~~~~~~~~~~~ + +This module contains the transport adapters that Requests uses to define +and maintain connections. +""" + +import os.path +import socket + +from urllib3.poolmanager import PoolManager, proxy_from_url +from urllib3.response import HTTPResponse +from urllib3.util import parse_url +from urllib3.util import Timeout as TimeoutSauce +from urllib3.util.retry import Retry +from urllib3.exceptions import ClosedPoolError +from urllib3.exceptions import ConnectTimeoutError +from urllib3.exceptions import HTTPError as _HTTPError +from urllib3.exceptions import MaxRetryError +from urllib3.exceptions import NewConnectionError +from urllib3.exceptions import ProxyError as _ProxyError +from urllib3.exceptions import ProtocolError +from urllib3.exceptions import ReadTimeoutError +from urllib3.exceptions import SSLError as _SSLError +from urllib3.exceptions import ResponseError +from urllib3.exceptions import LocationValueError + +from .models import Response +from .compat import urlparse, basestring +from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths, + get_encoding_from_headers, prepend_scheme_if_needed, + get_auth_from_url, urldefragauth, select_proxy) +from .structures import CaseInsensitiveDict +from .cookies import 
extract_cookies_to_jar +from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError, + ProxyError, RetryError, InvalidSchema, InvalidProxyURL, + InvalidURL) +from .auth import _basic_auth_str + +try: + from urllib3.contrib.socks import SOCKSProxyManager +except ImportError: + def SOCKSProxyManager(*args, **kwargs): + raise InvalidSchema("Missing dependencies for SOCKS support.") + +DEFAULT_POOLBLOCK = False +DEFAULT_POOLSIZE = 10 +DEFAULT_RETRIES = 0 +DEFAULT_POOL_TIMEOUT = None + + +class BaseAdapter(object): + """The Base Transport Adapter""" + + def __init__(self): + super(BaseAdapter, self).__init__() + + def send(self, request, stream=False, timeout=None, verify=True, + cert=None, proxies=None): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + """ + raise NotImplementedError + + def close(self): + """Cleans up adapter specific items.""" + raise NotImplementedError + + +class HTTPAdapter(BaseAdapter): + """The built-in HTTP Adapter for urllib3. + + Provides a general-case interface for Requests sessions to contact HTTP and + HTTPS urls by implementing the Transport Adapter interface. This class will + usually be created by the :class:`Session ` class under the + covers. + + :param pool_connections: The number of urllib3 connection pools to cache. 
+ :param pool_maxsize: The maximum number of connections to save in the pool. + :param max_retries: The maximum number of retries each connection + should attempt. Note, this applies only to failed DNS lookups, socket + connections and connection timeouts, never to requests where data has + made it to the server. By default, Requests does not retry failed + connections. If you need granular control over the conditions under + which we retry a request, import urllib3's ``Retry`` class and pass + that instead. + :param pool_block: Whether the connection pool should block for connections. + + Usage:: + + >>> import requests + >>> s = requests.Session() + >>> a = requests.adapters.HTTPAdapter(max_retries=3) + >>> s.mount('http://', a) + """ + __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', + '_pool_block'] + + def __init__(self, pool_connections=DEFAULT_POOLSIZE, + pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES, + pool_block=DEFAULT_POOLBLOCK): + if max_retries == DEFAULT_RETRIES: + self.max_retries = Retry(0, read=False) + else: + self.max_retries = Retry.from_int(max_retries) + self.config = {} + self.proxy_manager = {} + + super(HTTPAdapter, self).__init__() + + self._pool_connections = pool_connections + self._pool_maxsize = pool_maxsize + self._pool_block = pool_block + + self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) + + def __getstate__(self): + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + # Can't handle by adding 'proxy_manager' to self.__attrs__ because + # self.poolmanager uses a lambda function, which isn't pickleable. 
+ self.proxy_manager = {} + self.config = {} + + for attr, value in state.items(): + setattr(self, attr, value) + + self.init_poolmanager(self._pool_connections, self._pool_maxsize, + block=self._pool_block) + + def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs): + """Initializes a urllib3 PoolManager. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param connections: The number of urllib3 connection pools to cache. + :param maxsize: The maximum number of connections to save in the pool. + :param block: Block when no free connections are available. + :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. + """ + # save these values for pickling + self._pool_connections = connections + self._pool_maxsize = maxsize + self._pool_block = block + + self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize, + block=block, strict=True, **pool_kwargs) + + def proxy_manager_for(self, proxy, **proxy_kwargs): + """Return urllib3 ProxyManager for the given proxy. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The proxy to return a urllib3 ProxyManager for. + :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. 
+ :returns: ProxyManager + :rtype: urllib3.ProxyManager + """ + if proxy in self.proxy_manager: + manager = self.proxy_manager[proxy] + elif proxy.lower().startswith('socks'): + username, password = get_auth_from_url(proxy) + manager = self.proxy_manager[proxy] = SOCKSProxyManager( + proxy, + username=username, + password=password, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs + ) + else: + proxy_headers = self.proxy_headers(proxy) + manager = self.proxy_manager[proxy] = proxy_from_url( + proxy, + proxy_headers=proxy_headers, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs) + + return manager + + def cert_verify(self, conn, url, verify, cert): + """Verify a SSL certificate. This method should not be called from user + code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param conn: The urllib3 connection object associated with the cert. + :param url: The requested URL. + :param verify: Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: The SSL certificate to verify. + """ + if url.lower().startswith('https') and verify: + + cert_loc = None + + # Allow self-specified cert location. 
+ if verify is not True: + cert_loc = verify + + if not cert_loc: + cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) + + if not cert_loc or not os.path.exists(cert_loc): + raise IOError("Could not find a suitable TLS CA certificate bundle, " + "invalid path: {}".format(cert_loc)) + + conn.cert_reqs = 'CERT_REQUIRED' + + if not os.path.isdir(cert_loc): + conn.ca_certs = cert_loc + else: + conn.ca_cert_dir = cert_loc + else: + conn.cert_reqs = 'CERT_NONE' + conn.ca_certs = None + conn.ca_cert_dir = None + + if cert: + if not isinstance(cert, basestring): + conn.cert_file = cert[0] + conn.key_file = cert[1] + else: + conn.cert_file = cert + conn.key_file = None + if conn.cert_file and not os.path.exists(conn.cert_file): + raise IOError("Could not find the TLS certificate file, " + "invalid path: {}".format(conn.cert_file)) + if conn.key_file and not os.path.exists(conn.key_file): + raise IOError("Could not find the TLS key file, " + "invalid path: {}".format(conn.key_file)) + + def build_response(self, req, resp): + """Builds a :class:`Response ` object from a urllib3 + response. This should not be called from user code, and is only exposed + for use when subclassing the + :class:`HTTPAdapter ` + + :param req: The :class:`PreparedRequest ` used to generate the response. + :param resp: The urllib3 response object. + :rtype: requests.Response + """ + response = Response() + + # Fallback to None if there's no status_code, for whatever reason. + response.status_code = getattr(resp, 'status', None) + + # Make headers case-insensitive. + response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {})) + + # Set encoding. + response.encoding = get_encoding_from_headers(response.headers) + response.raw = resp + response.reason = response.raw.reason + + if isinstance(req.url, bytes): + response.url = req.url.decode('utf-8') + else: + response.url = req.url + + # Add new cookies from the server. 
+ extract_cookies_to_jar(response.cookies, req, resp) + + # Give the Response some context. + response.request = req + response.connection = self + + return response + + def get_connection(self, url, proxies=None): + """Returns a urllib3 connection for the given URL. This should not be + called from user code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param url: The URL to connect to. + :param proxies: (optional) A Requests-style dictionary of proxies used on this request. + :rtype: urllib3.ConnectionPool + """ + proxy = select_proxy(url, proxies) + + if proxy: + proxy = prepend_scheme_if_needed(proxy, 'http') + proxy_url = parse_url(proxy) + if not proxy_url.host: + raise InvalidProxyURL("Please check proxy URL. It is malformed" + " and could be missing the host.") + proxy_manager = self.proxy_manager_for(proxy) + conn = proxy_manager.connection_from_url(url) + else: + # Only scheme should be lower case + parsed = urlparse(url) + url = parsed.geturl() + conn = self.poolmanager.connection_from_url(url) + + return conn + + def close(self): + """Disposes of any internal state. + + Currently, this closes the PoolManager and any active ProxyManager, + which closes any pooled connections. + """ + self.poolmanager.clear() + for proxy in self.proxy_manager.values(): + proxy.clear() + + def request_url(self, request, proxies): + """Obtain the url to use when making the final request. + + If the message is being sent through a HTTP proxy, the full URL has to + be used. Otherwise, we should only use the path portion of the URL. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` being sent. + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. 
+ :rtype: str + """ + proxy = select_proxy(request.url, proxies) + scheme = urlparse(request.url).scheme + + is_proxied_http_request = (proxy and scheme != 'https') + using_socks_proxy = False + if proxy: + proxy_scheme = urlparse(proxy).scheme.lower() + using_socks_proxy = proxy_scheme.startswith('socks') + + url = request.path_url + if is_proxied_http_request and not using_socks_proxy: + url = urldefragauth(request.url) + + return url + + def add_headers(self, request, **kwargs): + """Add any headers needed by the connection. As of v2.0 this does + nothing by default, but is left for overriding by users that subclass + the :class:`HTTPAdapter `. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` to add headers to. + :param kwargs: The keyword arguments from the call to send(). + """ + pass + + def proxy_headers(self, proxy): + """Returns a dictionary of the headers to add to any request sent + through a proxy. This works with urllib3 magic to ensure that they are + correctly sent to the proxy, rather than in a tunnelled request if + CONNECT is being used. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The url of the proxy being used for this request. + :rtype: dict + """ + headers = {} + username, password = get_auth_from_url(proxy) + + if username: + headers['Proxy-Authorization'] = _basic_auth_str(username, + password) + + return headers + + def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. 
+ :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple or urllib3 Timeout object + :param verify: (optional) Either a boolean, in which case it controls whether + we verify the server's TLS certificate, or a string, in which case it + must be a path to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + :rtype: requests.Response + """ + + try: + conn = self.get_connection(request.url, proxies) + except LocationValueError as e: + raise InvalidURL(e, request=request) + + self.cert_verify(conn, request.url, verify, cert) + url = self.request_url(request, proxies) + self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies) + + chunked = not (request.body is None or 'Content-Length' in request.headers) + + if isinstance(timeout, tuple): + try: + connect, read = timeout + timeout = TimeoutSauce(connect=connect, read=read) + except ValueError as e: + # this may raise a string formatting error. + err = ("Invalid timeout {}. Pass a (connect, read) " + "timeout tuple, or a single float to set " + "both timeouts to the same value".format(timeout)) + raise ValueError(err) + elif isinstance(timeout, TimeoutSauce): + pass + else: + timeout = TimeoutSauce(connect=timeout, read=timeout) + + try: + if not chunked: + resp = conn.urlopen( + method=request.method, + url=url, + body=request.body, + headers=request.headers, + redirect=False, + assert_same_host=False, + preload_content=False, + decode_content=False, + retries=self.max_retries, + timeout=timeout + ) + + # Send the request. 
+ else: + if hasattr(conn, 'proxy_pool'): + conn = conn.proxy_pool + + low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT) + + try: + low_conn.putrequest(request.method, + url, + skip_accept_encoding=True) + + for header, value in request.headers.items(): + low_conn.putheader(header, value) + + low_conn.endheaders() + + for i in request.body: + low_conn.send(hex(len(i))[2:].encode('utf-8')) + low_conn.send(b'\r\n') + low_conn.send(i) + low_conn.send(b'\r\n') + low_conn.send(b'0\r\n\r\n') + + # Receive the response from the server + try: + # For Python 2.7, use buffering of HTTP responses + r = low_conn.getresponse(buffering=True) + except TypeError: + # For compatibility with Python 3.3+ + r = low_conn.getresponse() + + resp = HTTPResponse.from_httplib( + r, + pool=conn, + connection=low_conn, + preload_content=False, + decode_content=False + ) + except: + # If we hit any problems here, clean up the connection. + # Then, reraise so that we can handle the actual exception. + low_conn.close() + raise + + except (ProtocolError, socket.error) as err: + raise ConnectionError(err, request=request) + + except MaxRetryError as e: + if isinstance(e.reason, ConnectTimeoutError): + # TODO: Remove this in 3.0.0: see #2811 + if not isinstance(e.reason, NewConnectionError): + raise ConnectTimeout(e, request=request) + + if isinstance(e.reason, ResponseError): + raise RetryError(e, request=request) + + if isinstance(e.reason, _ProxyError): + raise ProxyError(e, request=request) + + if isinstance(e.reason, _SSLError): + # This branch is for urllib3 v1.22 and later. 
+ raise SSLError(e, request=request) + + raise ConnectionError(e, request=request) + + except ClosedPoolError as e: + raise ConnectionError(e, request=request) + + except _ProxyError as e: + raise ProxyError(e) + + except (_SSLError, _HTTPError) as e: + if isinstance(e, _SSLError): + # This branch is for urllib3 versions earlier than v1.22 + raise SSLError(e, request=request) + elif isinstance(e, ReadTimeoutError): + raise ReadTimeout(e, request=request) + else: + raise + + return self.build_response(request, resp) diff --git a/requests/api.py b/requests/api.py new file mode 100644 index 0000000..e978e20 --- /dev/null +++ b/requests/api.py @@ -0,0 +1,161 @@ +# -*- coding: utf-8 -*- + +""" +requests.api +~~~~~~~~~~~~ + +This module implements the Requests API. + +:copyright: (c) 2012 by Kenneth Reitz. +:license: Apache2, see LICENSE for more details. +""" + +from . import sessions + + +def request(method, url, **kwargs): + """Constructs and sends a :class:`Request `. + + :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. + :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. 
+ ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` + or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string + defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers + to add for the file. + :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How many seconds to wait for the server to send data + before giving up, as a float, or a :ref:`(connect timeout, read + timeout) ` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. + :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. + :param stream: (optional) if ``False``, the response content will be immediately downloaded. + :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. + :return: :class:`Response ` object + :rtype: requests.Response + + Usage:: + + >>> import requests + >>> req = requests.request('GET', 'https://httpbin.org/get') + >>> req + + """ + + # By using the 'with' statement we are sure the session is closed, thus we + # avoid leaving sockets open which can trigger a ResourceWarning in some + # cases, and look like a memory leak in others. + with sessions.Session() as session: + return session.request(method=method, url=url, **kwargs) + + +def get(url, params=None, **kwargs): + r"""Sends a GET request. + + :param url: URL for the new :class:`Request` object. 
+ :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', True) + return request('get', url, params=params, **kwargs) + + +def options(url, **kwargs): + r"""Sends an OPTIONS request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', True) + return request('options', url, **kwargs) + + +def head(url, **kwargs): + r"""Sends a HEAD request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. If + `allow_redirects` is not provided, it will be set to `False` (as + opposed to the default :meth:`request` behavior). + :return: :class:`Response ` object + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', False) + return request('head', url, **kwargs) + + +def post(url, data=None, json=None, **kwargs): + r"""Sends a POST request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json data to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request('post', url, data=data, json=json, **kwargs) + + +def put(url, data=None, **kwargs): + r"""Sends a PUT request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. 
+ :param json: (optional) json data to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request('put', url, data=data, **kwargs) + + +def patch(url, data=None, **kwargs): + r"""Sends a PATCH request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json data to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request('patch', url, data=data, **kwargs) + + +def delete(url, **kwargs): + r"""Sends a DELETE request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request('delete', url, **kwargs) diff --git a/requests/auth.py b/requests/auth.py new file mode 100644 index 0000000..eeface3 --- /dev/null +++ b/requests/auth.py @@ -0,0 +1,305 @@ +# -*- coding: utf-8 -*- + +""" +requests.auth +~~~~~~~~~~~~~ + +This module contains the authentication handlers for Requests. 
+""" + +import os +import re +import time +import hashlib +import threading +import warnings + +from base64 import b64encode + +from .compat import urlparse, str, basestring +from .cookies import extract_cookies_to_jar +from ._internal_utils import to_native_string +from .utils import parse_dict_header + +CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' +CONTENT_TYPE_MULTI_PART = 'multipart/form-data' + + +def _basic_auth_str(username, password): + """Returns a Basic Auth string.""" + + # "I want us to put a big-ol' comment on top of it that + # says that this behaviour is dumb but we need to preserve + # it because people are relying on it." + # - Lukasa + # + # These are here solely to maintain backwards compatibility + # for things like ints. This will be removed in 3.0.0. + if not isinstance(username, basestring): + warnings.warn( + "Non-string usernames will no longer be supported in Requests " + "3.0.0. Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(username), + category=DeprecationWarning, + ) + username = str(username) + + if not isinstance(password, basestring): + warnings.warn( + "Non-string passwords will no longer be supported in Requests " + "3.0.0. 
Please convert the object you've passed in ({!r}) to " + "a string or bytes object in the near future to avoid " + "problems.".format(type(password)), + category=DeprecationWarning, + ) + password = str(password) + # -- End Removal -- + + if isinstance(username, str): + username = username.encode('latin1') + + if isinstance(password, str): + password = password.encode('latin1') + + authstr = 'Basic ' + to_native_string( + b64encode(b':'.join((username, password))).strip() + ) + + return authstr + + +class AuthBase(object): + """Base class that all auth implementations derive from""" + + def __call__(self, r): + raise NotImplementedError('Auth hooks must be callable.') + + +class HTTPBasicAuth(AuthBase): + """Attaches HTTP Basic Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + + def __eq__(self, other): + return all([ + self.username == getattr(other, 'username', None), + self.password == getattr(other, 'password', None) + ]) + + def __ne__(self, other): + return not self == other + + def __call__(self, r): + r.headers['Authorization'] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPProxyAuth(HTTPBasicAuth): + """Attaches HTTP Proxy Authentication to a given Request object.""" + + def __call__(self, r): + r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) + return r + + +class HTTPDigestAuth(AuthBase): + """Attaches HTTP Digest Authentication to the given Request object.""" + + def __init__(self, username, password): + self.username = username + self.password = password + # Keep state in per-thread local storage + self._thread_local = threading.local() + + def init_per_thread_state(self): + # Ensure state is initialized just once per-thread + if not hasattr(self._thread_local, 'init'): + self._thread_local.init = True + self._thread_local.last_nonce = '' + self._thread_local.nonce_count = 0 + 
self._thread_local.chal = {} + self._thread_local.pos = None + self._thread_local.num_401_calls = None + + def build_digest_header(self, method, url): + """ + :rtype: str + """ + + realm = self._thread_local.chal['realm'] + nonce = self._thread_local.chal['nonce'] + qop = self._thread_local.chal.get('qop') + algorithm = self._thread_local.chal.get('algorithm') + opaque = self._thread_local.chal.get('opaque') + hash_utf8 = None + + if algorithm is None: + _algorithm = 'MD5' + else: + _algorithm = algorithm.upper() + # lambdas assume digest modules are imported at the top level + if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': + def md5_utf8(x): + if isinstance(x, str): + x = x.encode('utf-8') + return hashlib.md5(x).hexdigest() + hash_utf8 = md5_utf8 + elif _algorithm == 'SHA': + def sha_utf8(x): + if isinstance(x, str): + x = x.encode('utf-8') + return hashlib.sha1(x).hexdigest() + hash_utf8 = sha_utf8 + elif _algorithm == 'SHA-256': + def sha256_utf8(x): + if isinstance(x, str): + x = x.encode('utf-8') + return hashlib.sha256(x).hexdigest() + hash_utf8 = sha256_utf8 + elif _algorithm == 'SHA-512': + def sha512_utf8(x): + if isinstance(x, str): + x = x.encode('utf-8') + return hashlib.sha512(x).hexdigest() + hash_utf8 = sha512_utf8 + + KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) + + if hash_utf8 is None: + return None + + # XXX not implemented yet + entdig = None + p_parsed = urlparse(url) + #: path is request-uri defined in RFC 2616 which should not be empty + path = p_parsed.path or "/" + if p_parsed.query: + path += '?' 
+ p_parsed.query + + A1 = '%s:%s:%s' % (self.username, realm, self.password) + A2 = '%s:%s' % (method, path) + + HA1 = hash_utf8(A1) + HA2 = hash_utf8(A2) + + if nonce == self._thread_local.last_nonce: + self._thread_local.nonce_count += 1 + else: + self._thread_local.nonce_count = 1 + ncvalue = '%08x' % self._thread_local.nonce_count + s = str(self._thread_local.nonce_count).encode('utf-8') + s += nonce.encode('utf-8') + s += time.ctime().encode('utf-8') + s += os.urandom(8) + + cnonce = (hashlib.sha1(s).hexdigest()[:16]) + if _algorithm == 'MD5-SESS': + HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) + + if not qop: + respdig = KD(HA1, "%s:%s" % (nonce, HA2)) + elif qop == 'auth' or 'auth' in qop.split(','): + noncebit = "%s:%s:%s:%s:%s" % ( + nonce, ncvalue, cnonce, 'auth', HA2 + ) + respdig = KD(HA1, noncebit) + else: + # XXX handle auth-int. + return None + + self._thread_local.last_nonce = nonce + + # XXX should the partial digests be encoded too? + base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ + 'response="%s"' % (self.username, realm, nonce, path, respdig) + if opaque: + base += ', opaque="%s"' % opaque + if algorithm: + base += ', algorithm="%s"' % algorithm + if entdig: + base += ', digest="%s"' % entdig + if qop: + base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) + + return 'Digest %s' % (base) + + def handle_redirect(self, r, **kwargs): + """Reset num_401_calls counter on redirects.""" + if r.is_redirect: + self._thread_local.num_401_calls = 1 + + def handle_401(self, r, **kwargs): + """ + Takes the given response and tries digest-auth, if needed. + + :rtype: requests.Response + """ + + # If response is not 4xx, do not auth + # See https://github.com/psf/requests/issues/3772 + if not 400 <= r.status_code < 500: + self._thread_local.num_401_calls = 1 + return r + + if self._thread_local.pos is not None: + # Rewind the file position indicator of the body to where + # it was to resend the request. 
+ r.request.body.seek(self._thread_local.pos) + s_auth = r.headers.get('www-authenticate', '') + + if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2: + + self._thread_local.num_401_calls += 1 + pat = re.compile(r'digest ', flags=re.IGNORECASE) + self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1)) + + # Consume content and release the original connection + # to allow our new request to reuse the same one. + r.content + r.close() + prep = r.request.copy() + extract_cookies_to_jar(prep._cookies, r.request, r.raw) + prep.prepare_cookies(prep._cookies) + + prep.headers['Authorization'] = self.build_digest_header( + prep.method, prep.url) + _r = r.connection.send(prep, **kwargs) + _r.history.append(r) + _r.request = prep + + return _r + + self._thread_local.num_401_calls = 1 + return r + + def __call__(self, r): + # Initialize per-thread state, if needed + self.init_per_thread_state() + # If we have a saved nonce, skip the 401 + if self._thread_local.last_nonce: + r.headers['Authorization'] = self.build_digest_header(r.method, r.url) + try: + self._thread_local.pos = r.body.tell() + except AttributeError: + # In the case of HTTPDigestAuth being reused and the body of + # the previous request was a file-like object, pos has the + # file position of the previous body. Ensure it's set to + # None. 
+ self._thread_local.pos = None + r.register_hook('response', self.handle_401) + r.register_hook('response', self.handle_redirect) + self._thread_local.num_401_calls = 1 + + return r + + def __eq__(self, other): + return all([ + self.username == getattr(other, 'username', None), + self.password == getattr(other, 'password', None) + ]) + + def __ne__(self, other): + return not self == other diff --git a/requests/certs.py b/requests/certs.py new file mode 100644 index 0000000..d1a378d --- /dev/null +++ b/requests/certs.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +requests.certs +~~~~~~~~~~~~~~ + +This module returns the preferred default CA certificate bundle. There is +only one — the one from the certifi package. + +If you are packaging Requests, e.g., for a Linux distribution or a managed +environment, you can change the definition of where() to return a separately +packaged CA bundle. +""" +from certifi import where + +if __name__ == '__main__': + print(where()) diff --git a/requests/compat.py b/requests/compat.py new file mode 100644 index 0000000..5de0769 --- /dev/null +++ b/requests/compat.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- + +""" +requests.compat +~~~~~~~~~~~~~~~ + +This module handles import compatibility issues between Python 2 and +Python 3. +""" + +import chardet + +import sys + +# ------- +# Pythons +# ------- + +# Syntax sugar. +_ver = sys.version_info + +#: Python 2.x? +is_py2 = (_ver[0] == 2) + +#: Python 3.x? 
+is_py3 = (_ver[0] == 3) + +try: + import simplejson as json +except ImportError: + import json + +# --------- +# Specifics +# --------- + +if is_py2: + from urllib import ( + quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, + proxy_bypass, proxy_bypass_environment, getproxies_environment) + from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag + from urllib2 import parse_http_list + import cookielib + from Cookie import Morsel + from StringIO import StringIO + # Keep OrderedDict for backwards compatibility. + from collections import Callable, Mapping, MutableMapping, OrderedDict + + + builtin_str = str + bytes = str + str = unicode + basestring = basestring + numeric_types = (int, long, float) + integer_types = (int, long) + +elif is_py3: + from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag + from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment + from http import cookiejar as cookielib + from http.cookies import Morsel + from io import StringIO + # Keep OrderedDict for backwards compatibility. + from collections import OrderedDict + from collections.abc import Callable, Mapping, MutableMapping + + builtin_str = str + str = str + bytes = bytes + basestring = (str, bytes) + numeric_types = (int, float) + integer_types = (int,) diff --git a/requests/cookies.py b/requests/cookies.py new file mode 100644 index 0000000..56fccd9 --- /dev/null +++ b/requests/cookies.py @@ -0,0 +1,549 @@ +# -*- coding: utf-8 -*- + +""" +requests.cookies +~~~~~~~~~~~~~~~~ + +Compatibility code to be able to use `cookielib.CookieJar` with requests. + +requests.utils imports from here, so be careful with imports. 
+""" + +import copy +import time +import calendar + +from ._internal_utils import to_native_string +from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping + +try: + import threading +except ImportError: + import dummy_threading as threading + + +class MockRequest(object): + """Wraps a `requests.Request` to mimic a `urllib2.Request`. + + The code in `cookielib.CookieJar` expects this interface in order to correctly + manage cookie policies, i.e., determine whether a cookie can be set, given the + domains of the request and the cookie. + + The original request object is read-only. The client is responsible for collecting + the new headers via `get_new_headers()` and interpreting them appropriately. You + probably want `get_cookie_header`, defined below. + """ + + def __init__(self, request): + self._r = request + self._new_headers = {} + self.type = urlparse(self._r.url).scheme + + def get_type(self): + return self.type + + def get_host(self): + return urlparse(self._r.url).netloc + + def get_origin_req_host(self): + return self.get_host() + + def get_full_url(self): + # Only return the response's URL if the user hadn't set the Host + # header + if not self._r.headers.get('Host'): + return self._r.url + # If they did set it, retrieve it and reconstruct the expected domain + host = to_native_string(self._r.headers['Host'], encoding='utf-8') + parsed = urlparse(self._r.url) + # Reconstruct the URL as we expect it + return urlunparse([ + parsed.scheme, host, parsed.path, parsed.params, parsed.query, + parsed.fragment + ]) + + def is_unverifiable(self): + return True + + def has_header(self, name): + return name in self._r.headers or name in self._new_headers + + def get_header(self, name, default=None): + return self._r.headers.get(name, self._new_headers.get(name, default)) + + def add_header(self, key, val): + """cookielib has no legitimate use for this method; add it back if you find one.""" + raise NotImplementedError("Cookie headers should be 
added with add_unredirected_header()") + + def add_unredirected_header(self, name, value): + self._new_headers[name] = value + + def get_new_headers(self): + return self._new_headers + + @property + def unverifiable(self): + return self.is_unverifiable() + + @property + def origin_req_host(self): + return self.get_origin_req_host() + + @property + def host(self): + return self.get_host() + + +class MockResponse(object): + """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`. + + ...what? Basically, expose the parsed HTTP headers from the server response + the way `cookielib` expects to see them. + """ + + def __init__(self, headers): + """Make a MockResponse for `cookielib` to read. + + :param headers: a httplib.HTTPMessage or analogous carrying the headers + """ + self._headers = headers + + def info(self): + return self._headers + + def getheaders(self, name): + self._headers.getheaders(name) + + +def extract_cookies_to_jar(jar, request, response): + """Extract the cookies from the response into a CookieJar. + + :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar) + :param request: our own requests.Request object + :param response: urllib3.HTTPResponse object + """ + if not (hasattr(response, '_original_response') and + response._original_response): + return + # the _original_response field is the wrapped httplib.HTTPResponse object, + req = MockRequest(request) + # pull out the HTTPMessage with the headers and put it in the mock: + res = MockResponse(response._original_response.msg) + jar.extract_cookies(res, req) + + +def get_cookie_header(jar, request): + """ + Produce an appropriate Cookie header string to be sent with `request`, or None. + + :rtype: str + """ + r = MockRequest(request) + jar.add_cookie_header(r) + return r.get_new_headers().get('Cookie') + + +def remove_cookie_by_name(cookiejar, name, domain=None, path=None): + """Unsets a cookie by name, by default over all domains and paths. + + Wraps CookieJar.clear(), is O(n). 
+ """ + clearables = [] + for cookie in cookiejar: + if cookie.name != name: + continue + if domain is not None and domain != cookie.domain: + continue + if path is not None and path != cookie.path: + continue + clearables.append((cookie.domain, cookie.path, cookie.name)) + + for domain, path, name in clearables: + cookiejar.clear(domain, path, name) + + +class CookieConflictError(RuntimeError): + """There are two cookies that meet the criteria specified in the cookie jar. + Use .get and .set and include domain and path args in order to be more specific. + """ + + +class RequestsCookieJar(cookielib.CookieJar, MutableMapping): + """Compatibility class; is a cookielib.CookieJar, but exposes a dict + interface. + + This is the CookieJar we create by default for requests and sessions that + don't specify one, since some clients may expect response.cookies and + session.cookies to support dict operations. + + Requests does not use the dict interface internally; it's just for + compatibility with external client code. All requests code should work + out of the box with externally provided instances of ``CookieJar``, e.g. + ``LWPCookieJar`` and ``FileCookieJar``. + + Unlike a regular CookieJar, this class is pickleable. + + .. warning:: dictionary operations that are normally O(1) may be O(n). + """ + + def get(self, name, default=None, domain=None, path=None): + """Dict-like get() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. + + .. warning:: operation is O(n), not O(1). + """ + try: + return self._find_no_duplicates(name, domain, path) + except KeyError: + return default + + def set(self, name, value, **kwargs): + """Dict-like set() that also supports optional domain and path args in + order to resolve naming collisions from using one cookie jar over + multiple domains. 
+ """ + # support client code that unsets cookies by assignment of a None value: + if value is None: + remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) + return + + if isinstance(value, Morsel): + c = morsel_to_cookie(value) + else: + c = create_cookie(name, value, **kwargs) + self.set_cookie(c) + return c + + def iterkeys(self): + """Dict-like iterkeys() that returns an iterator of names of cookies + from the jar. + + .. seealso:: itervalues() and iteritems(). + """ + for cookie in iter(self): + yield cookie.name + + def keys(self): + """Dict-like keys() that returns a list of names of cookies from the + jar. + + .. seealso:: values() and items(). + """ + return list(self.iterkeys()) + + def itervalues(self): + """Dict-like itervalues() that returns an iterator of values of cookies + from the jar. + + .. seealso:: iterkeys() and iteritems(). + """ + for cookie in iter(self): + yield cookie.value + + def values(self): + """Dict-like values() that returns a list of values of cookies from the + jar. + + .. seealso:: keys() and items(). + """ + return list(self.itervalues()) + + def iteritems(self): + """Dict-like iteritems() that returns an iterator of name-value tuples + from the jar. + + .. seealso:: iterkeys() and itervalues(). + """ + for cookie in iter(self): + yield cookie.name, cookie.value + + def items(self): + """Dict-like items() that returns a list of name-value tuples from the + jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a + vanilla python dict of key value pairs. + + .. seealso:: keys() and values(). 
+ """ + return list(self.iteritems()) + + def list_domains(self): + """Utility method to list all the domains in the jar.""" + domains = [] + for cookie in iter(self): + if cookie.domain not in domains: + domains.append(cookie.domain) + return domains + + def list_paths(self): + """Utility method to list all the paths in the jar.""" + paths = [] + for cookie in iter(self): + if cookie.path not in paths: + paths.append(cookie.path) + return paths + + def multiple_domains(self): + """Returns True if there are multiple domains in the jar. + Returns False otherwise. + + :rtype: bool + """ + domains = [] + for cookie in iter(self): + if cookie.domain is not None and cookie.domain in domains: + return True + domains.append(cookie.domain) + return False # there is only one domain in jar + + def get_dict(self, domain=None, path=None): + """Takes as an argument an optional domain and path and returns a plain + old Python dict of name-value pairs of cookies that meet the + requirements. + + :rtype: dict + """ + dictionary = {} + for cookie in iter(self): + if ( + (domain is None or cookie.domain == domain) and + (path is None or cookie.path == path) + ): + dictionary[cookie.name] = cookie.value + return dictionary + + def __contains__(self, name): + try: + return super(RequestsCookieJar, self).__contains__(name) + except CookieConflictError: + return True + + def __getitem__(self, name): + """Dict-like __getitem__() for compatibility with client code. Throws + exception if there are more than one cookie with name. In that case, + use the more explicit get() method instead. + + .. warning:: operation is O(n), not O(1). + """ + return self._find_no_duplicates(name) + + def __setitem__(self, name, value): + """Dict-like __setitem__ for compatibility with client code. Throws + exception if there is already a cookie of that name in the jar. In that + case, use the more explicit set() method instead. 
+ """ + self.set(name, value) + + def __delitem__(self, name): + """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s + ``remove_cookie_by_name()``. + """ + remove_cookie_by_name(self, name) + + def set_cookie(self, cookie, *args, **kwargs): + if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'): + cookie.value = cookie.value.replace('\\"', '') + return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs) + + def update(self, other): + """Updates this jar with cookies from another CookieJar or dict-like""" + if isinstance(other, cookielib.CookieJar): + for cookie in other: + self.set_cookie(copy.copy(cookie)) + else: + super(RequestsCookieJar, self).update(other) + + def _find(self, name, domain=None, path=None): + """Requests uses this method internally to get cookie values. + + If there are conflicting cookies, _find arbitrarily chooses one. + See _find_no_duplicates if you want an exception thrown if there are + conflicting cookies. + + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :return: cookie.value + """ + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + return cookie.value + + raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) + + def _find_no_duplicates(self, name, domain=None, path=None): + """Both ``__get_item__`` and ``get`` call this function: it's never + used elsewhere in Requests. 
+ + :param name: a string containing name of cookie + :param domain: (optional) string containing domain of cookie + :param path: (optional) string containing path of cookie + :raises KeyError: if cookie is not found + :raises CookieConflictError: if there are multiple cookies + that match name and optionally domain and path + :return: cookie.value + """ + toReturn = None + for cookie in iter(self): + if cookie.name == name: + if domain is None or cookie.domain == domain: + if path is None or cookie.path == path: + if toReturn is not None: # if there are multiple cookies that meet passed in criteria + raise CookieConflictError('There are multiple cookies with name, %r' % (name)) + toReturn = cookie.value # we will eventually return this as long as no cookie conflict + + if toReturn: + return toReturn + raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) + + def __getstate__(self): + """Unlike a normal CookieJar, this class is pickleable.""" + state = self.__dict__.copy() + # remove the unpickleable RLock object + state.pop('_cookies_lock') + return state + + def __setstate__(self, state): + """Unlike a normal CookieJar, this class is pickleable.""" + self.__dict__.update(state) + if '_cookies_lock' not in self.__dict__: + self._cookies_lock = threading.RLock() + + def copy(self): + """Return a copy of this RequestsCookieJar.""" + new_cj = RequestsCookieJar() + new_cj.set_policy(self.get_policy()) + new_cj.update(self) + return new_cj + + def get_policy(self): + """Return the CookiePolicy instance used.""" + return self._policy + + +def _copy_cookie_jar(jar): + if jar is None: + return None + + if hasattr(jar, 'copy'): + # We're dealing with an instance of RequestsCookieJar + return jar.copy() + # We're dealing with a generic CookieJar instance + new_jar = copy.copy(jar) + new_jar.clear() + for cookie in jar: + new_jar.set_cookie(copy.copy(cookie)) + return new_jar + + +def create_cookie(name, value, **kwargs): + """Make a cookie from underspecified 
parameters. + + By default, the pair of `name` and `value` will be set for the domain '' + and sent on every request (this is sometimes called a "supercookie"). + """ + result = { + 'version': 0, + 'name': name, + 'value': value, + 'port': None, + 'domain': '', + 'path': '/', + 'secure': False, + 'expires': None, + 'discard': True, + 'comment': None, + 'comment_url': None, + 'rest': {'HttpOnly': None}, + 'rfc2109': False, + } + + badargs = set(kwargs) - set(result) + if badargs: + err = 'create_cookie() got unexpected keyword arguments: %s' + raise TypeError(err % list(badargs)) + + result.update(kwargs) + result['port_specified'] = bool(result['port']) + result['domain_specified'] = bool(result['domain']) + result['domain_initial_dot'] = result['domain'].startswith('.') + result['path_specified'] = bool(result['path']) + + return cookielib.Cookie(**result) + + +def morsel_to_cookie(morsel): + """Convert a Morsel object into a Cookie containing the one k/v pair.""" + + expires = None + if morsel['max-age']: + try: + expires = int(time.time() + int(morsel['max-age'])) + except ValueError: + raise TypeError('max-age: %s must be integer' % morsel['max-age']) + elif morsel['expires']: + time_template = '%a, %d-%b-%Y %H:%M:%S GMT' + expires = calendar.timegm( + time.strptime(morsel['expires'], time_template) + ) + return create_cookie( + comment=morsel['comment'], + comment_url=bool(morsel['comment']), + discard=False, + domain=morsel['domain'], + expires=expires, + name=morsel.key, + path=morsel['path'], + port=None, + rest={'HttpOnly': morsel['httponly']}, + rfc2109=False, + secure=bool(morsel['secure']), + value=morsel.value, + version=morsel['version'] or 0, + ) + + +def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): + """Returns a CookieJar from a key/value dictionary. + + :param cookie_dict: Dict of key/values to insert into CookieJar. + :param cookiejar: (optional) A cookiejar to add the cookies to. 
+ :param overwrite: (optional) If False, will not replace cookies + already in the jar with new ones. + :rtype: CookieJar + """ + if cookiejar is None: + cookiejar = RequestsCookieJar() + + if cookie_dict is not None: + names_from_jar = [cookie.name for cookie in cookiejar] + for name in cookie_dict: + if overwrite or (name not in names_from_jar): + cookiejar.set_cookie(create_cookie(name, cookie_dict[name])) + + return cookiejar + + +def merge_cookies(cookiejar, cookies): + """Add cookies to cookiejar and returns a merged CookieJar. + + :param cookiejar: CookieJar object to add the cookies to. + :param cookies: Dictionary or CookieJar object to be added. + :rtype: CookieJar + """ + if not isinstance(cookiejar, cookielib.CookieJar): + raise ValueError('You can only merge into CookieJar') + + if isinstance(cookies, dict): + cookiejar = cookiejar_from_dict( + cookies, cookiejar=cookiejar, overwrite=False) + elif isinstance(cookies, cookielib.CookieJar): + try: + cookiejar.update(cookies) + except AttributeError: + for cookie_in_jar in cookies: + cookiejar.set_cookie(cookie_in_jar) + + return cookiejar diff --git a/requests/exceptions.py b/requests/exceptions.py new file mode 100644 index 0000000..a80cad8 --- /dev/null +++ b/requests/exceptions.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- + +""" +requests.exceptions +~~~~~~~~~~~~~~~~~~~ + +This module contains the set of Requests' exceptions. +""" +from urllib3.exceptions import HTTPError as BaseHTTPError + + +class RequestException(IOError): + """There was an ambiguous exception that occurred while handling your + request. 
+ """ + + def __init__(self, *args, **kwargs): + """Initialize RequestException with `request` and `response` objects.""" + response = kwargs.pop('response', None) + self.response = response + self.request = kwargs.pop('request', None) + if (response is not None and not self.request and + hasattr(response, 'request')): + self.request = self.response.request + super(RequestException, self).__init__(*args, **kwargs) + + +class HTTPError(RequestException): + """An HTTP error occurred.""" + + +class ConnectionError(RequestException): + """A Connection error occurred.""" + + +class ProxyError(ConnectionError): + """A proxy error occurred.""" + + +class SSLError(ConnectionError): + """An SSL error occurred.""" + + +class Timeout(RequestException): + """The request timed out. + + Catching this error will catch both + :exc:`~requests.exceptions.ConnectTimeout` and + :exc:`~requests.exceptions.ReadTimeout` errors. + """ + + +class ConnectTimeout(ConnectionError, Timeout): + """The request timed out while trying to connect to the remote server. + + Requests that produced this error are safe to retry. + """ + + +class ReadTimeout(Timeout): + """The server did not send any data in the allotted amount of time.""" + + +class URLRequired(RequestException): + """A valid URL is required to make a request.""" + + +class TooManyRedirects(RequestException): + """Too many redirects.""" + + +class MissingSchema(RequestException, ValueError): + """The URL schema (e.g. 
http or https) is missing.""" + + +class InvalidSchema(RequestException, ValueError): + """See defaults.py for valid schemas.""" + + +class InvalidURL(RequestException, ValueError): + """The URL provided was somehow invalid.""" + + +class InvalidHeader(RequestException, ValueError): + """The header value provided was somehow invalid.""" + + +class InvalidProxyURL(InvalidURL): + """The proxy URL provided is invalid.""" + + +class ChunkedEncodingError(RequestException): + """The server declared chunked encoding but sent an invalid chunk.""" + + +class ContentDecodingError(RequestException, BaseHTTPError): + """Failed to decode response content""" + + +class StreamConsumedError(RequestException, TypeError): + """The content for this response was already consumed""" + + +class RetryError(RequestException): + """Custom retries logic failed""" + + +class UnrewindableBodyError(RequestException): + """Requests encountered an error when trying to rewind a body""" + +# Warnings + + +class RequestsWarning(Warning): + """Base warning for Requests.""" + pass + + +class FileModeWarning(RequestsWarning, DeprecationWarning): + """A file was opened in text mode, but Requests determined its binary length.""" + pass + + +class RequestsDependencyWarning(RequestsWarning): + """An imported dependency doesn't match the expected version range.""" + pass diff --git a/requests/help.py b/requests/help.py new file mode 100644 index 0000000..e53d35e --- /dev/null +++ b/requests/help.py @@ -0,0 +1,119 @@ +"""Module containing bug report helper(s).""" +from __future__ import print_function + +import json +import platform +import sys +import ssl + +import idna +import urllib3 +import chardet + +from . 
import __version__ as requests_version + +try: + from urllib3.contrib import pyopenssl +except ImportError: + pyopenssl = None + OpenSSL = None + cryptography = None +else: + import OpenSSL + import cryptography + + +def _implementation(): + """Return a dict with the Python implementation and version. + + Provide both the name and the version of the Python implementation + currently running. For example, on CPython 2.7.5 it will return + {'name': 'CPython', 'version': '2.7.5'}. + + This function works best on CPython and PyPy: in particular, it probably + doesn't work for Jython or IronPython. Future investigation should be done + to work out the correct shape of the code for those platforms. + """ + implementation = platform.python_implementation() + + if implementation == 'CPython': + implementation_version = platform.python_version() + elif implementation == 'PyPy': + implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major, + sys.pypy_version_info.minor, + sys.pypy_version_info.micro) + if sys.pypy_version_info.releaselevel != 'final': + implementation_version = ''.join([ + implementation_version, sys.pypy_version_info.releaselevel + ]) + elif implementation == 'Jython': + implementation_version = platform.python_version() # Complete Guess + elif implementation == 'IronPython': + implementation_version = platform.python_version() # Complete Guess + else: + implementation_version = 'Unknown' + + return {'name': implementation, 'version': implementation_version} + + +def info(): + """Generate information for a bug report.""" + try: + platform_info = { + 'system': platform.system(), + 'release': platform.release(), + } + except IOError: + platform_info = { + 'system': 'Unknown', + 'release': 'Unknown', + } + + implementation_info = _implementation() + urllib3_info = {'version': urllib3.__version__} + chardet_info = {'version': chardet.__version__} + + pyopenssl_info = { + 'version': None, + 'openssl_version': '', + } + if OpenSSL: + pyopenssl_info = { + 
'version': OpenSSL.__version__, + 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER, + } + cryptography_info = { + 'version': getattr(cryptography, '__version__', ''), + } + idna_info = { + 'version': getattr(idna, '__version__', ''), + } + + system_ssl = ssl.OPENSSL_VERSION_NUMBER + system_ssl_info = { + 'version': '%x' % system_ssl if system_ssl is not None else '' + } + + return { + 'platform': platform_info, + 'implementation': implementation_info, + 'system_ssl': system_ssl_info, + 'using_pyopenssl': pyopenssl is not None, + 'pyOpenSSL': pyopenssl_info, + 'urllib3': urllib3_info, + 'chardet': chardet_info, + 'cryptography': cryptography_info, + 'idna': idna_info, + 'requests': { + 'version': requests_version, + }, + } + + +def main(): + """Pretty-print the bug information as JSON.""" + print(json.dumps(info(), sort_keys=True, indent=2)) + + +if __name__ == '__main__': + main() diff --git a/requests/hooks.py b/requests/hooks.py new file mode 100644 index 0000000..7a51f21 --- /dev/null +++ b/requests/hooks.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +""" +requests.hooks +~~~~~~~~~~~~~~ + +This module provides the capabilities for the Requests hooks system. + +Available hooks: + +``response``: + The response generated from a Request. 
+""" +HOOKS = ['response'] + + +def default_hooks(): + return {event: [] for event in HOOKS} + +# TODO: response is the only one + + +def dispatch_hook(key, hooks, hook_data, **kwargs): + """Dispatches a hook dictionary on a given piece of data.""" + hooks = hooks or {} + hooks = hooks.get(key) + if hooks: + if hasattr(hooks, '__call__'): + hooks = [hooks] + for hook in hooks: + _hook_data = hook(hook_data, **kwargs) + if _hook_data is not None: + hook_data = _hook_data + return hook_data diff --git a/requests/models.py b/requests/models.py new file mode 100644 index 0000000..3579883 --- /dev/null +++ b/requests/models.py @@ -0,0 +1,954 @@ +# -*- coding: utf-8 -*- + +""" +requests.models +~~~~~~~~~~~~~~~ + +This module contains the primary objects that power Requests. +""" + +import datetime +import sys + +# Import encoding now, to avoid implicit import later. +# Implicit import within threads may cause LookupError when standard library is in a ZIP, +# such as in Embedded Python. See https://github.com/psf/requests/issues/3578. 
+import encodings.idna + +from urllib3.fields import RequestField +from urllib3.filepost import encode_multipart_formdata +from urllib3.util import parse_url +from urllib3.exceptions import ( + DecodeError, ReadTimeoutError, ProtocolError, LocationParseError) + +from io import UnsupportedOperation +from .hooks import default_hooks +from .structures import CaseInsensitiveDict + +from .auth import HTTPBasicAuth +from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar +from .exceptions import ( + HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError, + ContentDecodingError, ConnectionError, StreamConsumedError) +from ._internal_utils import to_native_string, unicode_is_ascii +from .utils import ( + guess_filename, get_auth_from_url, requote_uri, + stream_decode_response_unicode, to_key_val_list, parse_header_links, + iter_slices, guess_json_utf, super_len, check_header_validity) +from .compat import ( + Callable, Mapping, + cookielib, urlunparse, urlsplit, urlencode, str, bytes, + is_py2, chardet, builtin_str, basestring) +from .compat import json as complexjson +from .status_codes import codes + +#: The set of HTTP status codes that indicate an automatically +#: processable redirect. +REDIRECT_STATI = ( + codes.moved, # 301 + codes.found, # 302 + codes.other, # 303 + codes.temporary_redirect, # 307 + codes.permanent_redirect, # 308 +) + +DEFAULT_REDIRECT_LIMIT = 30 +CONTENT_CHUNK_SIZE = 10 * 1024 +ITER_CHUNK_SIZE = 512 + + +class RequestEncodingMixin(object): + @property + def path_url(self): + """Build the path URL to use.""" + + url = [] + + p = urlsplit(self.url) + + path = p.path + if not path: + path = '/' + + url.append(path) + + query = p.query + if query: + url.append('?') + url.append(query) + + return ''.join(url) + + @staticmethod + def _encode_params(data): + """Encode parameters in a piece of data. + + Will successfully encode parameters when passed as a dict or a list of + 2-tuples. 
Order is retained if data is a list of 2-tuples but arbitrary + if parameters are supplied as a dict. + """ + + if isinstance(data, (str, bytes)): + return data + elif hasattr(data, 'read'): + return data + elif hasattr(data, '__iter__'): + result = [] + for k, vs in to_key_val_list(data): + if isinstance(vs, basestring) or not hasattr(vs, '__iter__'): + vs = [vs] + for v in vs: + if v is not None: + result.append( + (k.encode('utf-8') if isinstance(k, str) else k, + v.encode('utf-8') if isinstance(v, str) else v)) + return urlencode(result, doseq=True) + else: + return data + + @staticmethod + def _encode_files(files, data): + """Build the body for a multipart/form-data request. + + Will successfully encode files when passed as a dict or a list of + tuples. Order is retained if data is a list of tuples but arbitrary + if parameters are supplied as a dict. + The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) + or 4-tuples (filename, fileobj, contentype, custom_headers). + """ + if (not files): + raise ValueError("Files must be provided.") + elif isinstance(data, basestring): + raise ValueError("Data must not be a string.") + + new_fields = [] + fields = to_key_val_list(data or {}) + files = to_key_val_list(files or {}) + + for field, val in fields: + if isinstance(val, basestring) or not hasattr(val, '__iter__'): + val = [val] + for v in val: + if v is not None: + # Don't call str() on bytestrings: in Py3 it all goes wrong. 
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary or
        list of tuples ``[(key, value)]`` is provided, form-encoding will
        take place.
    :param json: json for the body to attach to the request (if files or data is not specified).
    :param params: URL parameters to append to the URL. If a dictionary or
        list of tuples ``[(key, value)]`` is provided, form-encoding will
        take place.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'https://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """

    def __init__(self,
                 method=None, url=None, headers=None, files=None, data=None,
                 params=None, auth=None, cookies=None, hooks=None, json=None):

        # Default empty containers for container-valued params.
        data = [] if data is None else data
        files = [] if files is None else files
        headers = {} if headers is None else headers
        params = {} if params is None else params
        hooks = {} if hooks is None else hooks

        self.hooks = default_hooks()
        for (k, v) in list(hooks.items()):
            self.register_hook(event=k, hook=v)

        self.method = method
        self.url = url
        self.headers = headers
        self.files = files
        self.data = data
        self.json = json
        self.params = params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        # FIX: restore the '<Request [METHOD]>' format string; the bare
        # '' % (self.method) form raises TypeError ("not all arguments
        # converted during string formatting").
        return '<Request [%s]>' % (self.method)

    def prepare(self):
        """Construct a :class:`PreparedRequest <PreparedRequest>` for transmission and return it."""
        p = PreparedRequest()
        p.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
            files=self.files,
            data=self.data,
            json=self.json,
            params=self.params,
            auth=self.auth,
            cookies=self.cookies,
            hooks=self.hooks,
        )
        return p
+ + Usage:: + + >>> import requests + >>> req = requests.Request('GET', 'https://httpbin.org/get') + >>> r = req.prepare() + >>> r + + + >>> s = requests.Session() + >>> s.send(r) + + """ + + def __init__(self): + #: HTTP verb to send to the server. + self.method = None + #: HTTP URL to send the request to. + self.url = None + #: dictionary of HTTP headers. + self.headers = None + # The `CookieJar` used to create the Cookie header will be stored here + # after prepare_cookies is called + self._cookies = None + #: request body to send to the server. + self.body = None + #: dictionary of callback hooks, for internal usage. + self.hooks = default_hooks() + #: integer denoting starting position of a readable file-like body. + self._body_position = None + + def prepare(self, + method=None, url=None, headers=None, files=None, data=None, + params=None, auth=None, cookies=None, hooks=None, json=None): + """Prepares the entire request with the given parameters.""" + + self.prepare_method(method) + self.prepare_url(url, params) + self.prepare_headers(headers) + self.prepare_cookies(cookies) + self.prepare_body(data, files, json) + self.prepare_auth(auth, url) + + # Note that prepare_auth must be last to enable authentication schemes + # such as OAuth to work on a fully prepared request. + + # This MUST go after prepare_auth. 
Authenticators could add a hook + self.prepare_hooks(hooks) + + def __repr__(self): + return '' % (self.method) + + def copy(self): + p = PreparedRequest() + p.method = self.method + p.url = self.url + p.headers = self.headers.copy() if self.headers is not None else None + p._cookies = _copy_cookie_jar(self._cookies) + p.body = self.body + p.hooks = self.hooks + p._body_position = self._body_position + return p + + def prepare_method(self, method): + """Prepares the given HTTP method.""" + self.method = method + if self.method is not None: + self.method = to_native_string(self.method.upper()) + + @staticmethod + def _get_idna_encoded_host(host): + import idna + + try: + host = idna.encode(host, uts46=True).decode('utf-8') + except idna.IDNAError: + raise UnicodeError + return host + + def prepare_url(self, url, params): + """Prepares the given HTTP URL.""" + #: Accept objects that have string representations. + #: We're unable to blindly call unicode/str functions + #: as this will include the bytestring indicator (b'') + #: on python 3.x. + #: https://github.com/psf/requests/pull/2238 + if isinstance(url, bytes): + url = url.decode('utf8') + else: + url = unicode(url) if is_py2 else str(url) + + # Remove leading whitespaces from url + url = url.lstrip() + + # Don't do any URL preparation for non-HTTP schemes like `mailto`, + # `data` etc to work around exceptions from `url_parse`, which + # handles RFC 3986 only. + if ':' in url and not url.lower().startswith('http'): + self.url = url + return + + # Support for unicode domain names and paths. + try: + scheme, auth, host, port, path, query, fragment = parse_url(url) + except LocationParseError as e: + raise InvalidURL(*e.args) + + if not scheme: + error = ("Invalid URL {0!r}: No schema supplied. 
Perhaps you meant http://{0}?") + error = error.format(to_native_string(url, 'utf8')) + + raise MissingSchema(error) + + if not host: + raise InvalidURL("Invalid URL %r: No host supplied" % url) + + # In general, we want to try IDNA encoding the hostname if the string contains + # non-ASCII characters. This allows users to automatically get the correct IDNA + # behaviour. For strings containing only ASCII characters, we need to also verify + # it doesn't start with a wildcard (*), before allowing the unencoded hostname. + if not unicode_is_ascii(host): + try: + host = self._get_idna_encoded_host(host) + except UnicodeError: + raise InvalidURL('URL has an invalid label.') + elif host.startswith(u'*'): + raise InvalidURL('URL has an invalid label.') + + # Carefully reconstruct the network location + netloc = auth or '' + if netloc: + netloc += '@' + netloc += host + if port: + netloc += ':' + str(port) + + # Bare domains aren't valid URLs. + if not path: + path = '/' + + if is_py2: + if isinstance(scheme, str): + scheme = scheme.encode('utf-8') + if isinstance(netloc, str): + netloc = netloc.encode('utf-8') + if isinstance(path, str): + path = path.encode('utf-8') + if isinstance(query, str): + query = query.encode('utf-8') + if isinstance(fragment, str): + fragment = fragment.encode('utf-8') + + if isinstance(params, (str, bytes)): + params = to_native_string(params) + + enc_params = self._encode_params(params) + if enc_params: + if query: + query = '%s&%s' % (query, enc_params) + else: + query = enc_params + + url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment])) + self.url = url + + def prepare_headers(self, headers): + """Prepares the given HTTP headers.""" + + self.headers = CaseInsensitiveDict() + if headers: + for header in headers.items(): + # Raise exception on invalid header value. 
+ check_header_validity(header) + name, value = header + self.headers[to_native_string(name)] = value + + def prepare_body(self, data, files, json=None): + """Prepares the given HTTP body data.""" + + # Check if file, fo, generator, iterator. + # If not, run through normal process. + + # Nottin' on you. + body = None + content_type = None + + if not data and json is not None: + # urllib3 requires a bytes-like body. Python 2's json.dumps + # provides this natively, but Python 3 gives a Unicode string. + content_type = 'application/json' + body = complexjson.dumps(json) + if not isinstance(body, bytes): + body = body.encode('utf-8') + + is_stream = all([ + hasattr(data, '__iter__'), + not isinstance(data, (basestring, list, tuple, Mapping)) + ]) + + try: + length = super_len(data) + except (TypeError, AttributeError, UnsupportedOperation): + length = None + + if is_stream: + body = data + + if getattr(body, 'tell', None) is not None: + # Record the current file position before reading. + # This will allow us to rewind a file in the event + # of a redirect. + try: + self._body_position = body.tell() + except (IOError, OSError): + # This differentiates from None, allowing us to catch + # a failed `tell()` later when trying to rewind the body + self._body_position = object() + + if files: + raise NotImplementedError('Streamed bodies and files are mutually exclusive.') + + if length: + self.headers['Content-Length'] = builtin_str(length) + else: + self.headers['Transfer-Encoding'] = 'chunked' + else: + # Multi-part file uploads. + if files: + (body, content_type) = self._encode_files(files, data) + else: + if data: + body = self._encode_params(data) + if isinstance(data, basestring) or hasattr(data, 'read'): + content_type = None + else: + content_type = 'application/x-www-form-urlencoded' + + self.prepare_content_length(body) + + # Add content-type if it wasn't explicitly provided. 
+ if content_type and ('content-type' not in self.headers): + self.headers['Content-Type'] = content_type + + self.body = body + + def prepare_content_length(self, body): + """Prepare Content-Length header based on request method and body""" + if body is not None: + length = super_len(body) + if length: + # If length exists, set it. Otherwise, we fallback + # to Transfer-Encoding: chunked. + self.headers['Content-Length'] = builtin_str(length) + elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None: + # Set Content-Length to 0 for methods that can have a body + # but don't provide one. (i.e. not GET or HEAD) + self.headers['Content-Length'] = '0' + + def prepare_auth(self, auth, url=''): + """Prepares the given HTTP auth data.""" + + # If no Auth is explicitly provided, extract it from the URL first. + if auth is None: + url_auth = get_auth_from_url(self.url) + auth = url_auth if any(url_auth) else None + + if auth: + if isinstance(auth, tuple) and len(auth) == 2: + # special-case basic HTTP auth + auth = HTTPBasicAuth(*auth) + + # Allow auth to make its changes. + r = auth(self) + + # Update self to reflect the auth changes. + self.__dict__.update(r.__dict__) + + # Recompute Content-Length + self.prepare_content_length(self.body) + + def prepare_cookies(self, cookies): + """Prepares the given HTTP cookie data. + + This function eventually generates a ``Cookie`` header from the + given cookies using cookielib. Due to cookielib's design, the header + will not be regenerated if it already exists, meaning this function + can only be called once for the life of the + :class:`PreparedRequest ` object. Any subsequent calls + to ``prepare_cookies`` will have no actual effect, unless the "Cookie" + header is removed beforehand. 
+ """ + if isinstance(cookies, cookielib.CookieJar): + self._cookies = cookies + else: + self._cookies = cookiejar_from_dict(cookies) + + cookie_header = get_cookie_header(self._cookies, self) + if cookie_header is not None: + self.headers['Cookie'] = cookie_header + + def prepare_hooks(self, hooks): + """Prepares the given hooks.""" + # hooks can be passed as None to the prepare method and to this + # method. To prevent iterating over None, simply use an empty list + # if hooks is False-y + hooks = hooks or [] + for event in hooks: + self.register_hook(event, hooks[event]) + + +class Response(object): + """The :class:`Response ` object, which contains a + server's response to an HTTP request. + """ + + __attrs__ = [ + '_content', 'status_code', 'headers', 'url', 'history', + 'encoding', 'reason', 'cookies', 'elapsed', 'request' + ] + + def __init__(self): + self._content = False + self._content_consumed = False + self._next = None + + #: Integer Code of responded HTTP Status, e.g. 404 or 200. + self.status_code = None + + #: Case-insensitive Dictionary of Response Headers. + #: For example, ``headers['content-encoding']`` will return the + #: value of a ``'Content-Encoding'`` response header. + self.headers = CaseInsensitiveDict() + + #: File-like object representation of response (for advanced usage). + #: Use of ``raw`` requires that ``stream=True`` be set on the request. + #: This requirement does not apply for use internally to Requests. + self.raw = None + + #: Final URL location of Response. + self.url = None + + #: Encoding to decode with when accessing r.text. + self.encoding = None + + #: A list of :class:`Response ` objects from + #: the history of the Request. Any redirect responses will end + #: up here. The list is sorted from the oldest to the most recent request. + self.history = [] + + #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". + self.reason = None + + #: A CookieJar of Cookies the server sent back. 
+ self.cookies = cookiejar_from_dict({}) + + #: The amount of time elapsed between sending the request + #: and the arrival of the response (as a timedelta). + #: This property specifically measures the time taken between sending + #: the first byte of the request and finishing parsing the headers. It + #: is therefore unaffected by consuming the response content or the + #: value of the ``stream`` keyword argument. + self.elapsed = datetime.timedelta(0) + + #: The :class:`PreparedRequest ` object to which this + #: is a response. + self.request = None + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def __getstate__(self): + # Consume everything; accessing the content attribute makes + # sure the content has been fully read. + if not self._content_consumed: + self.content + + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + for name, value in state.items(): + setattr(self, name, value) + + # pickled objects do not have .raw + setattr(self, '_content_consumed', True) + setattr(self, 'raw', None) + + def __repr__(self): + return '' % (self.status_code) + + def __bool__(self): + """Returns True if :attr:`status_code` is less than 400. + + This attribute checks if the status code of the response is between + 400 and 600 to see if there was a client error or a server error. If + the status code, is between 200 and 400, this will return True. This + is **not** a check to see if the response code is ``200 OK``. + """ + return self.ok + + def __nonzero__(self): + """Returns True if :attr:`status_code` is less than 400. + + This attribute checks if the status code of the response is between + 400 and 600 to see if there was a client error or a server error. If + the status code, is between 200 and 400, this will return True. This + is **not** a check to see if the response code is ``200 OK``. 
+ """ + return self.ok + + def __iter__(self): + """Allows you to use a response as an iterator.""" + return self.iter_content(128) + + @property + def ok(self): + """Returns True if :attr:`status_code` is less than 400, False if not. + + This attribute checks if the status code of the response is between + 400 and 600 to see if there was a client error or a server error. If + the status code is between 200 and 400, this will return True. This + is **not** a check to see if the response code is ``200 OK``. + """ + try: + self.raise_for_status() + except HTTPError: + return False + return True + + @property + def is_redirect(self): + """True if this Response is a well-formed HTTP redirect that could have + been processed automatically (by :meth:`Session.resolve_redirects`). + """ + return ('location' in self.headers and self.status_code in REDIRECT_STATI) + + @property + def is_permanent_redirect(self): + """True if this Response one of the permanent versions of redirect.""" + return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect)) + + @property + def next(self): + """Returns a PreparedRequest for the next request in a redirect chain, if there is one.""" + return self._next + + @property + def apparent_encoding(self): + """The apparent encoding, provided by the chardet library.""" + return chardet.detect(self.content)['encoding'] + + def iter_content(self, chunk_size=1, decode_unicode=False): + """Iterates over the response data. When stream=True is set on the + request, this avoids reading the content at once into memory for + large responses. The chunk size is the number of bytes it should + read into memory. This is not necessarily the length of each item + returned as decoding can take place. + + chunk_size must be of type int or None. A value of None will + function differently depending on the value of `stream`. + stream=True will read data as it arrives in whatever size the + chunks are received. 
If stream=False, data is returned as + a single chunk. + + If decode_unicode is True, content will be decoded using the best + available encoding based on the response. + """ + + def generate(): + # Special case for urllib3. + if hasattr(self.raw, 'stream'): + try: + for chunk in self.raw.stream(chunk_size, decode_content=True): + yield chunk + except ProtocolError as e: + raise ChunkedEncodingError(e) + except DecodeError as e: + raise ContentDecodingError(e) + except ReadTimeoutError as e: + raise ConnectionError(e) + else: + # Standard file-like object. + while True: + chunk = self.raw.read(chunk_size) + if not chunk: + break + yield chunk + + self._content_consumed = True + + if self._content_consumed and isinstance(self._content, bool): + raise StreamConsumedError() + elif chunk_size is not None and not isinstance(chunk_size, int): + raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size)) + # simulate reading small chunks of the content + reused_chunks = iter_slices(self._content, chunk_size) + + stream_chunks = generate() + + chunks = reused_chunks if self._content_consumed else stream_chunks + + if decode_unicode: + chunks = stream_decode_response_unicode(chunks, self) + + return chunks + + def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None): + """Iterates over the response data, one line at a time. When + stream=True is set on the request, this avoids reading the + content at once into memory for large responses. + + .. note:: This method is not reentrant safe. 
+ """ + + pending = None + + for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): + + if pending is not None: + chunk = pending + chunk + + if delimiter: + lines = chunk.split(delimiter) + else: + lines = chunk.splitlines() + + if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]: + pending = lines.pop() + else: + pending = None + + for line in lines: + yield line + + if pending is not None: + yield pending + + @property + def content(self): + """Content of the response, in bytes.""" + + if self._content is False: + # Read the contents. + if self._content_consumed: + raise RuntimeError( + 'The content for this response was already consumed') + + if self.status_code == 0 or self.raw is None: + self._content = None + else: + self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b'' + + self._content_consumed = True + # don't need to release the connection; that's been handled by urllib3 + # since we exhausted the data. + return self._content + + @property + def text(self): + """Content of the response, in unicode. + + If Response.encoding is None, encoding will be guessed using + ``chardet``. + + The encoding of the response content is determined based solely on HTTP + headers, following RFC 2616 to the letter. If you can take advantage of + non-HTTP knowledge to make a better guess at the encoding, you should + set ``r.encoding`` appropriately before accessing this property. + """ + + # Try charset from content-type + content = None + encoding = self.encoding + + if not self.content: + return str('') + + # Fallback to auto-detected encoding. + if self.encoding is None: + encoding = self.apparent_encoding + + # Decode unicode from given encoding. + try: + content = str(self.content, encoding, errors='replace') + except (LookupError, TypeError): + # A LookupError is raised if the encoding was not found which could + # indicate a misspelling or similar mistake. 
+ # + # A TypeError can be raised if encoding is None + # + # So we try blindly encoding. + content = str(self.content, errors='replace') + + return content + + def json(self, **kwargs): + r"""Returns the json-encoded content of a response, if any. + + :param \*\*kwargs: Optional arguments that ``json.loads`` takes. + :raises ValueError: If the response body does not contain valid json. + """ + + if not self.encoding and self.content and len(self.content) > 3: + # No encoding set. JSON RFC 4627 section 3 states we should expect + # UTF-8, -16 or -32. Detect which one to use; If the detection or + # decoding fails, fall back to `self.text` (using chardet to make + # a best guess). + encoding = guess_json_utf(self.content) + if encoding is not None: + try: + return complexjson.loads( + self.content.decode(encoding), **kwargs + ) + except UnicodeDecodeError: + # Wrong UTF codec detected; usually because it's not UTF-8 + # but some other 8-bit codec. This is an RFC violation, + # and the server didn't bother to tell us what codec *was* + # used. + pass + return complexjson.loads(self.text, **kwargs) + + @property + def links(self): + """Returns the parsed header links of the response, if any.""" + + header = self.headers.get('link') + + # l = MultiDict() + l = {} + + if header: + links = parse_header_links(header) + + for link in links: + key = link.get('rel') or link.get('url') + l[key] = link + + return l + + def raise_for_status(self): + """Raises stored :class:`HTTPError`, if one occurred.""" + + http_error_msg = '' + if isinstance(self.reason, bytes): + # We attempt to decode utf-8 first because some servers + # choose to localize their reason strings. If the string + # isn't utf-8, we fall back to iso-8859-1 for all other + # encodings. 
(See PR #3538) + try: + reason = self.reason.decode('utf-8') + except UnicodeDecodeError: + reason = self.reason.decode('iso-8859-1') + else: + reason = self.reason + + if 400 <= self.status_code < 500: + http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url) + + elif 500 <= self.status_code < 600: + http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url) + + if http_error_msg: + raise HTTPError(http_error_msg, response=self) + + def close(self): + """Releases the connection back to the pool. Once this method has been + called the underlying ``raw`` object must not be accessed again. + + *Note: Should not normally need to be called explicitly.* + """ + if not self._content_consumed: + self.raw.close() + + release_conn = getattr(self.raw, 'release_conn', None) + if release_conn is not None: + release_conn() diff --git a/requests/packages.py b/requests/packages.py new file mode 100644 index 0000000..7232fe0 --- /dev/null +++ b/requests/packages.py @@ -0,0 +1,14 @@ +import sys + +# This code exists for backwards compatibility reasons. +# I don't like it either. Just look the other way. :) + +for package in ('urllib3', 'idna', 'chardet'): + locals()[package] = __import__(package) + # This traversal is apparently necessary such that the identities are + # preserved (requests.packages.urllib3.* is urllib3.*) + for mod in list(sys.modules): + if mod == package or mod.startswith(package + '.'): + sys.modules['requests.packages.' + mod] = sys.modules[mod] + +# Kinda cool, though, right? diff --git a/requests/sessions.py b/requests/sessions.py new file mode 100644 index 0000000..2845880 --- /dev/null +++ b/requests/sessions.py @@ -0,0 +1,767 @@ +# -*- coding: utf-8 -*- + +""" +requests.session +~~~~~~~~~~~~~~~~ + +This module provides a Session object to manage and persist settings across +requests (cookies, auth, proxies). 
+""" +import os +import sys +import time +from datetime import timedelta +from collections import OrderedDict + +from .auth import _basic_auth_str +from .compat import cookielib, is_py3, urljoin, urlparse, Mapping +from .cookies import ( + cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) +from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT +from .hooks import default_hooks, dispatch_hook +from ._internal_utils import to_native_string +from .utils import to_key_val_list, default_headers, DEFAULT_PORTS +from .exceptions import ( + TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) + +from .structures import CaseInsensitiveDict +from .adapters import HTTPAdapter + +from .utils import ( + requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, + get_auth_from_url, rewind_body +) + +from .status_codes import codes + +# formerly defined here, reexposed here for backward compatibility +from .models import REDIRECT_STATI + +# Preferred clock, based on which one is more accurate on a given system. +if sys.platform == 'win32': + try: # Python 3.4+ + preferred_clock = time.perf_counter + except AttributeError: # Earlier than Python 3. + preferred_clock = time.clock +else: + preferred_clock = time.time + + +def merge_setting(request_setting, session_setting, dict_class=OrderedDict): + """Determines appropriate setting for a given request, taking into account + the explicit setting on that request, and the setting in the session. If a + setting is a dictionary, they will be merged together using `dict_class` + """ + + if session_setting is None: + return request_setting + + if request_setting is None: + return session_setting + + # Bypass if not a dictionary (e.g. 
verify) + if not ( + isinstance(session_setting, Mapping) and + isinstance(request_setting, Mapping) + ): + return request_setting + + merged_setting = dict_class(to_key_val_list(session_setting)) + merged_setting.update(to_key_val_list(request_setting)) + + # Remove keys that are set to None. Extract keys first to avoid altering + # the dictionary during iteration. + none_keys = [k for (k, v) in merged_setting.items() if v is None] + for key in none_keys: + del merged_setting[key] + + return merged_setting + + +def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): + """Properly merges both requests and session hooks. + + This is necessary because when request_hooks == {'response': []}, the + merge breaks Session hooks entirely. + """ + if session_hooks is None or session_hooks.get('response') == []: + return request_hooks + + if request_hooks is None or request_hooks.get('response') == []: + return session_hooks + + return merge_setting(request_hooks, session_hooks, dict_class) + + +class SessionRedirectMixin(object): + + def get_redirect_target(self, resp): + """Receives a Response. Returns a redirect URI or ``None``""" + # Due to the nature of how requests processes redirects this method will + # be called at least once upon the original response and at least twice + # on each subsequent redirect response (if any). + # If a custom mixin is used to handle this logic, it may be advantageous + # to cache the redirect location onto the response object as a private + # attribute. + if resp.is_redirect: + location = resp.headers['location'] + # Currently the underlying http module on py3 decode headers + # in latin1, but empirical evidence suggests that latin1 is very + # rarely used with non-ASCII characters in HTTP headers. + # It is more likely to get UTF8 header rather than latin1. + # This causes incorrect handling of UTF8 encoded location headers. + # To solve this, we re-encode the location in latin1. 
+ if is_py3: + location = location.encode('latin1') + return to_native_string(location, 'utf8') + return None + + def should_strip_auth(self, old_url, new_url): + """Decide whether Authorization header should be removed when redirecting""" + old_parsed = urlparse(old_url) + new_parsed = urlparse(new_url) + if old_parsed.hostname != new_parsed.hostname: + return True + # Special case: allow http -> https redirect when using the standard + # ports. This isn't specified by RFC 7235, but is kept to avoid + # breaking backwards compatibility with older versions of requests + # that allowed any redirects on the same host. + if (old_parsed.scheme == 'http' and old_parsed.port in (80, None) + and new_parsed.scheme == 'https' and new_parsed.port in (443, None)): + return False + + # Handle default port usage corresponding to scheme. + changed_port = old_parsed.port != new_parsed.port + changed_scheme = old_parsed.scheme != new_parsed.scheme + default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) + if (not changed_scheme and old_parsed.port in default_port + and new_parsed.port in default_port): + return False + + # Standard case: root URI must match + return changed_port or changed_scheme + + def resolve_redirects(self, resp, req, stream=False, timeout=None, + verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs): + """Receives a Response. Returns a generator of Responses or Requests.""" + + hist = [] # keep track of history + + url = self.get_redirect_target(resp) + previous_fragment = urlparse(req.url).fragment + while url: + prepared_request = req.copy() + + # Update history and keep track of redirects. 
+ # resp.history must ignore the original request in this loop + hist.append(resp) + resp.history = hist[1:] + + try: + resp.content # Consume socket so it can be released + except (ChunkedEncodingError, ContentDecodingError, RuntimeError): + resp.raw.read(decode_content=False) + + if len(resp.history) >= self.max_redirects: + raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp) + + # Release the connection back into the pool. + resp.close() + + # Handle redirection without scheme (see: RFC 1808 Section 4) + if url.startswith('//'): + parsed_rurl = urlparse(resp.url) + url = ':'.join([to_native_string(parsed_rurl.scheme), url]) + + # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) + parsed = urlparse(url) + if parsed.fragment == '' and previous_fragment: + parsed = parsed._replace(fragment=previous_fragment) + elif parsed.fragment: + previous_fragment = parsed.fragment + url = parsed.geturl() + + # Facilitate relative 'location' headers, as allowed by RFC 7231. + # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') + # Compliant with RFC3986, we percent encode the url. + if not parsed.netloc: + url = urljoin(resp.url, requote_uri(url)) + else: + url = requote_uri(url) + + prepared_request.url = to_native_string(url) + + self.rebuild_method(prepared_request, resp) + + # https://github.com/psf/requests/issues/1084 + if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect): + # https://github.com/psf/requests/issues/3490 + purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding') + for header in purged_headers: + prepared_request.headers.pop(header, None) + prepared_request.body = None + + headers = prepared_request.headers + headers.pop('Cookie', None) + + # Extract any cookies sent on the response to the cookiejar + # in the new request. Because we've mutated our copied prepared + # request, use the old one that we haven't yet touched. 
+ extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) + merge_cookies(prepared_request._cookies, self.cookies) + prepared_request.prepare_cookies(prepared_request._cookies) + + # Rebuild auth and proxy information. + proxies = self.rebuild_proxies(prepared_request, proxies) + self.rebuild_auth(prepared_request, resp) + + # A failed tell() sets `_body_position` to `object()`. This non-None + # value ensures `rewindable` will be True, allowing us to raise an + # UnrewindableBodyError, instead of hanging the connection. + rewindable = ( + prepared_request._body_position is not None and + ('Content-Length' in headers or 'Transfer-Encoding' in headers) + ) + + # Attempt to rewind consumed file-like object. + if rewindable: + rewind_body(prepared_request) + + # Override the original request. + req = prepared_request + + if yield_requests: + yield req + else: + + resp = self.send( + req, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + allow_redirects=False, + **adapter_kwargs + ) + + extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) + + # extract redirect url, if any, for the next loop + url = self.get_redirect_target(resp) + yield resp + + def rebuild_auth(self, prepared_request, response): + """When being redirected we may want to strip authentication from the + request to avoid leaking credentials. This method intelligently removes + and reapplies authentication where possible to avoid credential loss. + """ + headers = prepared_request.headers + url = prepared_request.url + + if 'Authorization' in headers and self.should_strip_auth(response.request.url, url): + # If we get redirected to a new host, we should strip out any + # authentication headers. + del headers['Authorization'] + + # .netrc might have more auth for us on our new host. 
+ new_auth = get_netrc_auth(url) if self.trust_env else None + if new_auth is not None: + prepared_request.prepare_auth(new_auth) + + + def rebuild_proxies(self, prepared_request, proxies): + """This method re-evaluates the proxy configuration by considering the + environment variables. If we are redirected to a URL covered by + NO_PROXY, we strip the proxy configuration. Otherwise, we set missing + proxy keys for this URL (in case they were stripped by a previous + redirect). + + This method also replaces the Proxy-Authorization header where + necessary. + + :rtype: dict + """ + proxies = proxies if proxies is not None else {} + headers = prepared_request.headers + url = prepared_request.url + scheme = urlparse(url).scheme + new_proxies = proxies.copy() + no_proxy = proxies.get('no_proxy') + + bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy) + if self.trust_env and not bypass_proxy: + environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) + + proxy = environ_proxies.get(scheme, environ_proxies.get('all')) + + if proxy: + new_proxies.setdefault(scheme, proxy) + + if 'Proxy-Authorization' in headers: + del headers['Proxy-Authorization'] + + try: + username, password = get_auth_from_url(new_proxies[scheme]) + except KeyError: + username, password = None, None + + if username and password: + headers['Proxy-Authorization'] = _basic_auth_str(username, password) + + return new_proxies + + def rebuild_method(self, prepared_request, response): + """When being redirected we may want to change the method of the request + based on certain specs or browser behavior. + """ + method = prepared_request.method + + # https://tools.ietf.org/html/rfc7231#section-6.4.4 + if response.status_code == codes.see_other and method != 'HEAD': + method = 'GET' + + # Do what the browsers do, despite standards... + # First, turn 302s into GETs. 
+ if response.status_code == codes.found and method != 'HEAD': + method = 'GET' + + # Second, if a POST is responded to with a 301, turn it into a GET. + # This bizarre behaviour is explained in Issue 1704. + if response.status_code == codes.moved and method == 'POST': + method = 'GET' + + prepared_request.method = method + + +class Session(SessionRedirectMixin): + """A Requests session. + + Provides cookie persistence, connection-pooling, and configuration. + + Basic Usage:: + + >>> import requests + >>> s = requests.Session() + >>> s.get('https://httpbin.org/get') + + + Or as a context manager:: + + >>> with requests.Session() as s: + ... s.get('https://httpbin.org/get') + + """ + + __attrs__ = [ + 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify', + 'cert', 'adapters', 'stream', 'trust_env', + 'max_redirects', + ] + + def __init__(self): + + #: A case-insensitive dictionary of headers to be sent on each + #: :class:`Request ` sent from this + #: :class:`Session `. + self.headers = default_headers() + + #: Default Authentication tuple or object to attach to + #: :class:`Request `. + self.auth = None + + #: Dictionary mapping protocol or protocol and host to the URL of the proxy + #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to + #: be used on each :class:`Request `. + self.proxies = {} + + #: Event-handling hooks. + self.hooks = default_hooks() + + #: Dictionary of querystring data to attach to each + #: :class:`Request `. The dictionary values may be lists for + #: representing multivalued query parameters. + self.params = {} + + #: Stream response content default. + self.stream = False + + #: SSL Verification default. + self.verify = True + + #: SSL client certificate default, if String, path to ssl client + #: cert file (.pem). If Tuple, ('cert', 'key') pair. + self.cert = None + + #: Maximum number of redirects allowed. If the request exceeds this + #: limit, a :class:`TooManyRedirects` exception is raised. 
+ #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is + #: 30. + self.max_redirects = DEFAULT_REDIRECT_LIMIT + + #: Trust environment settings for proxy configuration, default + #: authentication and similar. + self.trust_env = True + + #: A CookieJar containing all currently outstanding cookies set on this + #: session. By default it is a + #: :class:`RequestsCookieJar `, but + #: may be any other ``cookielib.CookieJar`` compatible object. + self.cookies = cookiejar_from_dict({}) + + # Default connection adapters. + self.adapters = OrderedDict() + self.mount('https://', HTTPAdapter()) + self.mount('http://', HTTPAdapter()) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + def prepare_request(self, request): + """Constructs a :class:`PreparedRequest ` for + transmission and returns it. The :class:`PreparedRequest` has settings + merged from the :class:`Request ` instance and those of the + :class:`Session`. + + :param request: :class:`Request` instance to prepare with this + session's settings. + :rtype: requests.PreparedRequest + """ + cookies = request.cookies or {} + + # Bootstrap CookieJar. + if not isinstance(cookies, cookielib.CookieJar): + cookies = cookiejar_from_dict(cookies) + + # Merge with session cookies + merged_cookies = merge_cookies( + merge_cookies(RequestsCookieJar(), self.cookies), cookies) + + # Set environment's basic authentication if not explicitly set. 
+ auth = request.auth + if self.trust_env and not auth and not self.auth: + auth = get_netrc_auth(request.url) + + p = PreparedRequest() + p.prepare( + method=request.method.upper(), + url=request.url, + files=request.files, + data=request.data, + json=request.json, + headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict), + params=merge_setting(request.params, self.params), + auth=merge_setting(auth, self.auth), + cookies=merged_cookies, + hooks=merge_hooks(request.hooks, self.hooks), + ) + return p + + def request(self, method, url, + params=None, data=None, headers=None, cookies=None, files=None, + auth=None, timeout=None, allow_redirects=True, proxies=None, + hooks=None, stream=None, verify=None, cert=None, json=None): + """Constructs a :class:`Request `, prepares it and sends it. + Returns :class:`Response ` object. + + :param method: method for the new :class:`Request` object. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary or bytes to be sent in the query + string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the + :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the + :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the + :class:`Request`. + :param files: (optional) Dictionary of ``'filename': file-like-objects`` + for multipart encoding upload. + :param auth: (optional) Auth tuple or callable to enable + Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Set to True by default. 
+ :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol or protocol and + hostname to the URL of the proxy. + :param stream: (optional) whether to immediately download the response + content. Defaults to ``False``. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. + :param cert: (optional) if String, path to ssl client cert file (.pem). + If Tuple, ('cert', 'key') pair. + :rtype: requests.Response + """ + # Create the Request. + req = Request( + method=method.upper(), + url=url, + headers=headers, + files=files, + data=data or {}, + json=json, + params=params or {}, + auth=auth, + cookies=cookies, + hooks=hooks, + ) + prep = self.prepare_request(req) + + proxies = proxies or {} + + settings = self.merge_environment_settings( + prep.url, proxies, stream, verify, cert + ) + + # Send the request. + send_kwargs = { + 'timeout': timeout, + 'allow_redirects': allow_redirects, + } + send_kwargs.update(settings) + resp = self.send(prep, **send_kwargs) + + return resp + + def get(self, url, **kwargs): + r"""Sends a GET request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', True) + return self.request('GET', url, **kwargs) + + def options(self, url, **kwargs): + r"""Sends a OPTIONS request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', True) + return self.request('OPTIONS', url, **kwargs) + + def head(self, url, **kwargs): + r"""Sends a HEAD request. Returns :class:`Response` object. 
+ + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault('allow_redirects', False) + return self.request('HEAD', url, **kwargs) + + def post(self, url, data=None, json=None, **kwargs): + r"""Sends a POST request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request('POST', url, data=data, json=json, **kwargs) + + def put(self, url, data=None, **kwargs): + r"""Sends a PUT request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request('PUT', url, data=data, **kwargs) + + def patch(self, url, data=None, **kwargs): + r"""Sends a PATCH request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request('PATCH', url, data=data, **kwargs) + + def delete(self, url, **kwargs): + r"""Sends a DELETE request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. 
+ :rtype: requests.Response + """ + + return self.request('DELETE', url, **kwargs) + + def send(self, request, **kwargs): + """Send a given PreparedRequest. + + :rtype: requests.Response + """ + # Set defaults that the hooks can utilize to ensure they always have + # the correct parameters to reproduce the previous request. + kwargs.setdefault('stream', self.stream) + kwargs.setdefault('verify', self.verify) + kwargs.setdefault('cert', self.cert) + kwargs.setdefault('proxies', self.proxies) + + # It's possible that users might accidentally send a Request object. + # Guard against that specific failure case. + if isinstance(request, Request): + raise ValueError('You can only send PreparedRequests.') + + # Set up variables needed for resolve_redirects and dispatching of hooks + allow_redirects = kwargs.pop('allow_redirects', True) + stream = kwargs.get('stream') + hooks = request.hooks + + # Get the appropriate adapter to use + adapter = self.get_adapter(url=request.url) + + # Start time (approximately) of the request + start = preferred_clock() + + # Send the request + r = adapter.send(request, **kwargs) + + # Total elapsed time of the request (approximately) + elapsed = preferred_clock() - start + r.elapsed = timedelta(seconds=elapsed) + + # Response manipulation hooks + r = dispatch_hook('response', hooks, r, **kwargs) + + # Persist cookies + if r.history: + + # If the hooks create history then we want those cookies too + for resp in r.history: + extract_cookies_to_jar(self.cookies, resp.request, resp.raw) + + extract_cookies_to_jar(self.cookies, request, r.raw) + + # Redirect resolving generator. + gen = self.resolve_redirects(r, request, **kwargs) + + # Resolve redirects if allowed. + history = [resp for resp in gen] if allow_redirects else [] + + # Shuffle things around if there's history. 
+ if history: + # Insert the first (original) request at the start + history.insert(0, r) + # Get the last request made + r = history.pop() + r.history = history + + # If redirects aren't being followed, store the response on the Request for Response.next(). + if not allow_redirects: + try: + r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs)) + except StopIteration: + pass + + if not stream: + r.content + + return r + + def merge_environment_settings(self, url, proxies, stream, verify, cert): + """ + Check the environment and merge it with some settings. + + :rtype: dict + """ + # Gather clues from the surrounding environment. + if self.trust_env: + # Set environment's proxies. + no_proxy = proxies.get('no_proxy') if proxies is not None else None + env_proxies = get_environ_proxies(url, no_proxy=no_proxy) + for (k, v) in env_proxies.items(): + proxies.setdefault(k, v) + + # Look for requests environment configuration and be compatible + # with cURL. + if verify is True or verify is None: + verify = (os.environ.get('REQUESTS_CA_BUNDLE') or + os.environ.get('CURL_CA_BUNDLE')) + + # Merge all the kwargs. + proxies = merge_setting(proxies, self.proxies) + stream = merge_setting(stream, self.stream) + verify = merge_setting(verify, self.verify) + cert = merge_setting(cert, self.cert) + + return {'verify': verify, 'proxies': proxies, 'stream': stream, + 'cert': cert} + + def get_adapter(self, url): + """ + Returns the appropriate connection adapter for the given URL. + + :rtype: requests.adapters.BaseAdapter + """ + for (prefix, adapter) in self.adapters.items(): + + if url.lower().startswith(prefix.lower()): + return adapter + + # Nothing matches :-/ + raise InvalidSchema("No connection adapters were found for {!r}".format(url)) + + def close(self): + """Closes all adapters and as such the session""" + for v in self.adapters.values(): + v.close() + + def mount(self, prefix, adapter): + """Registers a connection adapter to a prefix. 
def session():
    """Return a :class:`Session` for context-management.

    .. deprecated:: 1.0.0

        Kept only for backwards compatibility; new code should instantiate
        :class:`~requests.sessions.Session` directly.  This may be removed
        at a future date.

    :rtype: Session
    """
    return Session()
+ 100: ('continue',), + 101: ('switching_protocols',), + 102: ('processing',), + 103: ('checkpoint',), + 122: ('uri_too_long', 'request_uri_too_long'), + 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'), + 201: ('created',), + 202: ('accepted',), + 203: ('non_authoritative_info', 'non_authoritative_information'), + 204: ('no_content',), + 205: ('reset_content', 'reset'), + 206: ('partial_content', 'partial'), + 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'), + 208: ('already_reported',), + 226: ('im_used',), + + # Redirection. + 300: ('multiple_choices',), + 301: ('moved_permanently', 'moved', '\\o-'), + 302: ('found',), + 303: ('see_other', 'other'), + 304: ('not_modified',), + 305: ('use_proxy',), + 306: ('switch_proxy',), + 307: ('temporary_redirect', 'temporary_moved', 'temporary'), + 308: ('permanent_redirect', + 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0 + + # Client Error. + 400: ('bad_request', 'bad'), + 401: ('unauthorized',), + 402: ('payment_required', 'payment'), + 403: ('forbidden',), + 404: ('not_found', '-o-'), + 405: ('method_not_allowed', 'not_allowed'), + 406: ('not_acceptable',), + 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'), + 408: ('request_timeout', 'timeout'), + 409: ('conflict',), + 410: ('gone',), + 411: ('length_required',), + 412: ('precondition_failed', 'precondition'), + 413: ('request_entity_too_large',), + 414: ('request_uri_too_large',), + 415: ('unsupported_media_type', 'unsupported_media', 'media_type'), + 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'), + 417: ('expectation_failed',), + 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'), + 421: ('misdirected_request',), + 422: ('unprocessable_entity', 'unprocessable'), + 423: ('locked',), + 424: ('failed_dependency', 'dependency'), + 425: ('unordered_collection', 'unordered'), + 426: ('upgrade_required', 'upgrade'), + 428: ('precondition_required', 
class CaseInsensitiveDict(MutableMapping):
    """A case-insensitive ``dict``-like object.

    Implements all methods and operations of ``MutableMapping`` as well as
    dict's ``copy``, and additionally provides ``lower_items``.  All keys
    are expected to be strings.

    The structure remembers the case of the last key to be set, so
    iteration (``iter(instance)``, ``keys()``, ``items()``, ``iterkeys()``,
    ``iteritems()``) yields case-sensitive keys, while lookups and
    containment tests are case-insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']              # True

    For example, ``headers['content-encoding']`` returns the value of a
    ``'Content-Encoding'`` response header regardless of how the header
    name was originally stored.

    Behavior is undefined if the constructor, ``.update``, or equality
    comparison are given distinct keys with equal ``.lower()``s.
    """

    def __init__(self, data=None, **kwargs):
        # Maps lowercased key -> (originally cased key, value).
        self._store = OrderedDict()
        self.update({} if data is None else data, **kwargs)

    def __setitem__(self, key, value):
        # Lowercased key for lookups; the actual key travels with the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        return (original for original, _ in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return ((lower, pair[1]) for lower, pair in self._store.items())

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        # Compare case-insensitively via the lowered views.
        other = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        # Rebuilding from the stored (cased key, value) pairs preserves the
        # remembered key casing.
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
self).__init__() + + def __repr__(self): + return '' % (self.name) + + def __getitem__(self, key): + # We allow fall-through here, so values default to None + + return self.__dict__.get(key, None) + + def get(self, key, default=None): + return self.__dict__.get(key, default) diff --git a/requests/utils.py b/requests/utils.py new file mode 100644 index 0000000..c1700d7 --- /dev/null +++ b/requests/utils.py @@ -0,0 +1,982 @@ +# -*- coding: utf-8 -*- + +""" +requests.utils +~~~~~~~~~~~~~~ + +This module provides utility functions that are used within Requests +that are also useful for external consumption. +""" + +import codecs +import contextlib +import io +import os +import re +import socket +import struct +import sys +import tempfile +import warnings +import zipfile +from collections import OrderedDict + +from .__version__ import __version__ +from . import certs +# to_native_string is unused here, but imported here for backwards compatibility +from ._internal_utils import to_native_string +from .compat import parse_http_list as _parse_list_header +from .compat import ( + quote, urlparse, bytes, str, unquote, getproxies, + proxy_bypass, urlunparse, basestring, integer_types, is_py3, + proxy_bypass_environment, getproxies_environment, Mapping) +from .cookies import cookiejar_from_dict +from .structures import CaseInsensitiveDict +from .exceptions import ( + InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError) + +NETRC_FILES = ('.netrc', '_netrc') + +DEFAULT_CA_BUNDLE_PATH = certs.where() + +DEFAULT_PORTS = {'http': 80, 'https': 443} + + +if sys.platform == 'win32': + # provide a proxy_bypass version on Windows without DNS lookups + + def proxy_bypass_registry(host): + try: + if is_py3: + import winreg + else: + import _winreg as winreg + except ImportError: + return False + + try: + internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER, + r'Software\Microsoft\Windows\CurrentVersion\Internet Settings') + # ProxyEnable could be REG_SZ or REG_DWORD, 
def super_len(o):
    """Best-effort remaining length of *o*.

    Works for anything with ``__len__``, a ``len`` attribute, or a real
    file descriptor, and subtracts the current read position when the
    object supports ``tell()``.

    :rtype: int (never negative)
    """
    length = None
    pos = 0

    if hasattr(o, '__len__'):
        length = len(o)
    elif hasattr(o, 'len'):
        length = o.len
    elif hasattr(o, 'fileno'):
        try:
            fd = o.fileno()
        except io.UnsupportedOperation:
            # e.g. BytesIO/StringIO expose fileno() but cannot provide one.
            pass
        else:
            length = os.fstat(fd).st_size

            # fstat gives the on-disk byte size, which only matches what
            # will be read if the file was opened in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )

    if hasattr(o, 'tell'):
        try:
            pos = o.tell()
        except (OSError, IOError):
            # Special descriptors (e.g. stdin) may refuse tell(); treat the
            # stream as fully consumed so the caller chunks it instead.
            if length is not None:
                pos = length
        else:
            if length is None and hasattr(o, 'seek'):
                # Seekable objects without a usable fileno (StringIO/BytesIO):
                # measure by seeking to the end, then restore the position.
                try:
                    o.seek(0, 2)
                    length = o.tell()
                    o.seek(pos or 0)
                except (OSError, IOError):
                    length = 0

    if length is None:
        length = 0

    return max(0, length - pos)
def extract_zipped_paths(path):
    """Replace nonexistent paths that look like they refer to a member of a zip
    archive with the location of an extracted copy of the target, or else
    just return the provided path unchanged.

    :param path: filesystem path, possibly of the form
        ``/real/archive.zip/member/inside``.
    :rtype: str
    """
    if os.path.exists(path):
        # Already a valid path; nothing to resolve.
        return path

    # Find the longest existing prefix of the path and treat it as the
    # archive; the remainder is assumed to name a member inside it.
    archive, member = os.path.split(path)
    while archive and not os.path.exists(archive):
        archive, prefix = os.path.split(archive)
        member = '/'.join([prefix, member])

    if not zipfile.is_zipfile(archive):
        return path

    # Fix: close the archive handle deterministically (the previous version
    # opened the ZipFile and never closed it).
    with zipfile.ZipFile(archive) as zip_file:
        if member not in zip_file.namelist():
            return path

        # Extract into the temp dir, reusing an earlier extraction when the
        # target file already exists there.
        tmp = tempfile.gettempdir()
        extracted_path = os.path.join(tmp, *member.split('/'))
        if not os.path.exists(extracted_path):
            extracted_path = zip_file.extract(member, path=tmp)

    return extracted_path
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse a comma-separated list header (RFC 2068 Section 2).

    Elements may be quoted-strings (which may themselves contain commas);
    quotes are stripped from such elements after parsing.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :rtype: list
    """
    return [
        unquote_header_value(element[1:-1])
        if element[:1] == element[-1:] == '"' else element
        for element in _parse_list_header(value)
    ]


# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse a list of key=value pairs (RFC 2068 Section 2) into a dict.

    Keys without a value map to ``None``; quoted values are unquoted.

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :rtype: dict
    """
    parsed = {}
    for element in _parse_list_header(value):
        if '=' not in element:
            parsed[element] = None
            continue
        name, _, val = element.partition('=')
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        parsed[name] = val
    return parsed


# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquote a header value the way browsers actually do it, rather than
    per the strict RFC (IE, for instance, sends filenames like
    "C:\foo\bar.txt").

    :param value: the header value to unquote.
    :param is_filename: when True, a UNC-style value (leading ``\\``) is
        returned verbatim so the leading double slash survives.
    :rtype: str
    """
    if not (value and value[0] == value[-1] == '"'):
        # Not a quoted-string; return untouched.
        return value
    value = value[1:-1]
    if is_filename and value[:2] == '\\\\':
        # UNC path: un-escaping would collapse the leading double slash.
        return value
    return value.replace('\\\\', '\\').replace('\\"', '"')


def dict_from_cookiejar(cj):
    """Return a name/value dictionary built from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    return {cookie.name: cookie.value for cookie in cj}
+ :rtype: CookieJar + """ + + return cookiejar_from_dict(cookie_dict, cj) + + +def get_encodings_from_content(content): + """Returns encodings from given content string. + + :param content: bytestring to extract encodings from. + """ + warnings.warn(( + 'In requests 3.0, get_encodings_from_content will be removed. For ' + 'more information, please see the discussion on issue #2266. (This' + ' warning should only appear once.)'), + DeprecationWarning) + + charset_re = re.compile(r']', flags=re.I) + pragma_re = re.compile(r']', flags=re.I) + xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') + + return (charset_re.findall(content) + + pragma_re.findall(content) + + xml_re.findall(content)) + + +def _parse_content_type_header(header): + """Returns content type and parameters from given header + + :param header: string + :return: tuple containing content type and dictionary of + parameters + """ + + tokens = header.split(';') + content_type, params = tokens[0].strip(), tokens[1:] + params_dict = {} + items_to_strip = "\"' " + + for param in params: + param = param.strip() + if param: + key, value = param, True + index_of_equals = param.find("=") + if index_of_equals != -1: + key = param[:index_of_equals].strip(items_to_strip) + value = param[index_of_equals + 1:].strip(items_to_strip) + params_dict[key.lower()] = value + return content_type, params_dict + + +def get_encoding_from_headers(headers): + """Returns encodings from given HTTP Header Dict. + + :param headers: dictionary to extract encoding from. 
+ :rtype: str + """ + + content_type = headers.get('content-type') + + if not content_type: + return None + + content_type, params = _parse_content_type_header(content_type) + + if 'charset' in params: + return params['charset'].strip("'\"") + + if 'text' in content_type: + return 'ISO-8859-1' + + +def stream_decode_response_unicode(iterator, r): + """Stream decodes a iterator.""" + + if r.encoding is None: + for item in iterator: + yield item + return + + decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace') + for chunk in iterator: + rv = decoder.decode(chunk) + if rv: + yield rv + rv = decoder.decode(b'', final=True) + if rv: + yield rv + + +def iter_slices(string, slice_length): + """Iterate over slices of a string.""" + pos = 0 + if slice_length is None or slice_length <= 0: + slice_length = len(string) + while pos < len(string): + yield string[pos:pos + slice_length] + pos += slice_length + + +def get_unicode_from_response(r): + """Returns the requested content back in unicode. + + :param r: Response object to get unicode content from. + + Tried: + + 1. charset from content-type + 2. fall back and replace all unicode characters + + :rtype: str + """ + warnings.warn(( + 'In requests 3.0, get_unicode_from_response will be removed. For ' + 'more information, please see the discussion on issue #2266. 
(This' + ' warning should only appear once.)'), + DeprecationWarning) + + tried_encodings = [] + + # Try charset from content-type + encoding = get_encoding_from_headers(r.headers) + + if encoding: + try: + return str(r.content, encoding) + except UnicodeError: + tried_encodings.append(encoding) + + # Fall back: + try: + return str(r.content, encoding, errors='replace') + except TypeError: + return r.content + + +# The unreserved URI characters (RFC 3986) +UNRESERVED_SET = frozenset( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~") + + +def unquote_unreserved(uri): + """Un-escape any percent-escape sequences in a URI that are unreserved + characters. This leaves all reserved, illegal and non-ASCII bytes encoded. + + :rtype: str + """ + parts = uri.split('%') + for i in range(1, len(parts)): + h = parts[i][0:2] + if len(h) == 2 and h.isalnum(): + try: + c = chr(int(h, 16)) + except ValueError: + raise InvalidURL("Invalid percent-escape sequence: '%s'" % h) + + if c in UNRESERVED_SET: + parts[i] = c + parts[i][2:] + else: + parts[i] = '%' + parts[i] + else: + parts[i] = '%' + parts[i] + return ''.join(parts) + + +def requote_uri(uri): + """Re-quote the given URI. + + This function passes the given URI through an unquote/quote cycle to + ensure that it is fully and consistently quoted. + + :rtype: str + """ + safe_with_percent = "!#$%&'()*+,/:;=?@[]~" + safe_without_percent = "!#$&'()*+,/:;=?@[]~" + try: + # Unquote only the unreserved characters + # Then quote only illegal characters (do not quote reserved, + # unreserved, or '%') + return quote(unquote_unreserved(uri), safe=safe_with_percent) + except InvalidURL: + # We couldn't unquote the given URI, so let's try quoting it, but + # there may be unquoted '%'s in the URI. We need to make sure they're + # properly quoted so they do not cause issues elsewhere. 
+ return quote(uri, safe=safe_without_percent) + + +def address_in_network(ip, net): + """This function allows you to check if an IP belongs to a network subnet + + Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 + returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 + + :rtype: bool + """ + ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0] + netaddr, bits = net.split('/') + netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0] + network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask + return (ipaddr & netmask) == (network & netmask) + + +def dotted_netmask(mask): + """Converts mask from /xx format to xxx.xxx.xxx.xxx + + Example: if mask is 24 function returns 255.255.255.0 + + :rtype: str + """ + bits = 0xffffffff ^ (1 << 32 - mask) - 1 + return socket.inet_ntoa(struct.pack('>I', bits)) + + +def is_ipv4_address(string_ip): + """ + :rtype: bool + """ + try: + socket.inet_aton(string_ip) + except socket.error: + return False + return True + + +def is_valid_cidr(string_network): + """ + Very simple check of the cidr format in no_proxy variable. + + :rtype: bool + """ + if string_network.count('/') == 1: + try: + mask = int(string_network.split('/')[1]) + except ValueError: + return False + + if mask < 1 or mask > 32: + return False + + try: + socket.inet_aton(string_network.split('/')[0]) + except socket.error: + return False + else: + return False + return True + + +@contextlib.contextmanager +def set_environ(env_name, value): + """Set the environment variable 'env_name' to 'value' + + Save previous value, yield, and then restore the previous value stored in + the environment variable 'env_name'. 
+ + If 'value' is None, do nothing""" + value_changed = value is not None + if value_changed: + old_value = os.environ.get(env_name) + os.environ[env_name] = value + try: + yield + finally: + if value_changed: + if old_value is None: + del os.environ[env_name] + else: + os.environ[env_name] = old_value + + +def should_bypass_proxies(url, no_proxy): + """ + Returns whether we should bypass proxies or not. + + :rtype: bool + """ + # Prioritize lowercase environment variables over uppercase + # to keep a consistent behaviour with other http projects (curl, wget). + get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) + + # First check whether no_proxy is defined. If it is, check that the URL + # we're getting isn't in the no_proxy list. + no_proxy_arg = no_proxy + if no_proxy is None: + no_proxy = get_proxy('no_proxy') + parsed = urlparse(url) + + if parsed.hostname is None: + # URLs don't always have hostnames, e.g. file:/// urls. + return True + + if no_proxy: + # We need to check whether we match here. We need to see if we match + # the end of the hostname, both with and without the port. + no_proxy = ( + host for host in no_proxy.replace(' ', '').split(',') if host + ) + + if is_ipv4_address(parsed.hostname): + for proxy_ip in no_proxy: + if is_valid_cidr(proxy_ip): + if address_in_network(parsed.hostname, proxy_ip): + return True + elif parsed.hostname == proxy_ip: + # If no_proxy ip was defined in plain IP notation instead of cidr notation & + # matches the IP of the index + return True + else: + host_with_port = parsed.hostname + if parsed.port: + host_with_port += ':{}'.format(parsed.port) + + for host in no_proxy: + if parsed.hostname.endswith(host) or host_with_port.endswith(host): + # The URL does match something in no_proxy, so we don't want + # to apply the proxies on this URL. + return True + + with set_environ('no_proxy', no_proxy_arg): + # parsed.hostname can be `None` in cases such as a file URI. 
+ try: + bypass = proxy_bypass(parsed.hostname) + except (TypeError, socket.gaierror): + bypass = False + + if bypass: + return True + + return False + + +def get_environ_proxies(url, no_proxy=None): + """ + Return a dict of environment proxies. + + :rtype: dict + """ + if should_bypass_proxies(url, no_proxy=no_proxy): + return {} + else: + return getproxies() + + +def select_proxy(url, proxies): + """Select a proxy for the url, if applicable. + + :param url: The url being for the request + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + """ + proxies = proxies or {} + urlparts = urlparse(url) + if urlparts.hostname is None: + return proxies.get(urlparts.scheme, proxies.get('all')) + + proxy_keys = [ + urlparts.scheme + '://' + urlparts.hostname, + urlparts.scheme, + 'all://' + urlparts.hostname, + 'all', + ] + proxy = None + for proxy_key in proxy_keys: + if proxy_key in proxies: + proxy = proxies[proxy_key] + break + + return proxy + + +def default_user_agent(name="python-requests"): + """ + Return a string representing the default user agent. + + :rtype: str + """ + return '%s/%s' % (name, __version__) + + +def default_headers(): + """ + :rtype: requests.structures.CaseInsensitiveDict + """ + return CaseInsensitiveDict({ + 'User-Agent': default_user_agent(), + 'Accept-Encoding': ', '.join(('gzip', 'deflate')), + 'Accept': '*/*', + 'Connection': 'keep-alive', + }) + + +def parse_header_links(value): + """Return a list of parsed link headers proxies. + + i.e. 
Link: ; rel=front; type="image/jpeg",; rel=back;type="image/jpeg" + + :rtype: list + """ + + links = [] + + replace_chars = ' \'"' + + value = value.strip(replace_chars) + if not value: + return links + + for val in re.split(', *<', value): + try: + url, params = val.split(';', 1) + except ValueError: + url, params = val, '' + + link = {'url': url.strip('<> \'"')} + + for param in params.split(';'): + try: + key, value = param.split('=') + except ValueError: + break + + link[key.strip(replace_chars)] = value.strip(replace_chars) + + links.append(link) + + return links + + +# Null bytes; no need to recreate these on each call to guess_json_utf +_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3 +_null2 = _null * 2 +_null3 = _null * 3 + + +def guess_json_utf(data): + """ + :rtype: str + """ + # JSON always starts with two ASCII characters, so detection is as + # easy as counting the nulls and from their location and count + # determine the encoding. Also detect a BOM, if present. + sample = data[:4] + if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): + return 'utf-32' # BOM included + if sample[:3] == codecs.BOM_UTF8: + return 'utf-8-sig' # BOM included, MS style (discouraged) + if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): + return 'utf-16' # BOM included + nullcount = sample.count(_null) + if nullcount == 0: + return 'utf-8' + if nullcount == 2: + if sample[::2] == _null2: # 1st and 3rd are null + return 'utf-16-be' + if sample[1::2] == _null2: # 2nd and 4th are null + return 'utf-16-le' + # Did not detect 2 valid UTF-16 ascii-range characters + if nullcount == 3: + if sample[:3] == _null3: + return 'utf-32-be' + if sample[1:] == _null3: + return 'utf-32-le' + # Did not detect a valid UTF-32 ascii-range character + return None + + +def prepend_scheme_if_needed(url, new_scheme): + """Given a URL that may or may not have a scheme, prepend the given scheme. + Does not replace a present scheme with the one provided as an argument. 
+ + :rtype: str + """ + scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) + + # urlparse is a finicky beast, and sometimes decides that there isn't a + # netloc present. Assume that it's being over-cautious, and switch netloc + # and path if urlparse decided there was no netloc. + if not netloc: + netloc, path = path, netloc + + return urlunparse((scheme, netloc, path, params, query, fragment)) + + +def get_auth_from_url(url): + """Given a url with authentication components, extract them into a tuple of + username,password. + + :rtype: (str,str) + """ + parsed = urlparse(url) + + try: + auth = (unquote(parsed.username), unquote(parsed.password)) + except (AttributeError, TypeError): + auth = ('', '') + + return auth + + +# Moved outside of function to avoid recompile every call +_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$') +_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$') + + +def check_header_validity(header): + """Verifies that header value is a string which doesn't contain + leading whitespace or return characters. This prevents unintended + header injection. + + :param header: tuple, in the format (name, value). + """ + name, value = header + + if isinstance(value, bytes): + pat = _CLEAN_HEADER_REGEX_BYTE + else: + pat = _CLEAN_HEADER_REGEX_STR + try: + if not pat.match(value): + raise InvalidHeader("Invalid return character or leading space in header: %s" % name) + except TypeError: + raise InvalidHeader("Value for header {%s: %s} must be of type str or " + "bytes, not %s" % (name, value, type(value))) + + +def urldefragauth(url): + """ + Given a url remove the fragment and the authentication part. 
+ + :rtype: str + """ + scheme, netloc, path, params, query, fragment = urlparse(url) + + # see func:`prepend_scheme_if_needed` + if not netloc: + netloc, path = path, netloc + + netloc = netloc.rsplit('@', 1)[-1] + + return urlunparse((scheme, netloc, path, params, query, '')) + + +def rewind_body(prepared_request): + """Move file pointer back to its recorded starting position + so it can be read again on redirect. + """ + body_seek = getattr(prepared_request.body, 'seek', None) + if body_seek is not None and isinstance(prepared_request._body_position, integer_types): + try: + body_seek(prepared_request._body_position) + except (IOError, OSError): + raise UnrewindableBodyError("An error occurred when rewinding request " + "body for redirect.") + else: + raise UnrewindableBodyError("Unable to rewind request body for redirect.")