diff --git a/utils/__pycache__/utils_loss.cpython-38.pyc b/utils/__pycache__/utils_loss.cpython-38.pyc
index 1dedf35f6309fd39f254f1f43d36e4058a917e08..ffa1b61e1ef8722200c6ba76c5a13b54386b10dd 100644
Binary files a/utils/__pycache__/utils_loss.cpython-38.pyc and b/utils/__pycache__/utils_loss.cpython-38.pyc differ
diff --git a/utils/__pycache__/utils_model.cpython-38.pyc b/utils/__pycache__/utils_model.cpython-38.pyc
index 7f25fea22c7383e2254140e87a520ffa8e859e25..3110170af2317adcc3d22e65c166353e174c5eb8 100644
Binary files a/utils/__pycache__/utils_model.cpython-38.pyc and b/utils/__pycache__/utils_model.cpython-38.pyc differ
diff --git a/utils/utils.py b/utils/utils.py
index c8340af..5ed5ee0 100644
--- a/utils/utils.py
+++ b/utils/utils.py
@@ -1,5 +1,5 @@
from sklearn import utils
-import torch, itertools, os, time, thop, json, cv2
+import torch, itertools, os, time, thop, json, cv2, math
import torch.nn as nn
import torchvision.transforms as transforms
import numpy as np
@@ -8,7 +8,7 @@
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
from math import cos, pi
-from sklearn.metrics import classification_report, confusion_matrix, cohen_kappa_score
+from sklearn.metrics import classification_report, confusion_matrix, cohen_kappa_score, precision_score, recall_score, f1_score, accuracy_score
from prettytable import PrettyTable
from copy import deepcopy
from argparse import Namespace
@@ -19,6 +19,7 @@
from pytorch_grad_cam.utils.image import show_cam_on_image
from collections import OrderedDict
from .utils_aug import rand_bbox
+from pycm import ConfusionMatrix
cnames = {
'aliceblue': '#F0F8FF',
@@ -162,6 +163,9 @@
'yellow': '#FFFF00',
'yellowgreen': '#9ACD32'}
+def str2float(data):
+ return (0.0 if type(data) is str else data)
+
def save_model(path, **ckpt):
torch.save(ckpt, path)
@@ -185,7 +189,6 @@ def mixup_data(x, opt, alpha=1.0):
raise 'Unsupported MixUp Methods.'
return mixed_x
-
def plot_train_batch(dataset, opt):
dataset.transform.transforms[-1] = transforms.ToTensor()
dataloader = iter(torch.utils.data.DataLoader(dataset, 16, shuffle=True))
@@ -247,37 +250,6 @@ def plot_log(opt):
plt.savefig(r'{}/learning_rate_curve.png'.format(opt.save_path))
-def plot_confusion_matrix(cm, classes, save_path, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues, name='test'):
- plt.figure(figsize=(min(len(classes), 30), min(len(classes), 30)))
- if normalize:
- cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
- trained_classes = classes
- plt.imshow(cm, interpolation='nearest', cmap=cmap)
- plt.title(name + title, fontsize=min(len(classes), 30)) # title font size
- tick_marks = np.arange(len(classes))
- plt.xticks(np.arange(len(trained_classes)), classes, rotation=90, fontsize=min(len(classes), 30)) # X tricks font size
- plt.yticks(tick_marks, classes, fontsize=min(len(classes), 30)) # Y tricks font size
- thresh = cm.max() / 2.
- for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
- plt.text(j, i, np.round(cm[i, j], 2), horizontalalignment="center",
- color="white" if cm[i, j] > thresh else "black", fontsize=min(len(classes), 30)) # confusion_matrix font size
- plt.ylabel('True label', fontsize=min(len(classes), 30)) # True label font size
- plt.xlabel('Predicted label', fontsize=min(len(classes), 30)) # Predicted label font size
- plt.tight_layout()
- plt.savefig(os.path.join(save_path, 'confusion_matrix.png'), dpi=150)
- plt.show()
-
-def save_confusion_matrix(cm, classes, save_path, normalize=True):
- if normalize:
- cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
- str_arr = []
- for class_, cm_ in zip(classes, cm):
- str_arr.append('{},{}'.format(class_, ','.join(list(map(lambda x:'{:.4f}'.format(x), list(cm_))))))
- str_arr.append(' ,{}'.format(','.join(classes)))
-
- with open(os.path.join(save_path, 'confusion_matrix.csv'), 'w+') as f:
- f.write('\n'.join(str_arr))
-
class WarmUpLR:
def __init__(self, optimizer, opt):
self.optimizer = optimizer
@@ -306,7 +278,26 @@ def adjust_lr(self):
1 + cos(pi * (self.current_epoch - self.warmup_epoch) / (self.max_epoch - self.warmup_epoch))) / 2
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
-
+
+ def state_dict(self):
+ return {
+ 'lr_min': self.lr_min,
+ 'lr_max': self.lr_max,
+ 'max_epoch': self.max_epoch,
+ 'current_epoch': self.current_epoch,
+ 'warmup_epoch': self.warmup_epoch,
+ 'lr_scheduler': self.lr_scheduler.state_dict(),
+ 'optimizer': self.optimizer.state_dict()
+ }
+
+ def load_state_dict(self, state_dict):
+ self.lr_min = state_dict['lr_min']
+ self.lr_max = state_dict['lr_max']
+ self.max_epoch = state_dict['max_epoch']
+ self.current_epoch = state_dict['current_epoch']
+ self.warmup_epoch = state_dict['warmup_epoch']
+ self.optimizer.load_state_dict(state_dict['optimizer'])
+ self.lr_scheduler.load_state_dict(state_dict['lr_scheduler'])
def show_config(opt):
table = PrettyTable()
@@ -327,6 +318,35 @@ def show_config(opt):
with open(os.path.join(opt['save_path'], 'param.json'), 'w+') as f:
f.write(json.dumps(opt, indent=4, separators={':', ','}))
+def plot_confusion_matrix(cm, classes, save_path, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues, name='test'):
+ plt.figure(figsize=(min(len(classes), 30), min(len(classes), 30)))
+ if normalize:
+ cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
+ trained_classes = classes
+ plt.imshow(cm, interpolation='nearest', cmap=cmap)
+ plt.title(name + title, fontsize=min(len(classes), 30)) # title font size
+ tick_marks = np.arange(len(classes))
+ plt.xticks(np.arange(len(trained_classes)), classes, rotation=90, fontsize=min(len(classes), 30)) # X tricks font size
+ plt.yticks(tick_marks, classes, fontsize=min(len(classes), 30)) # Y tricks font size
+ thresh = cm.max() / 2.
+ for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
+ plt.text(j, i, np.round(cm[i, j], 2), horizontalalignment="center",
+ color="white" if cm[i, j] > thresh else "black", fontsize=min(len(classes), 30)) # confusion_matrix font size
+ plt.ylabel('True label', fontsize=min(len(classes), 30)) # True label font size
+ plt.xlabel('Predicted label', fontsize=min(len(classes), 30)) # Predicted label font size
+ plt.tight_layout()
+ plt.savefig(os.path.join(save_path, 'confusion_matrix.png'), dpi=150)
+ plt.show()
+
+def save_confusion_matrix(cm, classes, save_path):
+ str_arr = []
+ for class_, cm_ in zip(classes, cm):
+ str_arr.append('{},{}'.format(class_, ','.join(list(map(lambda x:'{:.4f}'.format(x), list(cm_))))))
+ str_arr.append(' ,{}'.format(','.join(classes)))
+
+ with open(os.path.join(save_path, 'confusion_matrix.csv'), 'w+') as f:
+ f.write('\n'.join(str_arr))
+
def cal_cm(y_true, y_pred, CLASS_NUM):
y_true, y_pred = y_true.to('cpu').detach().numpy(), np.argmax(y_pred.to('cpu').detach().numpy(), axis=1)
y_true, y_pred = y_true.reshape((-1)), y_pred.reshape((-1))
@@ -385,58 +405,63 @@ def __init__(self, y_true, y_pred, class_num):
self.y_true = y_true
self.y_pred = y_pred
self.class_num = class_num
- self.result = {str(i):{} for i in range(self.class_num)}
-
- def cal_class_kappa(self):
- for i in range(self.class_num):
- y_true_class = np.where(self.y_true == i, 1, 0)
- y_pred_class = np.where(self.y_pred == i, 1, 0)
-
- self.result[str(i)]['kappa'] = cohen_kappa_score(y_true_class, y_pred_class)
+ self.result = {i:{} for i in range(self.class_num)}
+ self.metrice = ['PPV', 'TPR', 'AUC', 'AUPR', 'F05', 'F1', 'F2']
+ self.metrice_name = ['Precision', 'Recall', 'AUC', 'AUPR', 'F0.5', 'F1', 'F2', 'ACC']
def __call__(self):
- self.cal_class_kappa()
- return self.result
+ cm = ConfusionMatrix(self.y_true, self.y_pred)
+ for j in range(len(self.metrice)):
+ for i in range(self.class_num):
+ self.result[i][self.metrice_name[j]] = str2float(eval('cm.{}'.format(self.metrice[j]))[i])
+
+ return self.result, cm
def classification_metrice(y_true, y_pred, class_num, label, save_path):
- cm = confusion_matrix(y_true, y_pred, labels=list(range(class_num)))
+ metrice = Test_Metrice(y_true, y_pred, class_num)
+ class_report, cm = metrice()
+ class_pa = np.diag(cm.to_array(normalized=True)) # mean class accuracy
if class_num <= 50:
- plot_confusion_matrix(cm, label, save_path)
- save_confusion_matrix(cm, label, save_path)
- cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
- class_pa = np.diag(cm) # mean class accuracy
- class_report = classification_report(y_true, y_pred, output_dict=True)
- extra_class_report = Test_Metrice(y_true, y_pred, class_num)()
-
- cols_name = ['class', 'precision', 'recall', 'f1-score', 'kappa', 'accuracy']
-
- table = PrettyTable()
- table.title = 'Accuracy:{:.5f} MPA:{:.5f}'.format(class_report['accuracy'], np.mean(class_pa))
- table.field_names = cols_name
- for i in range(class_num):
- table.add_row([label[i],
- '{:.5f}'.format(class_report[str(i)]['precision']),
- '{:.5f}'.format(class_report[str(i)]['recall']),
- '{:.5f}'.format(class_report[str(i)]['f1-score']),
- '{:.5f}'.format(extra_class_report[str(i)]['kappa']),
- '{:.5f}'.format(class_pa[i])
- ])
+ plot_confusion_matrix(cm.to_array(), label, save_path)
+ save_confusion_matrix(cm.to_array(normalized=True), label, save_path)
+
+ table1_cols_name = ['class'] + metrice.metrice_name
+ table1 = PrettyTable()
+ table1.title = 'Per Class'
+ table1.field_names = table1_cols_name
+ with open(os.path.join(save_path, 'perclass_result.csv'), 'w+', encoding='utf-8') as f:
+ f.write(','.join(table1_cols_name) + '\n')
+ for i in range(class_num):
+ table1.add_row([label[i]] + ['{:.5f}'.format(class_report[i][j]) for j in table1_cols_name[1:-1]] + ['{:.5f}'.format(class_pa[i])])
+ f.write(','.join([label[i]] + ['{:.5f}'.format(class_report[i][j]) for j in table1_cols_name[1:-1]] + ['{:.5f}'.format(class_pa[i])]) + '\n')
+ print(table1)
+
+ table2_cols_name = ['Accuracy', 'MPA', 'Kappa', 'Precision_Micro', 'Recall_Micro', 'F1_Micro', 'Precision_Macro', 'Recall_Macro', 'F1_Macro']
+ table2 = PrettyTable()
+ table2.title = 'Overall'
+ table2.field_names = table2_cols_name
+ with open(os.path.join(save_path, 'overall_result.csv'), 'w+', encoding='utf-8') as f:
+ data = ['{:.5f}'.format(str2float(cm.Overall_ACC)),
+ '{:.5f}'.format(np.mean(class_pa)),
+ '{:.5f}'.format(str2float(cm.Kappa)),
+ '{:.5f}'.format(str2float(cm.PPV_Micro)),
+ '{:.5f}'.format(str2float(cm.TPR_Micro)),
+ '{:.5f}'.format(str2float(cm.F1_Micro)),
+ '{:.5f}'.format(str2float(cm.PPV_Macro)),
+ '{:.5f}'.format(str2float(cm.TPR_Macro)),
+ '{:.5f}'.format(str2float(cm.F1_Macro)),
+ ]
+
+ table2.add_row(data)
+
+ f.write(','.join(table2_cols_name) + '\n')
+ f.write(','.join(data))
+ print(table2)
- print(table)
with open(os.path.join(save_path, 'result.txt'), 'w+', encoding='utf-8') as f:
- f.write(str(table))
-
- with open(os.path.join(save_path, 'result.csv'), 'w+', encoding='utf-8') as f:
- f.write(','.join(cols_name) + '\n')
- f.write('\n'.join(['{},{}'.format(label[i], ','.join(
- [
- '{:.5f}'.format(class_report[str(i)]['precision']),
- '{:.5f}'.format(class_report[str(i)]['recall']),
- '{:.5f}'.format(class_report[str(i)]['f1-score']),
- '{:.5f}'.format(extra_class_report[str(i)]['kappa']),
- '{:.5f}'.format(class_pa[i])
- ]
- )) for i in range(class_num)]))
+ f.write(str(table1))
+ f.write('\n')
+ f.write(str(table2))
def update_opt(a, b):
b = vars(b)
@@ -608,9 +633,10 @@ def visual_tsne(feature, y_true, path, labels, save_path):
f.write('\n'.join(['{},{},{:.0f},{:.0f}'.format(i, labels[j], k[0], k[1]) for i, j, k in zip(path, y_true, feature_tsne)]))
-def predict_single_image(path, model, test_transform, DEVICE):
+def predict_single_image(path, model, test_transform, DEVICE, half=False):
pil_img = Image.open(path)
tensor_img = test_transform(pil_img).unsqueeze(0).to(DEVICE)
+ tensor_img = (tensor_img.half() if half else tensor_img)
if len(tensor_img.shape) == 5:
tensor_img = tensor_img.reshape((tensor_img.size(0) * tensor_img.size(1), tensor_img.size(2), tensor_img.size(3), tensor_img.size(4)))
pred_result = torch.softmax(model(tensor_img).mean(0), 0)
@@ -620,13 +646,10 @@ def predict_single_image(path, model, test_transform, DEVICE):
class cam_visual:
def __init__(self, model, test_transform, DEVICE, target_layers, opt):
- self.model = model
self.test_transform = test_transform
self.DEVICE = DEVICE
- self.target_layers = target_layers
- self.opt = opt
- self.cam_model = eval(opt.cam_type)(model=model, target_layers=[target_layers], use_cuda=torch.cuda.is_available())
+ self.cam_model = eval(opt.cam_type)(model=deepcopy(model).float(), target_layers=[target_layers], use_cuda=torch.cuda.is_available())
def __call__(self, path, label):
pil_img = Image.open(path)
@@ -693,3 +716,37 @@ def dict_to_PrettyTable(data, name):
table.field_names = data_keys
table.add_row(['{:.5f}'.format(data[i]) for i in data_keys])
return str(table)
+
+def is_parallel(model):
+ # Returns True if model is of type DP or DDP
+ return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
+
+def de_parallel(model):
+ # De-parallelize a model: returns single-GPU model if model is of type DP or DDP
+ return model.module if is_parallel(model) else model
+
+class ModelEMA:
+ """ Updated Exponential Moving Average (EMA) from https://github.com/rwightman/pytorch-image-models
+ Keeps a moving average of everything in the model state_dict (parameters and buffers)
+ For EMA details see https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
+ """
+
+ def __init__(self, model, decay=0.9999, tau=2000, updates=0):
+ # Create EMA
+ self.ema = deepcopy(de_parallel(model)).eval() # FP32 EMA
+ self.updates = updates # number of EMA updates
+ self.decay = lambda x: decay * (1 - math.exp(-x / tau)) # decay exponential ramp (to help early epochs)
+ for p in self.ema.parameters():
+ p.requires_grad_(False)
+
+ def update(self, model):
+ # Update EMA parameters
+ self.updates += 1
+ d = self.decay(self.updates)
+
+ msd = de_parallel(model).state_dict() # model state_dict
+ for k, v in self.ema.state_dict().items():
+ if v.dtype.is_floating_point: # true for FP16 and FP32
+ v *= d
+ v += (1 - d) * msd[k].detach()
+ # assert v.dtype == msd[k].dtype == torch.float32, f'{k}: EMA {v.dtype} and model {msd[k].dtype} must be FP32'
\ No newline at end of file
diff --git a/utils/utils_aug.py b/utils/utils_aug.py
index 5564e6d..8e9e6b7 100644
--- a/utils/utils_aug.py
+++ b/utils/utils_aug.py
@@ -3,6 +3,7 @@
import numpy as np
from PIL import Image
from copy import deepcopy
+import albumentations as A
def get_mean_and_std(dataset, opt):
'''Compute the mean and std value of dataset.'''
@@ -160,4 +161,17 @@ def __call__(self, img):
return Image.fromarray(np.array(img * mask, dtype=np.uint8))
def __str__(self):
- return 'CutOut'
\ No newline at end of file
+ return 'CutOut'
+
+class Create_Albumentations_From_Name(object):
+ # https://albumentations.ai/docs/api_reference/augmentations/transforms/
+ def __init__(self, name, **kwargs):
+ self.name = name
+ self.transform = eval('A.{}'.format(name))(**kwargs)
+
+ def __call__(self, img):
+ img = np.array(img)
+ return Image.fromarray(np.array(self.transform(image=img)['image'], dtype=np.uint8))
+
+ def __str__(self):
+ return self.name
\ No newline at end of file
diff --git a/utils/utils_fit.py b/utils/utils_fit.py
index de4e910..6c2ae7f 100644
--- a/utils/utils_fit.py
+++ b/utils/utils_fit.py
@@ -1,118 +1,141 @@
import torch, tqdm
import numpy as np
+from copy import deepcopy
from .utils_aug import mixup_data, mixup_criterion
from .utils import Train_Metrice
+import time
-def fitting(model, loss, optimizer, train_dataset, test_dataset, CLASS_NUM, DEVICE, scaler, show_thing, opt):
- model.to(DEVICE)
+def fitting(model, ema, loss, optimizer, train_dataset, test_dataset, CLASS_NUM, DEVICE, scaler, show_thing, opt):
model.train()
metrice = Train_Metrice(CLASS_NUM)
for x, y in tqdm.tqdm(train_dataset, desc='{} Train Stage'.format(show_thing)):
- x, y = x.to(DEVICE), y.to(DEVICE).long()
+ x, y = x.to(DEVICE).float(), y.to(DEVICE).long()
with torch.cuda.amp.autocast(opt.amp):
- if opt.mixup != 'none' and np.random.rand() > 0.5:
- x_mixup, y_a, y_b, lam = mixup_data(x, y, opt)
- pred = model(x_mixup.float())
- l = mixup_criterion(loss, pred, y_a, y_b, lam)
- pred = model(x.float())
+ if opt.rdrop:
+ if opt.mixup != 'none' and np.random.rand() > 0.5:
+ x_mixup, y_a, y_b, lam = mixup_data(x, y, opt)
+ pred = model(x_mixup)
+ pred2 = model(x_mixup)
+ l = mixup_criterion(loss, [pred, pred2], y_a, y_b, lam)
+ pred = model(x)
+ else:
+ pred = model(x)
+ pred2 = model(x)
+ l = loss([pred, pred2], y)
else:
- pred = model(x.float())
- l = loss(pred, y)
+ if opt.mixup != 'none' and np.random.rand() > 0.5:
+ x_mixup, y_a, y_b, lam = mixup_data(x, y, opt)
+ pred = model(x_mixup)
+ l = mixup_criterion(loss, pred, y_a, y_b, lam)
+ pred = model(x)
+ else:
+ pred = model(x)
+ l = loss(pred, y)
+
metrice.update_loss(float(l.data))
metrice.update_y(y, pred)
-
+
scaler.scale(l).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
-
- model.eval()
- with torch.no_grad():
+ if ema:
+ ema.update(model)
+
+ if ema:
+ model_eval = ema.ema
+ else:
+ model_eval = model.eval()
+ with torch.inference_mode():
for x, y in tqdm.tqdm(test_dataset, desc='{} Test Stage'.format(show_thing)):
- x, y = x.to(DEVICE), y.to(DEVICE).long()
+ x, y = x.to(DEVICE).float(), y.to(DEVICE).long()
with torch.cuda.amp.autocast(opt.amp):
if opt.test_tta:
bs, ncrops, c, h, w = x.size()
- pred = model(x.view(-1, c, h, w))
+ pred = model_eval(x.view(-1, c, h, w))
pred = pred.view(bs, ncrops, -1).mean(1)
l = loss(pred, y)
else:
- pred = model(x.float())
+ pred = model_eval(x)
l = loss(pred, y)
metrice.update_loss(float(l.data), isTest=True)
metrice.update_y(y, pred, isTest=True)
- return model, metrice.get()
+ return metrice.get()
-def fitting_distill(teacher_model, student_model, loss, kd_loss, optimizer, train_dataset, test_dataset, CLASS_NUM,
+def fitting_distill(teacher_model, student_model, ema, loss, kd_loss, optimizer, train_dataset, test_dataset, CLASS_NUM,
DEVICE, scaler, show_thing, opt):
- teacher_model.to(DEVICE)
- teacher_model.eval()
- student_model.to(DEVICE)
student_model.train()
metrice = Train_Metrice(CLASS_NUM)
for x, y in tqdm.tqdm(train_dataset, desc='{} Train Stage'.format(show_thing)):
- x, y = x.to(DEVICE), y.to(DEVICE).long()
+ x, y = x.to(DEVICE).float(), y.to(DEVICE).long()
with torch.cuda.amp.autocast(opt.amp):
if opt.mixup != 'none' and np.random.rand() > 0.5:
x_mixup, y_a, y_b, lam = mixup_data(x, y, opt)
- s_features, s_features_fc, s_pred = student_model(x_mixup.float(), need_fea=True)
- t_features, t_features_fc, t_pred = teacher_model(x_mixup.float(), need_fea=True)
+ s_features, s_features_fc, s_pred = student_model(x_mixup, need_fea=True)
+ t_features, t_features_fc, t_pred = teacher_model(x_mixup, need_fea=True)
l = mixup_criterion(loss, s_pred, y_a, y_b, lam)
- if str(kd_loss) in ['SoftTarget']:
- kd_l = kd_loss(s_pred, t_pred)
- pred = student_model(x.float())
+ pred = student_model(x)
else:
- s_features, s_features_fc, s_pred = student_model(x.float(), need_fea=True)
- t_features, t_features_fc, t_pred = teacher_model(x.float(), need_fea=True)
+ s_features, s_features_fc, s_pred = student_model(x, need_fea=True)
+ t_features, t_features_fc, t_pred = teacher_model(x, need_fea=True)
l = loss(s_pred, y)
- if str(kd_loss) in ['SoftTarget']:
- kd_l = kd_loss(s_pred, t_pred)
- elif str(kd_loss) in ['MGD']:
- kd_l = kd_loss(s_features[-1], t_features[-1])
- elif str(kd_loss) in ['SP']:
- kd_l = kd_loss(s_features[2], t_features[2]) + kd_loss(s_features[3], t_features[3])
- elif str(kd_loss) in ['AT']:
- kd_l = kd_loss(s_features[2], t_features[2]) + kd_loss(s_features[3], t_features[3])
+ if str(kd_loss) in ['SoftTarget']:
+ kd_l = kd_loss(s_pred, t_pred)
+ elif str(kd_loss) in ['MGD']:
+ kd_l = kd_loss(s_features[-1], t_features[-1])
+ elif str(kd_loss) in ['SP']:
+ kd_l = kd_loss(s_features[2], t_features[2]) + kd_loss(s_features[3], t_features[3])
+ elif str(kd_loss) in ['AT']:
+ kd_l = kd_loss(s_features[2], t_features[2]) + kd_loss(s_features[3], t_features[3])
- if str(kd_loss) in ['SoftTarget', 'SP', 'MGD']:
- kd_l *= (opt.kd_ratio / (1 - opt.kd_ratio)) if opt.kd_ratio < 1 else opt.kd_ratio
- elif str(kd_loss) in ['AT']:
- kd_l *= opt.kd_ratio
+ if str(kd_loss) in ['SoftTarget', 'SP', 'MGD']:
+ kd_l *= (opt.kd_ratio / (1 - opt.kd_ratio)) if opt.kd_ratio < 1 else opt.kd_ratio
+ elif str(kd_loss) in ['AT']:
+ kd_l *= opt.kd_ratio
metrice.update_loss(float(l.data))
metrice.update_loss(float(kd_l.data), isKd=True)
- metrice.update_y(y, s_pred)
+ if opt.mixup != 'none':
+ metrice.update_y(y, pred)
+ else:
+ metrice.update_y(y, s_pred)
scaler.scale(l + kd_l).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad()
-
- student_model.eval()
- with torch.no_grad():
+ if ema:
+ ema.update(student_model)
+
+ if ema:
+ model_eval = ema.ema
+ else:
+ model_eval = student_model.eval()
+ with torch.inference_mode():
for x, y in tqdm.tqdm(test_dataset, desc='{} Test Stage'.format(show_thing)):
- x, y = x.to(DEVICE), y.to(DEVICE).long()
+ x, y = x.to(DEVICE).float(), y.to(DEVICE).long()
with torch.cuda.amp.autocast(opt.amp):
if opt.test_tta:
bs, ncrops, c, h, w = x.size()
- pred = student_model(x.view(-1, c, h, w))
+ pred = model_eval(x.view(-1, c, h, w))
pred = pred.view(bs, ncrops, -1).mean(1)
l = loss(pred, y)
else:
- pred = student_model(x.float())
+ pred = model_eval(x)
l = loss(pred, y)
metrice.update_loss(float(l.data), isTest=True)
metrice.update_y(y, pred, isTest=True)
- return student_model, metrice.get()
\ No newline at end of file
+ return metrice.get()
\ No newline at end of file
diff --git a/utils/utils_loss.py b/utils/utils_loss.py
index f728e8e..655a40b 100644
--- a/utils/utils_loss.py
+++ b/utils/utils_loss.py
@@ -3,7 +3,7 @@
import torch.nn.functional as F
from torch import Tensor
-__all__ = ['PolyLoss', 'CrossEntropyLoss', 'FocalLoss']
+__all__ = ['PolyLoss', 'CrossEntropyLoss', 'FocalLoss', 'RDropLoss']
class PolyLoss(torch.nn.Module):
"""
@@ -45,4 +45,35 @@ def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
ce = -1 * input_logsoftmax * target_onehot_labelsmoothing
fl = torch.pow((1 - input_softmax), self.gamma) * ce
fl = fl.sum(1) * self.weight[target.long()]
- return fl.mean()
\ No newline at end of file
+ return fl.mean()
+
+class RDropLoss(nn.Module):
+ def __init__(self, loss, a=0.3):
+ super(RDropLoss, self).__init__()
+ self.loss = loss
+ self.a = a
+
+ def forward(self, input, target: torch.Tensor) -> torch.Tensor:
+ if type(input) is list:
+ input1, input2 = input
+ main_loss = (self.loss(input1, target) + self.loss(input2, target)) * 0.5
+ kl_loss = self.compute_kl_loss(input1, input2)
+ return main_loss + self.a * kl_loss
+ else:
+ return self.loss(input, target)
+
+ def compute_kl_loss(self, p, q, pad_mask=None):
+ p_loss = F.kl_div(F.log_softmax(p, dim=-1), F.softmax(q, dim=-1), reduction='none')
+ q_loss = F.kl_div(F.log_softmax(q, dim=-1), F.softmax(p, dim=-1), reduction='none')
+
+ # pad_mask is for seq-level tasks
+ if pad_mask is not None:
+ p_loss.masked_fill_(pad_mask, 0.)
+ q_loss.masked_fill_(pad_mask, 0.)
+
+ # You can choose whether to use function "sum" and "mean" depending on your task
+ p_loss = p_loss.sum()
+ q_loss = q_loss.sum()
+
+ loss = (p_loss + q_loss) / 2
+ return loss
\ No newline at end of file
diff --git a/v1.1-update_log.md b/v1.1-update_log.md
new file mode 100644
index 0000000..52f4f14
--- /dev/null
+++ b/v1.1-update_log.md
@@ -0,0 +1,200 @@
+# pytorch-classifier v1.1 Update Log
+
+- **2022.11.8**
+  1. Revised the dataset-splitting logic in processing.py. Previously a test_size share was split off first and val_size was then taken from the remaining data, so with val_size=0.2 and test_size=0.2 the final ratio was not strictly 6:2:2. The split is now done proportionally, so the resulting datasets are strictly 6:2:2 (see the sketch after this list).
+  2. Following yolov5, model checkpoints saved during training are now stored in FP16. (The saved model is about half the size of FP32, with essentially unchanged accuracy.)
+  3. metrice.py and predict.py now support FP16 inference. (Inference is noticeably faster, with essentially unchanged accuracy.)
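+
+A minimal sketch of the proportional-split idea (hypothetical variable names; processing.py itself is not part of this diff):
+
+    # Split 20% off for test first, then take 0.25 of the remaining 80%
+    # (0.8 * 0.25 = 0.2 of the original data), giving a strict 6:2:2 split.
+    from sklearn.model_selection import train_test_split
+
+    samples = list(range(100))  # placeholder sample list
+    train_val, test = train_test_split(samples, test_size=0.2, random_state=0)
+    train, val = train_test_split(train_val, test_size=0.2 / (1 - 0.2), random_state=0)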
+
+- **2022.11.9**
+  1. Added support for data augmentation from the [albumentations library](https://github.com/albumentations-team/albumentations); a usage sketch follows below.
+  2. Added [R-Drop](https://github.com/dropreg/R-Drop) to training; enable it by passing --rdrop to main.py.
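+
+A usage sketch for the new Create_Albumentations_From_Name wrapper in utils/utils_aug.py (the transform name and its parameters here are illustrative):
+
+    from PIL import Image
+    from utils.utils_aug import Create_Albumentations_From_Name
+
+    pil_img = Image.new('RGB', (224, 224))  # placeholder image
+    transform = Create_Albumentations_From_Name('GaussNoise', p=0.5)
+    aug_img = transform(pil_img)            # takes and returns a PIL Image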
+
+- **2022.11.10**
+  1. Reworked the visualization in metrice.py using the pycm library and added more metric types; a sketch follows below.
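+
+A minimal sketch of how the metrics are now derived through pycm (illustrative label vectors; the real call sites are in utils/utils.py):
+
+    from pycm import ConfusionMatrix
+
+    cm = ConfusionMatrix([0, 1, 1, 0], [0, 1, 0, 0])  # y_true, y_pred
+    print(cm.Overall_ACC, cm.Kappa)  # overall metrics
+    print(cm.PPV, cm.TPR)            # per-class precision / recall dicts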
+
+- **2022.11.11**
+  1. Added EMA (Exponential Moving Average) support; enable it by passing --ema to main.py. A usage sketch follows below.
+  2. Changed the early-stopping --patience mechanism: setting --patience to 0 now disables early stopping.
+  3. Added some experimental results for knowledge distillation.
+  4. Fixed some bugs.
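+
+A usage sketch for the new ModelEMA class (this mirrors how utils/utils_fit.py drives it; model, optimizer and train_dataset are assumed to exist):
+
+    from utils.utils import ModelEMA
+
+    ema = ModelEMA(model)        # FP32 copy of the model, gradients disabled
+    for x, y in train_dataset:
+        ...                      # forward / backward / optimizer.step()
+        ema.update(model)        # blend the current weights into the average
+    model_eval = ema.ema         # evaluate with the averaged weights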
+
+### FP16 Inference Experiments:
+
+Experiment environment:
+
+| System | CPU | GPU | RAM |
+| :----: | :----: | :----: | :----: |
+| Ubuntu | i9-12900KF | RTX-3090 | 32G |
+
+Train mobilenetv2:
+
+ python main.py --model_name mobilenetv2 --config config/config.py --save_path runs/mobilenetv2 --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+Train resnext50:
+
+ python main.py --model_name resnext50 --config config/config.py --save_path runs/resnext50 --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+Train RepVGG-A0:
+
+ python main.py --model_name RepVGG-A0 --config config/config.py --save_path runs/RepVGG-A0 --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+Train densenet121:
+
+ python main.py --model_name densenet121 --config config/config.py --save_path runs/densenet121 --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+Compute metrics for each model:
+
+ python metrice.py --task val --save_path runs/mobilenetv2
+ python metrice.py --task val --save_path runs/resnext50
+ python metrice.py --task val --save_path runs/RepVGG-A0
+ python metrice.py --task val --save_path runs/densenet121
+
+ python metrice.py --task val --save_path runs/mobilenetv2 --half
+ python metrice.py --task val --save_path runs/resnext50 --half
+ python metrice.py --task val --save_path runs/RepVGG-A0 --half
+ python metrice.py --task val --save_path runs/densenet121 --half
+
+Compute FPS for each model:
+
+ python metrice.py --task fps --save_path runs/mobilenetv2
+ python metrice.py --task fps --save_path runs/resnext50
+ python metrice.py --task fps --save_path runs/RepVGG-A0
+ python metrice.py --task fps --save_path runs/densenet121
+
+ python metrice.py --task fps --save_path runs/mobilenetv2 --half
+ python metrice.py --task fps --save_path runs/resnext50 --half
+ python metrice.py --task fps --save_path runs/RepVGG-A0 --half
+ python metrice.py --task fps --save_path runs/densenet121 --half
+
+| model | val accuracy(train stage) | val accuracy(test stage) | val accuracy half(test stage) | FP32 FPS(batch_size=64) | FP16 FPS(batch_size=64) |
+| :----: | :----: | :----: | :----: | :----: | :----: |
+| mobilenetv2 | 0.74284 | 0.74340 | 0.74396 | 52.43 | 92.80 |
+| resnext50 | 0.80966 | 0.80966 | 0.80966 | 19.48 | 30.28 |
+| RepVGG-A0 | 0.73666 | 0.73666 | 0.73666 | 54.74 | 98.87 |
+| densenet121 | 0.77035 | 0.77148 | 0.77035 | 18.87 | 32.75 |
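+
+A minimal sketch of what --half does at inference time (mirroring the new half argument of predict_single_image in utils/utils.py; model, test_transform, DEVICE and pil_img are assumed to exist):
+
+    import torch
+
+    model = model.half().eval()  # FP16 weights
+    tensor_img = test_transform(pil_img).unsqueeze(0).to(DEVICE).half()
+    with torch.inference_mode():
+        pred = torch.softmax(model(tensor_img).mean(0), 0)  # class probabilities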
+
+### R-Drop Experiments:
+
+Train mobilenetv2:
+
+ python main.py --model_name mobilenetv2 --config config/config.py --save_path runs/mobilenetv2 --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+ python main.py --model_name mobilenetv2 --config config/config.py --save_path runs/mobilenetv2_rdrop --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd --rdrop
+
+Train resnext50:
+
+ python main.py --model_name resnext50 --config config/config.py --save_path runs/resnext50 --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+ python main.py --model_name resnext50 --config config/config.py --save_path runs/resnext50_rdrop --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd --rdrop
+
+Train ghostnet:
+
+ python main.py --model_name ghostnet --config config/config.py --save_path runs/ghostnet --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+ python main.py --model_name ghostnet --config config/config.py --save_path runs/ghostnet_rdrop --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd --rdrop
+
+Train efficientnet_v2_s:
+
+ python main.py --model_name efficientnet_v2_s --config config/config.py --save_path runs/efficientnet_v2_s --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+ python main.py --model_name efficientnet_v2_s --config config/config.py --save_path runs/efficientnet_v2_s_rdrop --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd --rdrop
+
+Compute metrics for each model:
+
+ python metrice.py --task val --save_path runs/mobilenetv2
+ python metrice.py --task val --save_path runs/mobilenetv2_rdrop
+ python metrice.py --task val --save_path runs/resnext50
+ python metrice.py --task val --save_path runs/resnext50_rdrop
+ python metrice.py --task val --save_path runs/ghostnet
+ python metrice.py --task val --save_path runs/ghostnet_rdrop
+ python metrice.py --task val --save_path runs/efficientnet_v2_s
+ python metrice.py --task val --save_path runs/efficientnet_v2_s_rdrop
+
+ python metrice.py --task test --save_path runs/mobilenetv2
+ python metrice.py --task test --save_path runs/mobilenetv2_rdrop
+ python metrice.py --task test --save_path runs/resnext50
+ python metrice.py --task test --save_path runs/resnext50_rdrop
+ python metrice.py --task test --save_path runs/ghostnet
+ python metrice.py --task test --save_path runs/ghostnet_rdrop
+ python metrice.py --task test --save_path runs/efficientnet_v2_s
+ python metrice.py --task test --save_path runs/efficientnet_v2_s_rdrop
+
+| model | val accuracy | val accuracy(r-drop) | test accuracy | test accuracy(r-drop) |
+| :----: | :----: | :----: | :----: | :----: |
+| mobilenetv2 | 0.74340 | 0.75126 | 0.73784 | 0.73741 |
+| resnext50 | 0.80966 | 0.81134 | 0.82437 | 0.82092 |
+| ghostnet | 0.77597 | 0.76698 | 0.76625 | 0.77012 |
+| efficientnet_v2_s | 0.84166 | 0.85289 | 0.84460 | 0.85837 |
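+
+How --rdrop changes the training objective (this mirrors utils/utils_fit.py and the new RDropLoss in utils/utils_loss.py; model, x and y are assumed to exist, with the model in train mode so dropout is active): the same batch is passed through the model twice, and a symmetric KL term between the two stochastic predictions is added to the averaged cross-entropy.
+
+    import torch.nn as nn
+    from utils.utils_loss import RDropLoss
+
+    loss = RDropLoss(nn.CrossEntropyLoss(), a=0.3)
+    pred1, pred2 = model(x), model(x)  # two passes differ because of dropout
+    l = loss([pred1, pred2], y)        # 0.5 * (CE1 + CE2) + a * KL(pred1, pred2)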
+
+### EMA Experiments:
+
+Train mobilenetv2:
+
+ python main.py --model_name mobilenetv2 --config config/config.py --save_path runs/mobilenetv2 --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+ python main.py --model_name mobilenetv2 --config config/config.py --save_path runs/mobilenetv2_ema --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd --ema
+
+Train resnext50:
+
+ python main.py --model_name resnext50 --config config/config.py --save_path runs/resnext50 --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+ python main.py --model_name resnext50 --config config/config.py --save_path runs/resnext50_ema --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd --ema
+
+Train ghostnet:
+
+ python main.py --model_name ghostnet --config config/config.py --save_path runs/ghostnet --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+ python main.py --model_name ghostnet --config config/config.py --save_path runs/ghostnet_ema --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd --ema
+
+Train efficientnet_v2_s:
+
+ python main.py --model_name efficientnet_v2_s --config config/config.py --save_path runs/efficientnet_v2_s --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd
+
+ python main.py --model_name efficientnet_v2_s --config config/config.py --save_path runs/efficientnet_v2_s_ema --lr 1e-4 --Augment AutoAugment --epoch 150 \
+ --pretrained --amp --warmup --imagenet_meanstd --ema
+
+Compute metrics for each model:
+
+ python metrice.py --task val --save_path runs/mobilenetv2
+ python metrice.py --task val --save_path runs/mobilenetv2_ema
+ python metrice.py --task val --save_path runs/resnext50
+ python metrice.py --task val --save_path runs/resnext50_ema
+ python metrice.py --task val --save_path runs/ghostnet
+ python metrice.py --task val --save_path runs/ghostnet_ema
+ python metrice.py --task val --save_path runs/efficientnet_v2_s
+ python metrice.py --task val --save_path runs/efficientnet_v2_s_ema
+
+ python metrice.py --task test --save_path runs/mobilenetv2
+ python metrice.py --task test --save_path runs/mobilenetv2_ema
+ python metrice.py --task test --save_path runs/resnext50
+ python metrice.py --task test --save_path runs/resnext50_ema
+ python metrice.py --task test --save_path runs/ghostnet
+ python metrice.py --task test --save_path runs/ghostnet_ema
+ python metrice.py --task test --save_path runs/efficientnet_v2_s
+ python metrice.py --task test --save_path runs/efficientnet_v2_s_ema
+
+| model | val accuracy | val accuracy(ema) | test accuracy | test accuracy(ema) |
+| :----: | :----: | :----: | :----: | :----: |
+| mobilenetv2 | 0.74340 | 0.74958 | 0.73784 | 0.73870 |
+| resnext50 | 0.80966 | 0.81246 | 0.82437 | 0.82307 |
+| ghostnet | 0.77597 | 0.77765 | 0.76625 | 0.77142 |
+| efficientnet_v2_s | 0.84166 | 0.83998 | 0.84460 | 0.83986 |
\ No newline at end of file