"experiments/vscode:/vscode.git/clone" did not exist on "d6d1d6d0a3fcfe5632709ef43d642f6e5d03bc3e"
Commit c3426f6e authored by Sugon_ldc

add shufflenetv2 model

File added
[0, null, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, null, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, null, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, null, 1, 1, 1, null, 1, 1, 0, 0, 0, 0, 1, 1, null, 0, 0, 1, 1, 1, 0, 0, null, 1, null, 0, 0, null, 1, null, 0, 0, null, 1, 1, 0, 0, null, 1, 1, null, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, null, 0, 0, null, 1, 1, 0, 0, 0, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, null, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, null, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, null, 1, 1, null, 0, 0, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 
1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, null, 1, 1, null, 0, 0, 0, null, 1, 1, null, 0, 0, 0, 0, 0, 1, 1, 1, null, 0, 0, 0, 1, 1, null, 0, 0, 0, 0, null, 1, null]
\ No newline at end of file
File added
import cv2
import os
import json
from libAIF import aif
import numpy as np
from label import RPPGLabel
from collections import deque
from multiprocessing import Process, Pool
import math
def make_feature_aif_databse(outputfile, basefolder, if_src_raw=False, expected_shape=(49,49,3)):
    aif_allftimg = aif(outputfile + "_ftimgd", outputfile + "_ftimgi")
    aif_allftimg.wbegin()
    all_labels = []
    fh_label = open(outputfile + "_label.json", 'w')
    for root, dirs, files in os.walk(basefolder):
        for dname in dirs:
            prev_path = os.path.join(root, dname) + "\\"
            # xsplit = prev_path.split("\\")
            dfilename = prev_path + "datafile"
            if os.path.exists(dfilename) and os.path.dirname(dfilename)[-1] == '1':  # get only the first video
                print("on going: " + dfilename)
                # label src file
                src_label_fname = root + "\\" + "bvp_" + root.split("\\")[-1] + "_T" + os.path.dirname(dfilename)[-1] + ".csv"
                print("corresponding label: " + src_label_fname)
                rppg_lab = RPPGLabel(src_label_fname)
                # features & labels
                aif_inst = aif(dfilename, prev_path + "idxfile")
                dat_info = aif_inst.rbegin()
                for i in range(0, len(dat_info), 1):
                    if if_src_raw:
                        img0 = (aif_inst.extract_raw(i)).copy()
                    else:
                        img0 = aif_inst.extract(i)
                    # img0 = cv2.resize(img0, (144, 144))
                    if (i + 1) < len(dat_info):
                        if if_src_raw:
                            img1 = (aif_inst.extract_raw(i + 1)).copy()
                        else:
                            img1 = aif_inst.extract(i + 1)
                        if img1.shape == expected_shape and img0.shape == expected_shape:
                            # adjust to the median position
                            ftimg = img1 - img0 + 128
                            aif_allftimg.append(ftimg)  # features
                            all_labels.append(rppg_lab.get_label_by_frame(i))  # labels
                        else:
                            aif_allftimg.append_any(np.array([]).tobytes())
                            all_labels.append(None)
                    else:  # end of a file, append an empty node
                        aif_allftimg.append_any(np.array([]).tobytes())
                        all_labels.append(None)
                aif_inst.rfinish()
    json.dump(all_labels, fh_label)
    fh_label.close()
    aif_allftimg.wfinish()
# 300 frames * 478 landmarks * 5 cross pixels * 3 channels
# every 5 consecutive pixels (row-major) make one cross; 478*5 = 2390 of the
# 49*49 = 2401 pixels carry data, the remainder is padding
# dat shape = (300, 49, 49, 3)
# returns [mean, std], each 478*3
def get_norm(dat):
    ret_mean = []
    ret_std = []
    for i in range(0, 478):  # cross
        c1 = []
        c2 = []
        c3 = []
        for f in range(0, len(dat)):  # frames
            for j in range(0, 5):  # pixels
                px_idx = (i * 5) + j
                px_ridx = px_idx // 49
                px_cidx = px_idx % 49
                c1.append(dat[f][px_ridx][px_cidx][0])
                c2.append(dat[f][px_ridx][px_cidx][1])
                c3.append(dat[f][px_ridx][px_cidx][2])
        ret_mean.append([np.mean(c1), np.mean(c2), np.mean(c3)])
        ret_std.append([np.std(c1), np.std(c2), np.std(c3)])
    return [ret_mean, ret_std]
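
# Minimal sketch (not part of the pipeline): feed get_norm a tiny synthetic
# stack of (49,49,3) frames and check the output layout.
def _demo_get_norm():
    rng = np.random.default_rng(0)
    frames = [rng.integers(0, 256, size=(49, 49, 3)).astype('float32') for _ in range(4)]
    mean, std = get_norm(frames)
    assert np.array(mean).shape == (478, 3)  # per-landmark, per-channel mean
    assert np.array(std).shape == (478, 3)   # per-landmark, per-channel std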
# feat: 49*49*3; every 5 pixels form one cross with one norm entry
# norm: 478*3 (mean), 478*3 (std), flattened
def feat_normalize(feat, norm, m_offset=0):
    ret = []
    npixel = feat.shape[0] * feat.shape[1]
    norm = norm.reshape(-1, 478, 3)
    for i in range(0, npixel):
        irow = i // 49
        icol = i % 49
        if i < 478 * 5:
            tar_vec = [0, 0, 0]
            for z in range(0, 3):
                if norm[1][i // 5][z] == 0:
                    if feat[irow][icol][z] >= 0:
                        tar_vec[z] = 4
                    else:
                        tar_vec[z] = -4
                else:
                    tar_vec[z] = (feat[irow][icol][z] - m_offset - norm[0][i // 5][z]) / norm[1][i // 5][z]
            ret.append(np.array(tar_vec))
        else:
            ret.append(np.array([0, 0, 0]))
    ret = np.array(ret, dtype='float32')
    return ret.reshape(49, 49, 3)
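
# Minimal sketch (not part of the pipeline): normalize one synthetic feature
# image; the norm vector is laid out as [mean(478x3), std(478x3)] flattened
# to float32, matching np.array(get_norm(...), dtype='float32').ravel().
def _demo_feat_normalize():
    rng = np.random.default_rng(0)
    feat = rng.integers(0, 256, size=(49, 49, 3)).astype('float32')
    norm = (np.abs(rng.standard_normal(2 * 478 * 3)) + 1.0).astype('float32')  # std >= 1, no zero-std branch
    out = feat_normalize(feat, norm, m_offset=128)  # 128 = the median offset added upstream
    assert out.shape == (49, 49, 3) and out.dtype == np.float32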
# merges AIF files named prefix + digit + appendix (e.g. s2_ftimgd .. s38_ftimgd)
def merge_aifs_digiapdx(aiffolder, prefix,\
        appendix_d, appendix_i, begnum, endnum, destpath,\
        extract="image", dtype="float32"):
    merged_aif = aif(destpath + "_dfile", destpath + "_ifile")
    merged_aif.wbegin()
    for i in range(begnum, endnum + 1):
        src_d_path = aiffolder + prefix + str(i) + appendix_d
        src_i_path = aiffolder + prefix + str(i) + appendix_i
        if os.path.exists(src_d_path) and os.path.exists(src_i_path):
            print(src_i_path)
            src_aif = aif(src_d_path, src_i_path)
            info = src_aif.rbegin()
            buf = None
            for j in range(0, len(info)):
                if extract == "image":
                    buf = src_aif.extract(j)
                    merged_aif.append(buf)
                elif extract == "bytes":
                    buf = src_aif.extract_raw(j, dtype=dtype)
                    merged_aif.append_any(np.array(buf, dtype=dtype).tobytes())
            src_aif.rfinish()
    merged_aif.wfinish()
def merge_jsons(jsonfolder, prefix, appendix, begnum, endnum, destpath):
    dat = []
    for i in range(begnum, endnum + 1):
        filepath = jsonfolder + prefix + str(i) + appendix
        if os.path.exists(filepath):
            print(filepath)
            srcfh = open(filepath, 'r')
            dat.extend(json.load(srcfh))
            srcfh.close()
    fh = open(destpath, 'w')
    json.dump(dat, fh)
    fh.close()
def _make_norm_ldmk_MTCore(\
        outname_pre, subject_code, src_dfile, src_ifile, src_labfile,\
        norm_len, interval=5):
    if_src_raw = False
    expected_shape = (49, 49, 3)
    aif_dst_ftimg = aif(outname_pre + subject_code + "_ftimgd", outname_pre + subject_code + "_ftimgi")
    aif_dst_ftimg.wbegin()
    aif_dst_norm = aif(outname_pre + subject_code + "_normd", outname_pre + subject_code + "_normi")
    aif_dst_norm.wbegin()
    json_dst_labels = []
    fh_label = open(outname_pre + subject_code + "_label.json", 'w')
    print("on going: " + src_dfile)
    # label src file
    print("corresponding label: " + src_labfile)
    rppg_lab = RPPGLabel(src_labfile)
    # features & labels
    aif_inst = aif(src_dfile, src_ifile)
    dat_info = aif_inst.rbegin()
    # d_norm = []  # [[[mean1,mean2,mean3],[std1,std2,std3]]...478]
    d_v300 = deque()
    total_none = 0
    for i in range(0, len(dat_info), interval):  # frames within file
        if if_src_raw:
            img0 = (aif_inst.extract_raw(i)).copy()
        else:
            img0 = aif_inst.extract(i)
        # img0 = cv2.resize(img0, (144, 144))
        if (i + interval) < len(dat_info):
            # make norm
            # if i < norm_len and img0.shape == expected_shape:
            #     d_v300.append(img0)  # 300 frames * 478 crosses
            # else:
            #     pass
            print('person: {}, frame num: {}'.format(subject_code, i))
            # one norm, one ft, one label
            if if_src_raw:
                img1 = (aif_inst.extract_raw(i + interval)).copy()
            else:
                img1 = aif_inst.extract(i + interval)
            if img1.shape == expected_shape and img0.shape == expected_shape:
                xdiff = img1 - img0 + 128
                if i < norm_len:
                    d_v300.append(xdiff)  # 300 frames * 478 crosses
                else:
                    # make norm
                    cur_norm = get_norm(d_v300)
                    aif_dst_norm.append_any(np.array(cur_norm, dtype="float32").tobytes())
                    d_v300.popleft()
                    d_v300.append(xdiff)
                # adjust to the median position
                ftimg = xdiff
                aif_dst_ftimg.append(ftimg)  # features
                xlab = rppg_lab.get_label_by_frame(i)
                if xlab is None:
                    total_none += 1
                json_dst_labels.append(xlab)  # labels
            else:
                aif_dst_ftimg.append_any(np.array([]).tobytes())
                json_dst_labels.append(None)
                aif_dst_norm.append_any(np.array([]).tobytes())
        else:  # end of a file, append an empty node
            aif_dst_ftimg.append_any(np.array([]).tobytes())
            json_dst_labels.append(None)
            aif_dst_norm.append_any(np.array([]).tobytes())
    aif_inst.rfinish()
    json.dump(json_dst_labels, fh_label)
    fh_label.close()
    aif_dst_ftimg.wfinish()
    aif_dst_norm.wfinish()
    print("total_none: " + str(total_none))
def _make_norm_pv_ldmk_MTCore(outname_pre, subject_code, src_dfile, src_ifile, src_labfile, norm_len):
    expected_shape = (49, 49, 3)
    aif_dst_ftimg = aif(outname_pre + subject_code + "_ftimgd", outname_pre + subject_code + "_ftimgi")
    aif_dst_ftimg.wbegin()
    aif_dst_norm = aif(outname_pre + subject_code + "_normd", outname_pre + subject_code + "_normi")
    aif_dst_norm.wbegin()
    json_dst_labels = []
    fh_label = open(outname_pre + subject_code + "_label.json", 'w')
    print("on going: " + src_dfile)
    # label src file
    print("corresponding label: " + src_labfile)
    rppg_lab = RPPGLabel(src_labfile)
    # features & labels
    aif_inst = aif(src_dfile, src_ifile)
    dat_info = aif_inst.rbegin()
    # make mixed label: interleave valley (0) and peak (1) frame numbers
    lab_coll = rppg_lab.get_frm_num_at_pvs()
    pv_frmnums_coll = []
    p_or_v = []
    niter = len(lab_coll[0]) if len(lab_coll[0]) > len(lab_coll[1]) else len(lab_coll[1])
    for xl in range(0, niter):
        if xl < len(lab_coll[0]):
            pv_frmnums_coll.append(lab_coll[0][xl])
            p_or_v.append(0)
        if xl < len(lab_coll[1]):
            pv_frmnums_coll.append(lab_coll[1][xl])
            p_or_v.append(1)
    # make feature and norm
    loop_idx = 0
    for xpfrm in pv_frmnums_coll:
        # norm layout: [[[mean1,mean2,mean3],[std1,std2,std3]] ... 478]
        norm_buf = []
        if xpfrm < len(dat_info):
            img0 = aif_inst.extract(xpfrm)
            if xpfrm > norm_len and img0.shape == expected_shape:
                print(subject_code + ": " + str(xpfrm))
                # featlab
                aif_dst_ftimg.append(img0)
                json_dst_labels.append(p_or_v[loop_idx])
                # norm over the norm_len frames preceding xpfrm
                for i in range(xpfrm - norm_len, xpfrm):
                    norm_img = aif_inst.extract(i)  # was extract(xpfrm), which re-read the same frame norm_len times
                    if norm_img.shape == expected_shape:
                        norm_buf.append(norm_img)
                cur_norm = get_norm(norm_buf)
                aif_dst_norm.append_any(np.array(cur_norm, dtype="float32").tobytes())
        loop_idx += 1
    aif_inst.rfinish()
    json.dump(json_dst_labels, fh_label)
    fh_label.close()
    aif_dst_ftimg.wfinish()
    aif_dst_norm.wfinish()
def make_normalized_landmarks_dbMT(outputfolder, basefolder, subject_list,\
        keyfunc, norm_len=60):
    pool = Pool(14)
    for root, dirs, files in os.walk(basefolder):
        for dname in dirs:
            prev_path = os.path.join(root, dname) + "\\"
            # xsplit = prev_path.split("\\")
            dfilename = prev_path + "datafile"
            difilename = prev_path + "idxfile"
            if os.path.exists(dfilename) and os.path.dirname(dfilename)[-1] == '1':  # get only the first video
                src_label_fname = root + "\\" + "bvp_" + root.split("\\")[-1] + "_T" + os.path.dirname(dfilename)[-1] + ".csv"
                subject_code = root.split("\\")[-1]
                if len(subject_list) != 0 and subject_code not in subject_list:
                    break  # all dirs under this root belong to the same filtered subject
                # MT
                pool.apply_async(func=keyfunc, args=(\
                    outputfolder, subject_code, dfilename,\
                    difilename, src_label_fname, norm_len,))
    pool.close()
    pool.join()
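
# Minimal sketch of the fan-out pattern above: one apply_async task per
# subject, then close()/join() to wait. The worker is a trivial stand-in for
# _make_norm_ldmk_MTCore; call the demo only under the __main__ guard
# (required for multiprocessing on Windows).
def _demo_pool_worker(subject_code):
    return "done: " + subject_code

def _demo_pool_fanout(subjects=("s2", "s3")):
    pool = Pool(2)
    results = [pool.apply_async(func=_demo_pool_worker, args=(c,)) for c in subjects]
    pool.close()
    pool.join()
    return [r.get() for r in results]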
if __name__ == "__main__":
# inter-frame plan
# make_normalized_landmarks_dbMT(\
# "G:\\ecg\\AIFDatabase\\ldmk_intermediate\\interframe\\", "G:\\ecg\\raw",\
# subject_list=[], keyfunc=_make_norm_ldmk_MTCore)
# _make_norm_ldmk_MTCore("G:/ecg/AIFDatabase/ldmk_intermediate/interframe/","s3",\
# "G:/ecg/raw/s3/f_imgs1/datafile",\
# "G:/ecg/raw/s3/f_imgs1/idxfile",\
# "G:/ecg/raw/s3/bvp_s3_T1.csv",60)
# only peaks and valleys ---- NO GOOD!
# MT
# make_normalized_landmarks_dbMT("G:\\ecg\\AIFDatabase\\ldmk_intermediate\\", "G:\\ecg\\raw",\
# subject_list=['s2'],\
# keyfunc=_make_norm_pv_ldmk_MTCore)
#
# ST
# _make_norm_pv_ldmk_MTCore("G:\\ecg\\AIFDatabase\\ldmk_intermediate\\peakvalley\\", "s2",\
# "G:\\ecg\\raw\\s2\\f_imgs1\\datafile", "G:\\ecg\\raw\\s2\\f_imgs1\\idxfile",\
# "G:\\ecg\\raw\\s2\\bvp_s2_T1.csv", 60)
# image
# merge_aifs_digiapdx("G:/ecg/AIFDatabase/ldmk_intermediate/interframe/",\
# "s", "_ftimgd", "_ftimgi", 2, 38,\
# "G:/ecg/AIFDatabase/ldmk_aggr/feature",dtype='uint8')
# norm
merge_aifs_digiapdx("G:/ecg/AIFDatabase/ldmk_intermediate/interframe/",\
"s", "_normd", "_normi", 2, 38, "G:/ecg/AIFDatabase/ldmk_aggr/norm",\
extract="bytes",dtype='float32')
# # labels
# merge_jsons("G:/ecg/AIFDatabase/ldmk_intermediate/interframe/",\
# "s", "_label.json", 2, 38, "G:/ecg/AIFDatabase/ldmk_aggr/label.json")
import datetime

class FitLog:
    def __init__(self, folderpath="", fname=None):
        if fname is None:
            fname = datetime.datetime.now().strftime("%y%m%d%H%M%S" + ".log")
        self.fh = open(folderpath + fname, 'w')

    def append(self, line, with_time=False, change_line=True):
        str2append = ""
        if with_time is False:
            str2append = line
        else:
            str2append = str(datetime.datetime.now()) + " " + line
        if change_line is True:
            str2append += "\r\n"
        self.fh.write(str2append)
        self.fh.flush()

    def close(self):
        self.fh.flush()
        self.fh.close()
\ No newline at end of file
import datetime
import os

class FitLog:
    def __init__(self, folderpath="", fname=None, prefix=''):
        self.fname = fname
        if self.fname is None:
            self.fname = prefix + datetime.datetime.now().strftime("%y%m%d%H%M%S" + ".log")
        self.fh = open(folderpath + self.fname, 'w', newline='')

    def append(self, line, with_time=False, change_line=True):
        str2append = ""
        if with_time is False:
            str2append = line
        else:
            str2append = str(datetime.datetime.now()) + " " + line
        if change_line is True:
            str2append += os.linesep
        self.fh.write(str2append)
        self.fh.flush()

    def close(self):
        self.fh.flush()
        self.fh.close()
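
# Minimal usage sketch (illustrative filename): timestamped lines are
# flushed to disk on every append, so the log survives a crash mid-run.
def _demo_fitlog():
    log = FitLog(folderpath="", fname="demo.log")
    log.append("training started", with_time=True)
    log.append("epoch: 1 batch: 0/4, loss: 0.693147", with_time=True)
    log.close()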
\ No newline at end of file
from torch.utils.data import Dataset
from libAIF import aif
import os
import json
import numpy as np
import cv2
from label import RPPGLabel
import random
import torch
from torchvision import transforms
from feature import feat_normalize

# captured compact AIF dataset
class iframe_feeder(Dataset):
    def __init__(self, featdfile, featifile, labfile, normdfile, normifile, test_ratio=0.1):
        self.test_ratio = test_ratio
        self.pic_size = 49
        # feature access
        self.feature_aif = aif(featdfile, featifile)
        self.ftdata_info = self.feature_aif.rbegin()
        self.norm_aif = aif(normdfile, normifile)
        self.norm_info = self.norm_aif.rbegin()
        # label access
        labfh = open(labfile)
        self.all_labels = json.load(labfh)
        labfh.close()
        # other
        self.datalen = 0
        self.datalen_train = 0
        self.datalen_test = 0
        self.valid_indeces = []
        self.valid_train_indeces = []
        self.valid_test_indeces = []
        self.mode = None
        # screen invalid data, randomization
        self._prepare()

    def finish(self):
        self.feature_aif.rfinish()
        self.norm_aif.rfinish()

    def set_mode(self, mode):
        self.mode = mode

    def _prepare(self):
        raw_len = len(self.ftdata_info)
        for i in range(0, raw_len):
            if self.all_labels[i] is not None:  # and len(self.ftps[i]) > 0:
                self.valid_indeces.append(i)
                self.datalen += 1
        # randomization: hold out a random contiguous slice as the test split
        test_size = int(self.datalen * self.test_ratio)
        test_beg = random.randint(0, self.datalen - test_size)
        test_end = test_beg + test_size
        # feats, labs
        for x in range(0, len(self.valid_indeces)):
            if test_beg < x < test_end:
                self.valid_test_indeces.append(self.valid_indeces[x])
            else:
                self.valid_train_indeces.append(self.valid_indeces[x])

    def __getitem__(self, index):
        # feat_buf = []
        f_tensor = None
        l_tensor = None
        actual_idx = -1
        if self.mode == "train":
            actual_idx = self.valid_train_indeces[index]
        elif self.mode == "test":
            actual_idx = self.valid_test_indeces[index]
        else:
            return None
        xfeat = self.feature_aif.extract(actual_idx)
        xfeat = xfeat.reshape(self.pic_size, self.pic_size, 3)
        xnorm = self.norm_aif.extract_raw(actual_idx, 'float32')
        norm_feat = feat_normalize(xfeat, xnorm, m_offset=0)
        pre_proc = transforms.Compose([
            transforms.ToTensor(),
            # transforms.Normalize(mean=[0.5,0.5,0.5], std=[0.05,0.05,0.05]),
        ])
        f_tensor = pre_proc(norm_feat).to(torch.float32)
        xlab = np.array(self.all_labels[actual_idx])
        l_tensor = torch.from_numpy(xlab).to(torch.int64)
        return f_tensor, l_tensor

    def __len__(self):
        # return self.datalen
        if self.mode == "train":
            return len(self.valid_train_indeces)
        elif self.mode == "test":
            return len(self.valid_test_indeces)
        else:
            return None
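
# Minimal usage sketch (paths are illustrative placeholders): the feeder is
# switched between "train" and "test"; __len__/__getitem__ then index into
# the corresponding split, so one instance can serve both loaders.
def _demo_iframe_feeder():
    from torch.utils.data import DataLoader
    ds = iframe_feeder("featd", "feati", "label.json", "normd", "normi")
    ds.set_mode("train")
    loader = DataLoader(ds, batch_size=8, shuffle=True)
    feats, labs = next(iter(loader))
    print(feats.shape, labs.shape)  # torch.Size([8, 3, 49, 49]) torch.Size([8])
    ds.set_mode("test")
    print("test samples:", len(ds))
    ds.finish()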
if __name__ == "__main__":
x = iframe_feeder("F:\\ecg")
x._read_featlab("F:\\ecg")
\ No newline at end of file
import matplotlib.pyplot as plt
import numpy as np
from libPEAK import *
import os

# 64 Hz bvp signal
class RPPGLabel:
    def __init__(self, filename):
        self.filename = filename
        self.raw_sig = self._read_signal(filename)
        self.smooth_sig = self._smooth(self.raw_sig)

    def _read_signal(self, path):
        ret = []
        if os.path.exists(path):
            fh = open(path, 'r')
            line = fh.readline()
            while line:
                ret.append(float(line))
                line = fh.readline()
            fh.close()
            return ret
        else:
            return None

    def _smooth(self, sig):
        smooth_dat = side_pass(sig, 2, 64, 2, passtype='lowpass')
        return smooth_dat

    def plot_signal(self, sig):
        plt.plot(sig)
        plt.show()

    # 3 minutes per video
    # bvp sensor 64 Hz, camera 35.14 fps
    # => 64 / 35.14 = 1.8212862834376777 bvp samples per frame
    # iiframe: index of the inter-frame feature within a file
    def get_label_by_frame(self, iiframe, interval=5, diff_tole=10):
        bpf = 1.8212862834376777
        beg_bvp = int(round(iiframe * bpf))
        end_bvp = int(round((iiframe + interval) * bpf))
        full_diff = 0
        for i in range(beg_bvp, end_bvp):
            if i + 1 < len(self.smooth_sig):
                if self.smooth_sig[i] is not None and self.smooth_sig[i + 1] is not None:
                    full_diff += (self.smooth_sig[i + 1] - self.smooth_sig[i])
        if full_diff >= diff_tole:  # threshold can be optimized
            return 1
        elif full_diff <= -1 * diff_tole:
            return 0
        else:
            return None
    # get labels at peaks and valleys
    # returns frame numbers [[valleys], [peaks]] (valleys first, matching the 0/1 labels)
    def get_frm_num_at_pvs(self):
        sec_per_sig = 1 / 64
        sec_per_frm = 1 / 35.14
        pk_frm_num = []
        vl_frm_num = []
        peakl, peakr, valleyl, valleyr =\
            find_valleys_peaks(self.smooth_sig)
        for onepeak in peakl:
            pk_frm_num.append(int(round(onepeak * sec_per_sig / sec_per_frm)))
        for onevalley in valleyl:
            vl_frm_num.append(int(round(onevalley * sec_per_sig / sec_per_frm)))
        return [vl_frm_num, pk_frm_num]

    def get_raw_size(self):
        return len(self.smooth_sig)
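
# Minimal sketch of the frame-to-bvp index mapping used above (pure
# arithmetic, no file access): a 64 Hz sensor over 35.14 fps video gives
# 64 / 35.14 = 1.8212862834376777 bvp samples per frame.
def _demo_label_window(iiframe=100, interval=5):
    bpf = 64 / 35.14
    beg_bvp = int(round(iiframe * bpf))                # 182 for iiframe=100
    end_bvp = int(round((iiframe + interval) * bpf))   # 191
    return beg_bvp, end_bvp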
if __name__ == "__main__":
rlable = RPPGLabel("G:\\ecg\\raw\\s2\\bvp_s2_T1.csv")
# smooth_dat = _smooth(sig)
rlable.plot_signal(rlable.raw_sig[128:1024])
# all_labels = RPPGLabel().make_all_labels("F:/ecg")
# z = 0
import json
import cv2
import numpy as np

class aif:
    def __init__(self, dpath, ipath):
        self.data_fh = None
        self.data_idx = None
        self.datainfo = None
        self.cursor = 0
        self.dpath = dpath
        self.ipath = ipath

    def wbegin(self):
        self.data_fh = open(self.dpath, 'wb')
        self.idx_fh = open(self.ipath, 'w')
        self.datainfo = []
        self.cursor = 0

    def rbegin(self):
        self.data_fh = open(self.dpath, 'rb')
        self.idx_fh = open(self.ipath, 'r')
        self.datainfo = json.load(self.idx_fh)
        return self.datainfo

    def append(self, img):
        try:
            success, enc_img = cv2.imencode(".png", img)
            data = enc_img.tobytes()
            self.data_fh.write(data)
            self.datainfo.append([self.cursor, len(data)])
            self.cursor += len(data)
        except Exception as err:
            print("libAIF append() exception: " + str(err))
            self.datainfo.append([self.cursor, 0])

    def append_any(self, data):
        try:
            self.data_fh.write(data)
            self.datainfo.append([self.cursor, len(data)])
            self.cursor += len(data)
        except Exception as err:
            print("libAIF append_any() exception: " + str(err))
            self.datainfo.append([self.cursor, 0])

    def extract(self, idx):
        xinfo = self.datainfo[idx]
        self.data_fh.seek(xinfo[0])
        img_dat = self.data_fh.read(xinfo[1])
        img_dat = np.asarray(bytearray(img_dat), dtype='uint8')
        if len(img_dat) > 3:
            img = cv2.imdecode(img_dat, cv2.IMREAD_COLOR)
        else:
            return np.array([])
        return img

    def extract_raw(self, idx, dtype='uint8'):
        xinfo = self.datainfo[idx]
        self.data_fh.seek(xinfo[0])
        img_dat = self.data_fh.read(xinfo[1])
        # img_dat = np.asarray(bytearray(img_dat), dtype='uint8')
        img_dat = np.frombuffer(img_dat, dtype=dtype)
        return img_dat

    def __finish(self):
        self.data_fh.close()
        self.idx_fh.close()

    def wfinish(self):
        json.dump(self.datainfo, self.idx_fh)
        self.__finish()

    def rfinish(self):
        self.__finish()
if __name__ == "__main__":
xaif = aif("D:/ecg/s2/f_imgs1/datafile", "D:/ecg/s2/f_imgs1/idxfile")
datinfo = xaif.rbegin()
for i in range(0, len(datinfo)):
cv2.imwrite("D:/ecg/s2/f_imgs1/" + str(i) + ".png", xaif.extract(i))
xaif.rfinish()
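
# Minimal round-trip sketch (temp paths, illustrative): write one PNG-encoded
# image and one raw float32 record, then read both back.
def _demo_aif_roundtrip():
    import os
    import tempfile
    d = tempfile.mkdtemp()
    w = aif(os.path.join(d, "datafile"), os.path.join(d, "idxfile"))
    w.wbegin()
    w.append(np.zeros((49, 49, 3), dtype='uint8'))          # PNG-encoded record
    w.append_any(np.arange(4, dtype='float32').tobytes())   # raw-bytes record
    w.wfinish()
    r = aif(os.path.join(d, "datafile"), os.path.join(d, "idxfile"))
    assert len(r.rbegin()) == 2
    assert r.extract(0).shape == (49, 49, 3)
    assert np.allclose(r.extract_raw(1, dtype='float32'), [0.0, 1.0, 2.0, 3.0])
    r.rfinish()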
from scipy import signal

def band_pass(data, interval, sampFreq, order):
    tech_inter = []
    tech_inter.append(2 * interval[0] / sampFreq)
    tech_inter.append(2 * interval[1] / sampFreq)
    b, a = signal.butter(N=order, Wn=tech_inter, btype='bandpass')
    ret = signal.filtfilt(b, a, data)
    return ret

def side_pass(data, bar, sampFreq, order, passtype='highpass'):
    tech_bar = 2 * bar / sampFreq
    b, a = signal.butter(N=order, Wn=tech_bar, btype=passtype)
    ret = signal.filtfilt(b, a, data)
    return ret
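
# Minimal sketch: smooth a noisy 64 Hz test tone the way RPPGLabel._smooth
# does (2 Hz low-pass, order 2); band_pass works the same with a [lo, hi] pair.
def _demo_side_pass():
    import numpy as np
    t = np.arange(0, 3, 1 / 64)                       # 3 s at 64 Hz
    sig = np.sin(2 * np.pi * 1.2 * t) + 0.3 * np.random.randn(t.size)
    return side_pass(sig, 2, 64, 2, passtype='lowpass')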
def find_valleys_peaks(data):
    peaksleft = []
    peaksright = []
    valleyleft = []
    valleyright = []
    for i in range(1, len(data) - 1):
        if data[i] > data[i - 1] and data[i] >= data[i + 1]:
            peaksleft.append(i)
        if data[i] >= data[i - 1] and data[i] > data[i + 1]:
            peaksright.append(i)
        if data[i] < data[i - 1] and data[i] <= data[i + 1]:
            valleyleft.append(i)
        if data[i] <= data[i - 1] and data[i] < data[i + 1]:
            valleyright.append(i)
    return peaksleft, peaksright, valleyleft, valleyright

def get_pese_path():
    return "F:/SynologyDrive/ProjectsExtend/PESELibs/"
\ No newline at end of file
# Cambricon PyTorch Model Migration Report
## Cambricon PyTorch Changes
| No. | File | Description |
| --- | --- | --- |
| 1 | ShuffleNetV2Driver.py:9 | add "import torch_mlu" |
| 2 | ShuffleNetV2Driver.py:30 | change "#self.device = self.get_device() #torch.device('cuda' if torch.cuda.is_available() else 'cpu')" to "#self.device = self.get_device() #torch.device('mlu' if torch.mlu.is_available() else 'cpu') " |
| 3 | ShuffleNetV2Driver.py:31 | change "self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')" to "self.device = torch.device('mlu' if torch.mlu.is_available() else 'cpu') " |
| 4 | ShuffleNetV2Driver.py:33 | change "print(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))" to "print(torch.device('mlu' if torch.mlu.is_available() else 'cpu')) " |
| 5 | ShuffleNetV2Driver.py:53 | change "device_gpu = torch.device('cuda')" to "device_gpu = torch.device('mlu') " |
| 6 | ShuffleNetV2Driver.py:60 | change "self.fitlog.append('cuda', True, True)" to "self.fitlog.append('mlu', True, True) " |
| 7 | iframe_feeder.py:10 | add "import torch_mlu" |
| 8 | ShuffleNet/data.py:3 | add "import torch_mlu" |
| 9 | ShuffleNet/model.py:3 | add "import torch_mlu" |
| 10 | ShuffleNet/imagenet.py:8 | add "import torch_mlu" |
| 11 | ShuffleNet/imagenet.py:82 | change "torch.cuda.manual_seed_all(args.seed)" to "torch.mlu.manual_seed_all(args.seed) " |
| 12 | ShuffleNet/imagenet.py:95 | change "device = 'cuda:' + str(args.gpus[0])" to "device = 'mlu:' + str(args.gpus[0]) " |
| 13 | ShuffleNet/flops_benchmark.py:2 | add "import torch_mlu" |
| 14 | ShuffleNet/flops_benchmark.py:15 | change "fcn = fcn.cuda().train()" to "fcn = fcn.mlu().train() " |
| 15 | ShuffleNet/run.py:6 | add "import torch_mlu" |
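
The rows above all follow one substitution: `torch_mlu` is imported once, then every `cuda` device string or call becomes its `mlu` counterpart. A minimal sketch of the resulting device-selection pattern, assuming the Cambricon `torch_mlu` package is installed (falls back to CPU when it is not):

```python
import torch

try:
    import torch_mlu  # Cambricon extension; registers the 'mlu' device type
    device = torch.device('mlu' if torch.mlu.is_available() else 'cpu')
except ImportError:
    device = torch.device('cpu')

model = torch.nn.Linear(8, 2).to(device)  # .to(device) covers both .cuda() and .mlu()
x = torch.randn(4, 8, device=device)
print(device, model(x).shape)
```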
2023-06-27 20:24:29.097284 cuda
2023-06-27 20:24:46.711510 Train: epoch: 1 batch: 0/4, loss: 0.691143
2023-06-27 20:25:32.785069 Validation: avg loss: 0.6893, avg acc: 57.1429%
2023-06-27 20:25:47.336659 Train: epoch: 2 batch: 0/4, loss: 0.685980
2023-06-27 20:26:32.734245 Validation: avg loss: 0.6867, avg acc: 57.1429%
2023-06-27 20:26:47.106466 Train: epoch: 3 batch: 0/4, loss: 0.672524
2023-06-27 20:27:32.526984 Validation: avg loss: 0.6864, avg acc: 57.1429%
2023-06-27 20:27:46.909404 Train: epoch: 4 batch: 0/4, loss: 0.644080
2023-06-27 20:28:33.568527 Validation: avg loss: 0.6929, avg acc: 57.1429%
2023-06-27 20:28:48.329049 Train: epoch: 5 batch: 0/4, loss: 0.644745
2023-06-27 20:29:34.213962 Validation: avg loss: 0.7186, avg acc: 42.8571%
2023-06-27 20:29:48.518932 Train: epoch: 6 batch: 0/4, loss: 0.647197
2023-06-27 20:30:34.266611 Validation: avg loss: 0.7420, avg acc: 42.8571%
2023-06-27 20:30:48.472798 Train: epoch: 7 batch: 0/4, loss: 0.603712
2023-06-27 20:31:33.716582 Validation: avg loss: 0.7435, avg acc: 42.8571%
2023-06-27 20:31:47.942766 Train: epoch: 8 batch: 0/4, loss: 0.557651
2023-06-27 20:32:33.315917 Validation: avg loss: 0.7444, avg acc: 42.8571%
2023-06-27 20:32:47.738961 Train: epoch: 9 batch: 0/4, loss: 0.537784
2023-06-27 20:33:33.803949 Validation: avg loss: 0.8276, avg acc: 42.8571%
2023-06-27 20:33:48.296666 Train: epoch: 10 batch: 0/4, loss: 0.456332
2023-06-27 20:34:35.173420 Validation: avg loss: 0.8768, avg acc: 42.8571%
2023-06-27 20:34:52.401751 Train: epoch: 11 batch: 0/4, loss: 0.332917
2023-06-27 20:35:47.800032 Validation: avg loss: 0.9420, avg acc: 42.8571%
2023-06-27 20:36:04.354299 Train: epoch: 12 batch: 0/4, loss: 0.285392
2023-06-27 20:36:49.498897 Validation: avg loss: 0.9740, avg acc: 42.8571%
2023-06-27 20:37:03.749654 Train: epoch: 13 batch: 0/4, loss: 0.266133
2023-06-27 20:37:48.830727 Validation: avg loss: 0.9945, avg acc: 47.6190%
2023-06-27 20:38:03.160350 Train: epoch: 14 batch: 0/4, loss: 0.131294
2023-06-27 20:38:48.501099 Validation: avg loss: 0.8946, avg acc: 49.5238%
2023-06-27 20:39:02.726512 Train: epoch: 15 batch: 0/4, loss: 0.118250
2023-06-27 20:39:47.995243 Validation: avg loss: 0.8834, avg acc: 54.2857%
2023-06-27 20:40:04.750322 Train: epoch: 16 batch: 0/4, loss: 0.065639
2023-06-27 20:40:59.221551 Validation: avg loss: 1.0354, avg acc: 49.5238%
2023-06-27 20:41:16.494496 Train: epoch: 17 batch: 0/4, loss: 0.091341
2023-06-27 20:42:11.264989 Validation: avg loss: 1.0501, avg acc: 50.4762%
2023-06-27 20:42:28.539772 Train: epoch: 18 batch: 0/4, loss: 0.025633
2023-06-27 20:43:23.100663 Validation: avg loss: 1.0062, avg acc: 59.0476%
2023-06-27 20:43:40.148723 Train: epoch: 19 batch: 0/4, loss: 0.036730
2023-06-27 20:44:34.538871 Validation: avg loss: 0.9890, avg acc: 60.9524%
2023-06-27 20:44:49.300661 Train: epoch: 20 batch: 0/4, loss: 0.014275
2023-06-27 20:45:35.217730 Validation: avg loss: 1.0903, avg acc: 61.9048%
2023-06-27 20:45:49.839872 Train: epoch: 21 batch: 0/4, loss: 0.014764
2023-06-27 20:46:35.720425 Validation: avg loss: 1.1801, avg acc: 61.9048%
2023-06-27 20:46:49.982285 Train: epoch: 22 batch: 0/4, loss: 0.007254
2023-06-27 20:47:35.058200 Validation: avg loss: 1.2001, avg acc: 64.7619%
2023-06-27 20:47:49.367503 Train: epoch: 23 batch: 0/4, loss: 0.007846
2023-06-27 20:48:34.741647 Validation: avg loss: 1.3219, avg acc: 65.7143%
2023-06-27 20:48:49.206170 Train: epoch: 24 batch: 0/4, loss: 0.006899
2023-06-27 20:49:34.938107 Validation: avg loss: 1.3289, avg acc: 65.7143%
2023-06-27 20:49:49.300229 Train: epoch: 25 batch: 0/4, loss: 0.009685
2023-06-27 20:50:34.859294 Validation: avg loss: 1.3377, avg acc: 62.8571%
2023-06-27 20:50:49.163374 Train: epoch: 26 batch: 0/4, loss: 0.013282
2023-06-27 20:51:34.162103 Validation: avg loss: 1.3314, avg acc: 62.8571%
2023-06-27 20:51:48.527172 Train: epoch: 27 batch: 0/4, loss: 0.009669
2023-06-27 20:52:34.520132 Validation: avg loss: 1.3528, avg acc: 63.8095%
2023-06-27 20:52:49.095199 Train: epoch: 28 batch: 0/4, loss: 0.010325
2023-06-27 20:53:34.230659 Validation: avg loss: 1.3256, avg acc: 66.6667%
2023-06-27 20:53:48.453604 Train: epoch: 29 batch: 0/4, loss: 0.004010
2023-06-27 20:54:33.935093 Validation: avg loss: 1.3582, avg acc: 64.7619%
2023-06-27 20:54:48.419516 Train: epoch: 30 batch: 0/4, loss: 0.003470
2023-06-27 20:55:35.252915 Validation: avg loss: 1.3894, avg acc: 63.8095%
2023-06-27 20:55:49.848116 Train: epoch: 31 batch: 0/4, loss: 0.005459
2023-06-27 20:56:35.907501 Validation: avg loss: 1.4114, avg acc: 64.7619%
2023-06-27 20:56:50.295728 Train: epoch: 32 batch: 0/4, loss: 0.003528
2023-06-27 20:57:36.702376 Validation: avg loss: 1.4297, avg acc: 65.7143%
2023-06-27 20:57:51.026266 Train: epoch: 33 batch: 0/4, loss: 0.006942
2023-06-27 20:58:36.208021 Validation: avg loss: 1.4177, avg acc: 64.7619%
2023-06-27 20:58:50.450671 Train: epoch: 34 batch: 0/4, loss: 0.002612
2023-06-27 20:59:35.726068 Validation: avg loss: 1.4280, avg acc: 64.7619%
2023-06-27 20:59:50.053097 Train: epoch: 35 batch: 0/4, loss: 0.002576
2023-06-27 21:00:35.161984 Validation: avg loss: 1.4479, avg acc: 65.7143%
2023-06-27 21:00:49.595550 Train: epoch: 36 batch: 0/4, loss: 0.007597
2023-06-27 21:01:35.293097 Validation: avg loss: 1.4474, avg acc: 65.7143%
2023-06-27 21:01:49.741054 Train: epoch: 37 batch: 0/4, loss: 0.002884
2023-06-27 21:02:35.435971 Validation: avg loss: 1.4477, avg acc: 64.7619%
2023-06-27 21:02:50.076695 Train: epoch: 38 batch: 0/4, loss: 0.006051
2023-06-27 21:03:35.825173 Validation: avg loss: 1.4994, avg acc: 64.7619%
2023-06-27 21:03:50.432057 Train: epoch: 39 batch: 0/4, loss: 0.001255
2023-06-27 21:04:36.140945 Validation: avg loss: 1.5201, avg acc: 64.7619%
2023-06-27 21:04:50.613230 Train: epoch: 40 batch: 0/4, loss: 0.002234
2023-06-27 21:05:36.480363 Validation: avg loss: 1.5425, avg acc: 64.7619%
2023-06-27 21:05:51.169549 Train: epoch: 41 batch: 0/4, loss: 0.001239
2023-06-27 21:06:37.757136 Validation: avg loss: 1.5486, avg acc: 65.7143%
2023-06-27 21:06:52.081432 Train: epoch: 42 batch: 0/4, loss: 0.001083
2023-06-27 21:07:37.749521 Validation: avg loss: 1.5343, avg acc: 65.7143%
2023-06-27 21:07:52.544739 Train: epoch: 43 batch: 0/4, loss: 0.001184
2023-06-27 21:08:37.779487 Validation: avg loss: 1.5527, avg acc: 65.7143%
2023-06-27 21:08:52.231289 Train: epoch: 44 batch: 0/4, loss: 0.001146
2023-06-27 21:09:38.142167 Validation: avg loss: 1.5314, avg acc: 65.7143%
2023-06-27 21:09:52.818703 Train: epoch: 45 batch: 0/4, loss: 0.000785
2023-06-27 21:10:38.542424 Validation: avg loss: 1.5186, avg acc: 65.7143%
2023-06-27 21:10:53.183909 Train: epoch: 46 batch: 0/4, loss: 0.001573
2023-06-27 21:11:38.626099 Validation: avg loss: 1.5108, avg acc: 65.7143%
2023-06-27 21:11:53.051555 Train: epoch: 47 batch: 0/4, loss: 0.000877
2023-06-27 21:12:38.247759 Validation: avg loss: 1.5156, avg acc: 65.7143%
2023-06-27 21:12:52.556644 Train: epoch: 48 batch: 0/4, loss: 0.001116
2023-06-27 21:13:38.527263 Validation: avg loss: 1.5247, avg acc: 65.7143%
2023-06-27 21:13:53.461653 Train: epoch: 49 batch: 0/4, loss: 0.000840
2023-06-27 21:14:39.023648 Validation: avg loss: 1.5159, avg acc: 65.7143%
2023-06-27 21:14:53.501405 Train: epoch: 50 batch: 0/4, loss: 0.000605
2023-06-27 21:15:38.909785 Validation: avg loss: 1.5166, avg acc: 65.7143%
2023-06-27 21:15:39.051995 cuda
2023-06-27 21:15:53.452143 Train: epoch: 1 batch: 0/4, loss: 0.691736
2023-06-27 21:16:38.049762 Validation: avg loss: 0.6836, avg acc: 59.0476%
2023-06-27 21:16:52.185850 Train: epoch: 2 batch: 0/4, loss: 0.683868
2023-06-27 21:17:37.389718 Validation: avg loss: 0.6768, avg acc: 59.0476%
2023-06-27 21:17:51.753388 Train: epoch: 3 batch: 0/4, loss: 0.703129
2023-06-27 21:18:36.661202 Validation: avg loss: 0.6775, avg acc: 59.0476%
2023-06-27 21:18:50.934819 Train: epoch: 4 batch: 0/4, loss: 0.667924
2023-06-27 21:19:35.880084 Validation: avg loss: 0.6783, avg acc: 59.0476%
2023-06-27 21:19:49.920322 Train: epoch: 5 batch: 0/4, loss: 0.674128
2023-06-27 21:20:34.871162 Validation: avg loss: 0.6920, avg acc: 59.0476%
2023-06-27 21:20:49.132727 Train: epoch: 6 batch: 0/4, loss: 0.626494
2023-06-27 21:21:34.525190 Validation: avg loss: 0.7429, avg acc: 59.0476%
2023-06-27 21:21:48.751058 Train: epoch: 7 batch: 0/4, loss: 0.567501
2023-06-27 21:22:33.738745 Validation: avg loss: 0.7772, avg acc: 59.0476%
2023-06-27 21:22:47.951232 Train: epoch: 8 batch: 0/4, loss: 0.562780
2023-06-27 21:23:32.943781 Validation: avg loss: 0.8472, avg acc: 59.0476%
2023-06-27 21:23:47.254353 Train: epoch: 9 batch: 0/4, loss: 0.426511
2023-06-27 21:24:32.145615 Validation: avg loss: 0.9992, avg acc: 59.0476%
2023-06-27 21:24:46.344709 Train: epoch: 10 batch: 0/4, loss: 0.375800
2023-06-27 21:25:32.287260 Validation: avg loss: 1.1576, avg acc: 59.0476%
2023-06-27 21:25:47.115042 Train: epoch: 11 batch: 0/4, loss: 0.274453
2023-06-27 21:26:32.773379 Validation: avg loss: 1.2782, avg acc: 59.0476%
2023-06-27 21:26:47.355081 Train: epoch: 12 batch: 0/4, loss: 0.188514
2023-06-27 21:27:33.135887 Validation: avg loss: 1.3211, avg acc: 59.0476%
2023-06-27 21:27:47.506455 Train: epoch: 13 batch: 0/4, loss: 0.197023
2023-06-27 21:28:32.250146 Validation: avg loss: 1.3828, avg acc: 58.0952%
2023-06-27 21:28:46.293063 Train: epoch: 14 batch: 0/4, loss: 0.088516
2023-06-27 21:29:30.824847 Validation: avg loss: 1.3960, avg acc: 58.0952%
2023-06-27 21:29:44.970034 Train: epoch: 15 batch: 0/4, loss: 0.083743
2023-06-27 21:30:30.407492 Validation: avg loss: 1.2767, avg acc: 60.9524%
2023-06-27 21:30:44.778389 Train: epoch: 16 batch: 0/4, loss: 0.039141
2023-06-27 21:31:29.605076 Validation: avg loss: 1.0991, avg acc: 67.6190%
2023-06-27 21:31:43.679548 Train: epoch: 17 batch: 0/4, loss: 0.023605
2023-06-27 21:32:28.838137 Validation: avg loss: 1.1428, avg acc: 67.6190%
2023-06-27 21:32:43.063909 Train: epoch: 18 batch: 0/4, loss: 0.022332
2023-06-27 21:33:28.034252 Validation: avg loss: 1.0250, avg acc: 72.3810%
2023-06-27 21:33:42.076954 Train: epoch: 19 batch: 0/4, loss: 0.012294
2023-06-27 21:34:26.854922 Validation: avg loss: 0.9256, avg acc: 71.4286%
2023-06-27 21:34:40.932934 Train: epoch: 20 batch: 0/4, loss: 0.014384
2023-06-27 21:35:25.635924 Validation: avg loss: 0.9767, avg acc: 73.3333%
2023-06-27 21:35:39.737333 Train: epoch: 21 batch: 0/4, loss: 0.009487
2023-06-27 21:36:24.457411 Validation: avg loss: 0.9440, avg acc: 71.4286%
2023-06-27 21:36:38.586486 Train: epoch: 22 batch: 0/4, loss: 0.006818
2023-06-27 21:37:23.292082 Validation: avg loss: 0.9907, avg acc: 67.6190%
2023-06-27 21:37:37.449483 Train: epoch: 23 batch: 0/4, loss: 0.011777
2023-06-27 21:38:22.274693 Validation: avg loss: 1.1097, avg acc: 67.6190%
2023-06-27 21:38:36.552139 Train: epoch: 24 batch: 0/4, loss: 0.011297
2023-06-27 21:39:21.416102 Validation: avg loss: 1.1239, avg acc: 68.5714%
2023-06-27 21:39:35.577825 Train: epoch: 25 batch: 0/4, loss: 0.009228
2023-06-27 21:40:20.364479 Validation: avg loss: 1.0609, avg acc: 71.4286%
2023-06-27 21:40:34.524510 Train: epoch: 26 batch: 0/4, loss: 0.005655
2023-06-27 21:41:20.015263 Validation: avg loss: 1.0012, avg acc: 75.2381%
2023-06-27 21:41:34.185422 Train: epoch: 27 batch: 0/4, loss: 0.004269
2023-06-27 21:42:19.492635 Validation: avg loss: 0.9907, avg acc: 73.3333%
2023-06-27 21:42:33.711277 Train: epoch: 28 batch: 0/4, loss: 0.005315
2023-06-27 21:43:18.951514 Validation: avg loss: 0.9990, avg acc: 76.1905%
2023-06-27 21:43:33.226150 Train: epoch: 29 batch: 0/4, loss: 0.003101
2023-06-27 21:44:18.385598 Validation: avg loss: 1.0010, avg acc: 75.2381%
2023-06-27 21:44:32.557319 Train: epoch: 30 batch: 0/4, loss: 0.005050
2023-06-27 21:45:17.845059 Validation: avg loss: 0.9835, avg acc: 75.2381%
2023-06-27 21:45:32.311005 Train: epoch: 31 batch: 0/4, loss: 0.003702
2023-06-27 21:46:17.639080 Validation: avg loss: 1.0075, avg acc: 74.2857%
2023-06-27 21:46:32.011410 Train: epoch: 32 batch: 0/4, loss: 0.004959
2023-06-27 21:47:17.200451 Validation: avg loss: 1.0417, avg acc: 74.2857%
2023-06-27 21:47:31.743259 Train: epoch: 33 batch: 0/4, loss: 0.001638
2023-06-27 21:48:16.949034 Validation: avg loss: 1.0667, avg acc: 74.2857%
2023-06-27 21:48:31.370050 Train: epoch: 34 batch: 0/4, loss: 0.001611
2023-06-27 21:49:16.177922 Validation: avg loss: 1.0987, avg acc: 71.4286%
2023-06-27 21:49:30.724478 Train: epoch: 35 batch: 0/4, loss: 0.002110
2023-06-27 21:50:15.919399 Validation: avg loss: 1.1157, avg acc: 72.3810%
2023-06-27 21:50:30.599470 Train: epoch: 36 batch: 0/4, loss: 0.001120
2023-06-27 21:51:16.389382 Validation: avg loss: 1.1388, avg acc: 72.3810%
2023-06-27 21:51:30.679179 Train: epoch: 37 batch: 0/4, loss: 0.002431
2023-06-27 21:52:15.278430 Validation: avg loss: 1.1491, avg acc: 70.4762%
2023-06-27 21:52:29.639813 Train: epoch: 38 batch: 0/4, loss: 0.001873
2023-06-27 21:53:14.536530 Validation: avg loss: 1.1600, avg acc: 71.4286%
2023-06-27 21:53:28.745685 Train: epoch: 39 batch: 0/4, loss: 0.001235
2023-06-27 21:54:14.776436 Validation: avg loss: 1.1614, avg acc: 71.4286%
2023-06-27 21:54:31.605806 Train: epoch: 40 batch: 0/4, loss: 0.001205
2023-06-27 21:55:24.907567 Validation: avg loss: 1.1676, avg acc: 70.4762%
2023-06-27 21:55:42.329314 Train: epoch: 41 batch: 0/4, loss: 0.001023
2023-06-27 21:56:36.481476 Validation: avg loss: 1.1545, avg acc: 72.3810%
2023-06-27 21:56:53.250157 Train: epoch: 42 batch: 0/4, loss: 0.000928
2023-06-27 21:57:42.010911 Validation: avg loss: 1.1706, avg acc: 70.4762%
2023-06-27 21:57:56.068171 Train: epoch: 43 batch: 0/4, loss: 0.000772
2023-06-27 21:58:40.707347 Validation: avg loss: 1.1615, avg acc: 68.5714%
2023-06-27 21:58:54.865171 Train: epoch: 44 batch: 0/4, loss: 0.000777
2023-06-27 21:59:39.367311 Validation: avg loss: 1.1684, avg acc: 69.5238%
2023-06-27 21:59:53.743477 Train: epoch: 45 batch: 0/4, loss: 0.000864
2023-06-27 22:00:38.497434 Validation: avg loss: 1.1807, avg acc: 69.5238%
2023-06-27 22:00:52.622961 Train: epoch: 46 batch: 0/4, loss: 0.000633
2023-06-27 22:01:37.127766 Validation: avg loss: 1.1631, avg acc: 70.4762%
2023-06-27 22:01:51.245015 Train: epoch: 47 batch: 0/4, loss: 0.000731
2023-06-27 22:02:35.988887 Validation: avg loss: 1.1821, avg acc: 70.4762%
2023-06-27 22:02:50.288587 Train: epoch: 48 batch: 0/4, loss: 0.000860
2023-06-27 22:03:35.479524 Validation: avg loss: 1.1852, avg acc: 71.4286%
2023-06-27 22:03:49.693263 Train: epoch: 49 batch: 0/4, loss: 0.000542
2023-06-27 22:04:38.663734 Validation: avg loss: 1.1922, avg acc: 71.4286%
2023-06-27 22:04:55.610965 Train: epoch: 50 batch: 0/4, loss: 0.000765
2023-06-27 22:05:48.214104 Validation: avg loss: 1.1724, avg acc: 72.3810%
2023-06-27 22:05:48.247922 cuda
2023-06-27 22:06:02.700087 Train: epoch: 1 batch: 0/4, loss: 0.694332
2023-06-27 22:06:49.134593 Validation: avg loss: 0.6871, avg acc: 57.1429%
2023-06-27 22:07:03.694375 Train: epoch: 2 batch: 0/4, loss: 0.664086
2023-06-27 22:07:49.773964 Validation: avg loss: 0.6849, avg acc: 57.1429%
2023-06-27 22:08:04.264858 Train: epoch: 3 batch: 0/4, loss: 0.680979
2023-06-27 22:08:49.287316 Validation: avg loss: 0.6857, avg acc: 57.1429%
2023-06-27 22:09:03.568651 Train: epoch: 4 batch: 0/4, loss: 0.660581
2023-06-27 22:09:48.550393 Validation: avg loss: 0.6835, avg acc: 57.1429%
2023-06-27 22:10:02.639012 Train: epoch: 5 batch: 0/4, loss: 0.647255
2023-06-27 22:10:47.197546 Validation: avg loss: 0.6823, avg acc: 57.1429%
2023-06-27 22:11:01.412408 Train: epoch: 6 batch: 0/4, loss: 0.614855
2023-06-27 22:11:46.503837 Validation: avg loss: 0.6813, avg acc: 57.1429%
2023-06-27 22:12:00.867246 Train: epoch: 7 batch: 0/4, loss: 0.581202
2023-06-27 22:12:46.349701 Validation: avg loss: 0.6812, avg acc: 57.1429%
2023-06-27 22:13:00.941791 Train: epoch: 8 batch: 0/4, loss: 0.570684
2023-06-27 22:13:46.248950 Validation: avg loss: 0.6799, avg acc: 60.0000%
2023-06-27 22:14:00.818297 Train: epoch: 9 batch: 0/4, loss: 0.483673
2023-06-27 22:14:46.800237 Validation: avg loss: 0.6701, avg acc: 60.0000%
2023-06-27 22:15:01.432393 Train: epoch: 10 batch: 0/4, loss: 0.420846
2023-06-27 22:15:47.795169 Validation: avg loss: 0.6588, avg acc: 71.4286%
2023-06-27 22:16:02.066573 Train: epoch: 11 batch: 0/4, loss: 0.349698
2023-06-27 22:16:47.069922 Validation: avg loss: 0.6403, avg acc: 67.6190%
2023-06-27 22:17:01.212362 Train: epoch: 12 batch: 0/4, loss: 0.290924
2023-06-27 22:17:46.767356 Validation: avg loss: 0.5999, avg acc: 71.4286%
2023-06-27 22:18:01.308525 Train: epoch: 13 batch: 0/4, loss: 0.210976
2023-06-27 22:18:47.025021 Validation: avg loss: 0.5615, avg acc: 71.4286%
2023-06-27 22:19:01.268115 Train: epoch: 14 batch: 0/4, loss: 0.151887
2023-06-27 22:19:46.249894 Validation: avg loss: 0.5789, avg acc: 70.4762%
2023-06-27 22:20:00.317073 Train: epoch: 15 batch: 0/4, loss: 0.091754
2023-06-27 22:20:44.853358 Validation: avg loss: 0.5884, avg acc: 68.5714%
2023-06-27 22:20:59.252199 Train: epoch: 16 batch: 0/4, loss: 0.064195
2023-06-27 22:21:43.926641 Validation: avg loss: 0.6605, avg acc: 67.6190%
2023-06-27 22:21:58.093871 Train: epoch: 17 batch: 0/4, loss: 0.058442
2023-06-27 22:22:42.867910 Validation: avg loss: 0.7124, avg acc: 68.5714%
2023-06-27 22:22:57.267821 Train: epoch: 18 batch: 0/4, loss: 0.034886
2023-06-27 22:23:42.369799 Validation: avg loss: 0.7680, avg acc: 67.6190%
2023-06-27 22:23:56.549715 Train: epoch: 19 batch: 0/4, loss: 0.021634
2023-06-27 22:24:41.550506 Validation: avg loss: 0.8190, avg acc: 65.7143%
2023-06-27 22:24:55.913941 Train: epoch: 20 batch: 0/4, loss: 0.041213
2023-06-27 22:25:40.883883 Validation: avg loss: 0.8872, avg acc: 70.4762%
2023-06-27 22:25:55.354460 Train: epoch: 21 batch: 0/4, loss: 0.037327
2023-06-27 22:26:39.808208 Validation: avg loss: 0.9775, avg acc: 66.6667%
2023-06-27 22:26:54.000967 Train: epoch: 22 batch: 0/4, loss: 0.007783
2023-06-27 22:27:38.436922 Validation: avg loss: 1.0206, avg acc: 65.7143%
2023-06-27 22:27:52.713763 Train: epoch: 23 batch: 0/4, loss: 0.009117
2023-06-27 22:28:37.844541 Validation: avg loss: 1.0509, avg acc: 64.7619%
2023-06-27 22:28:52.372054 Train: epoch: 24 batch: 0/4, loss: 0.004930
2023-06-27 22:29:38.195225 Validation: avg loss: 1.0375, avg acc: 66.6667%
2023-06-27 22:29:52.833951 Train: epoch: 25 batch: 0/4, loss: 0.006854
2023-06-27 22:30:42.418617 Validation: avg loss: 1.0135, avg acc: 70.4762%
2023-06-27 22:30:59.826240 Train: epoch: 26 batch: 0/4, loss: 0.005497
2023-06-27 22:31:53.313775 Validation: avg loss: 0.9878, avg acc: 69.5238%
2023-06-27 22:32:10.441642 Train: epoch: 27 batch: 0/4, loss: 0.013207
2023-06-27 22:33:03.727428 Validation: avg loss: 0.9929, avg acc: 68.5714%
2023-06-27 22:33:20.489639 Train: epoch: 28 batch: 0/4, loss: 0.007555
2023-06-27 22:34:14.171016 Validation: avg loss: 1.0352, avg acc: 63.8095%
2023-06-27 22:34:31.007619 Train: epoch: 29 batch: 0/4, loss: 0.015213
2023-06-27 22:35:24.970835 Validation: avg loss: 1.0920, avg acc: 67.6190%
2023-06-27 22:35:42.393896 Train: epoch: 30 batch: 0/4, loss: 0.005064
2023-06-27 22:36:37.084491 Validation: avg loss: 1.1553, avg acc: 65.7143%
2023-06-27 22:36:54.262733 Train: epoch: 31 batch: 0/4, loss: 0.004100
2023-06-27 22:37:40.496946 Validation: avg loss: 1.1604, avg acc: 64.7619%
2023-06-27 22:37:54.785895 Train: epoch: 32 batch: 0/4, loss: 0.007035
2023-06-27 22:38:39.997172 Validation: avg loss: 1.1841, avg acc: 64.7619%
2023-06-27 22:38:54.265682 Train: epoch: 33 batch: 0/4, loss: 0.002913
2023-06-27 22:39:39.297686 Validation: avg loss: 1.2217, avg acc: 66.6667%
2023-06-27 22:39:53.662343 Train: epoch: 34 batch: 0/4, loss: 0.003259
2023-06-27 22:40:39.630910 Validation: avg loss: 1.2292, avg acc: 67.6190%
2023-06-27 22:40:53.932959 Train: epoch: 35 batch: 0/4, loss: 0.004313
2023-06-27 22:41:39.521995 Validation: avg loss: 1.2720, avg acc: 66.6667%
2023-06-27 22:41:54.010133 Train: epoch: 36 batch: 0/4, loss: 0.006060
2023-06-27 22:42:38.895880 Validation: avg loss: 1.2800, avg acc: 66.6667%
2023-06-27 22:42:53.040187 Train: epoch: 37 batch: 0/4, loss: 0.003258
2023-06-27 22:43:37.680677 Validation: avg loss: 1.3244, avg acc: 68.5714%
2023-06-27 22:43:51.744750 Train: epoch: 38 batch: 0/4, loss: 0.002319
2023-06-27 22:44:36.460156 Validation: avg loss: 1.3268, avg acc: 66.6667%
2023-06-27 22:44:50.518069 Train: epoch: 39 batch: 0/4, loss: 0.002890
2023-06-27 22:45:35.154920 Validation: avg loss: 1.2941, avg acc: 67.6190%
2023-06-27 22:45:49.264716 Train: epoch: 40 batch: 0/4, loss: 0.002698
2023-06-27 22:46:34.424544 Validation: avg loss: 1.2796, avg acc: 65.7143%
2023-06-27 22:46:48.850944 Train: epoch: 41 batch: 0/4, loss: 0.002646
2023-06-27 22:47:34.845515 Validation: avg loss: 1.2635, avg acc: 64.7619%
2023-06-27 22:47:49.211660 Train: epoch: 42 batch: 0/4, loss: 0.001491
2023-06-27 22:48:34.132406 Validation: avg loss: 1.2806, avg acc: 64.7619%
2023-06-27 22:48:48.271090 Train: epoch: 43 batch: 0/4, loss: 0.002615
2023-06-27 22:49:33.343224 Validation: avg loss: 1.2766, avg acc: 65.7143%
2023-06-27 22:49:47.878757 Train: epoch: 44 batch: 0/4, loss: 0.001109
2023-06-27 22:50:32.900879 Validation: avg loss: 1.2934, avg acc: 64.7619%
2023-06-27 22:50:47.024931 Train: epoch: 45 batch: 0/4, loss: 0.001578
2023-06-27 22:51:32.653596 Validation: avg loss: 1.2984, avg acc: 66.6667%
2023-06-27 22:51:46.814239 Train: epoch: 46 batch: 0/4, loss: 0.000983
2023-06-27 22:52:31.790379 Validation: avg loss: 1.3014, avg acc: 67.6190%
2023-06-27 22:52:45.959176 Train: epoch: 47 batch: 0/4, loss: 0.001445
2023-06-27 22:53:30.927607 Validation: avg loss: 1.2994, avg acc: 66.6667%
2023-06-27 22:53:45.037631 Train: epoch: 48 batch: 0/4, loss: 0.001244
2023-06-27 22:54:29.540818 Validation: avg loss: 1.2992, avg acc: 66.6667%
2023-06-27 22:54:43.669885 Train: epoch: 49 batch: 0/4, loss: 0.000969
2023-06-27 22:55:28.244709 Validation: avg loss: 1.2983, avg acc: 65.7143%
2023-06-27 22:55:42.328411 Train: epoch: 50 batch: 0/4, loss: 0.000754
2023-06-27 22:56:27.191368 Validation: avg loss: 1.3086, avg acc: 65.7143%
2023-06-27 22:56:27.225385 cuda
2023-06-27 22:56:41.667818 Train: epoch: 1 batch: 0/4, loss: 0.692002
2023-06-27 22:57:26.874897 Validation: avg loss: 0.6831, avg acc: 60.9524%
2023-06-27 22:57:41.358237 Train: epoch: 2 batch: 0/4, loss: 0.698575
2023-06-27 22:58:26.379618 Validation: avg loss: 0.6732, avg acc: 60.9524%
2023-06-27 22:58:40.809971 Train: epoch: 3 batch: 0/4, loss: 0.667633
2023-06-27 22:59:25.992367 Validation: avg loss: 0.6817, avg acc: 60.9524%
2023-06-27 22:59:40.568559 Train: epoch: 4 batch: 0/4, loss: 0.679582
2023-06-27 23:00:26.298536 Validation: avg loss: 0.6736, avg acc: 60.9524%
2023-06-27 23:00:40.815317 Train: epoch: 5 batch: 0/4, loss: 0.648631
2023-06-27 23:01:26.485894 Validation: avg loss: 0.6695, avg acc: 60.9524%
2023-06-27 23:01:40.843563 Train: epoch: 6 batch: 0/4, loss: 0.635978
2023-06-27 23:02:29.786361 Validation: avg loss: 0.6708, avg acc: 60.9524%
2023-06-27 23:02:44.489382 Train: epoch: 7 batch: 0/4, loss: 0.661664
2023-06-27 23:03:30.037811 Validation: avg loss: 0.6677, avg acc: 60.9524%
2023-06-27 23:03:47.009587 Train: epoch: 8 batch: 0/4, loss: 0.542907
2023-06-27 23:04:32.865509 Validation: avg loss: 0.6692, avg acc: 60.9524%
2023-06-27 23:04:47.364673 Train: epoch: 9 batch: 0/4, loss: 0.522208
2023-06-27 23:05:31.970194 Validation: avg loss: 0.6648, avg acc: 60.9524%
2023-06-27 23:05:46.158624 Train: epoch: 10 batch: 0/4, loss: 0.416084
2023-06-27 23:06:31.870999 Validation: avg loss: 0.6632, avg acc: 60.9524%
2023-06-27 23:06:46.130888 Train: epoch: 11 batch: 0/4, loss: 0.351118
2023-06-27 23:07:31.221879 Validation: avg loss: 0.6562, avg acc: 60.0000%
2023-06-27 23:07:45.620858 Train: epoch: 12 batch: 0/4, loss: 0.248121
2023-06-27 23:08:30.712929 Validation: avg loss: 0.6558, avg acc: 63.8095%
2023-06-27 23:08:45.170842 Train: epoch: 13 batch: 0/4, loss: 0.178588
2023-06-27 23:09:30.276789 Validation: avg loss: 0.6722, avg acc: 61.9048%
2023-06-27 23:09:44.853074 Train: epoch: 14 batch: 0/4, loss: 0.138953
2023-06-27 23:10:30.888671 Validation: avg loss: 0.7421, avg acc: 60.9524%
2023-06-27 23:10:45.386170 Train: epoch: 15 batch: 0/4, loss: 0.071094
2023-06-27 23:11:30.634682 Validation: avg loss: 0.7772, avg acc: 64.7619%
2023-06-27 23:11:44.951292 Train: epoch: 16 batch: 0/4, loss: 0.050339
2023-06-27 23:12:30.229361 Validation: avg loss: 0.8185, avg acc: 66.6667%
2023-06-27 23:12:44.429996 Train: epoch: 17 batch: 0/4, loss: 0.034063
2023-06-27 23:13:29.440894 Validation: avg loss: 0.8194, avg acc: 66.6667%
2023-06-27 23:13:43.809310 Train: epoch: 18 batch: 0/4, loss: 0.023794
2023-06-27 23:14:28.850536 Validation: avg loss: 0.8726, avg acc: 68.5714%
2023-06-27 23:14:43.125232 Train: epoch: 19 batch: 0/4, loss: 0.023007
2023-06-27 23:15:28.506826 Validation: avg loss: 0.9418, avg acc: 68.5714%
2023-06-27 23:15:42.724516 Train: epoch: 20 batch: 0/4, loss: 0.015237
2023-06-27 23:16:28.095369 Validation: avg loss: 0.9700, avg acc: 67.6190%
2023-06-27 23:16:42.467405 Train: epoch: 21 batch: 0/4, loss: 0.018638
2023-06-27 23:17:27.392373 Validation: avg loss: 1.0220, avg acc: 67.6190%
2023-06-27 23:17:41.575813 Train: epoch: 22 batch: 0/4, loss: 0.019878
2023-06-27 23:18:27.590927 Validation: avg loss: 1.1214, avg acc: 67.6190%
2023-06-27 23:18:41.640636 Train: epoch: 23 batch: 0/4, loss: 0.015436
2023-06-27 23:19:28.790967 Validation: avg loss: 1.2076, avg acc: 69.5238%
2023-06-27 23:19:45.854187 Train: epoch: 24 batch: 0/4, loss: 0.008988
2023-06-27 23:20:39.596887 Validation: avg loss: 1.3185, avg acc: 67.6190%
2023-06-27 23:20:56.522372 Train: epoch: 25 batch: 0/4, loss: 0.010765
2023-06-27 23:21:49.772384 Validation: avg loss: 1.4392, avg acc: 65.7143%
2023-06-27 23:22:06.752625 Train: epoch: 26 batch: 0/4, loss: 0.008803
2023-06-27 23:23:00.704419 Validation: avg loss: 1.5443, avg acc: 64.7619%
2023-06-27 23:23:17.697921 Train: epoch: 27 batch: 0/4, loss: 0.010919
2023-06-27 23:24:11.634059 Validation: avg loss: 1.5816, avg acc: 62.8571%
2023-06-27 23:24:28.635486 Train: epoch: 28 batch: 0/4, loss: 0.020247
2023-06-27 23:25:13.610646 Validation: avg loss: 1.5745, avg acc: 64.7619%
2023-06-27 23:25:27.762826 Train: epoch: 29 batch: 0/4, loss: 0.010510
2023-06-27 23:26:12.795590 Validation: avg loss: 1.5999, avg acc: 61.9048%
2023-06-27 23:26:27.565790 Train: epoch: 30 batch: 0/4, loss: 0.006730
2023-06-27 23:27:17.502410 Validation: avg loss: 1.5201, avg acc: 66.6667%
2023-06-27 23:27:34.741583 Train: epoch: 31 batch: 0/4, loss: 0.012892
2023-06-27 23:28:28.530721 Validation: avg loss: 1.5273, avg acc: 66.6667%
2023-06-27 23:28:45.886919 Train: epoch: 32 batch: 0/4, loss: 0.007448
2023-06-27 23:29:39.793679 Validation: avg loss: 1.5663, avg acc: 65.7143%
2023-06-27 23:29:56.559218 Train: epoch: 33 batch: 0/4, loss: 0.007973
2023-06-27 23:30:49.808918 Validation: avg loss: 1.6759, avg acc: 64.7619%
2023-06-27 23:31:06.749643 Train: epoch: 34 batch: 0/4, loss: 0.010191
2023-06-27 23:32:00.240737 Validation: avg loss: 1.7061, avg acc: 64.7619%
2023-06-27 23:32:17.436448 Train: epoch: 35 batch: 0/4, loss: 0.009669
2023-06-27 23:33:11.917138 Validation: avg loss: 1.6786, avg acc: 65.7143%
2023-06-27 23:33:29.083028 Train: epoch: 36 batch: 0/4, loss: 0.005688
2023-06-27 23:34:22.249065 Validation: avg loss: 1.6233, avg acc: 64.7619%
2023-06-27 23:34:36.440779 Train: epoch: 37 batch: 0/4, loss: 0.011351
2023-06-27 23:35:21.608001 Validation: avg loss: 1.4958, avg acc: 64.7619%
2023-06-27 23:35:35.757051 Train: epoch: 38 batch: 0/4, loss: 0.013235
2023-06-27 23:36:21.070012 Validation: avg loss: 1.4995, avg acc: 65.7143%
2023-06-27 23:36:35.183467 Train: epoch: 39 batch: 0/4, loss: 0.004506
2023-06-27 23:37:19.755812 Validation: avg loss: 1.5822, avg acc: 66.6667%
2023-06-27 23:37:33.834424 Train: epoch: 40 batch: 0/4, loss: 0.004020
2023-06-27 23:38:18.418379 Validation: avg loss: 1.5952, avg acc: 65.7143%
2023-06-27 23:38:32.532155 Train: epoch: 41 batch: 0/4, loss: 0.006036
2023-06-27 23:39:17.399217 Validation: avg loss: 1.5330, avg acc: 64.7619%
2023-06-27 23:39:31.655960 Train: epoch: 42 batch: 0/4, loss: 0.002260
2023-06-27 23:40:16.697558 Validation: avg loss: 1.5543, avg acc: 63.8095%
2023-06-27 23:40:31.012751 Train: epoch: 43 batch: 0/4, loss: 0.001725
2023-06-27 23:41:16.613344 Validation: avg loss: 1.6196, avg acc: 61.9048%
2023-06-27 23:41:31.058310 Train: epoch: 44 batch: 0/4, loss: 0.002373
2023-06-27 23:42:16.007442 Validation: avg loss: 2.0231, avg acc: 64.7619%
2023-06-27 23:42:30.329439 Train: epoch: 45 batch: 0/4, loss: 0.003343
2023-06-27 23:43:15.956035 Validation: avg loss: 2.3722, avg acc: 61.9048%
2023-06-27 23:43:30.537132 Train: epoch: 46 batch: 0/4, loss: 0.020183
2023-06-27 23:44:15.327968 Validation: avg loss: 1.6412, avg acc: 63.8095%
2023-06-27 23:44:29.571036 Train: epoch: 47 batch: 0/4, loss: 0.005822
2023-06-27 23:45:14.703431 Validation: avg loss: 1.7010, avg acc: 62.8571%
2023-06-27 23:45:28.895749 Train: epoch: 48 batch: 0/4, loss: 0.009988
2023-06-27 23:46:14.372640 Validation: avg loss: 1.6787, avg acc: 64.7619%
2023-06-27 23:46:28.549947 Train: epoch: 49 batch: 0/4, loss: 0.004813
2023-06-27 23:47:13.802909 Validation: avg loss: 1.7751, avg acc: 64.7619%
2023-06-27 23:47:27.910214 Train: epoch: 50 batch: 0/4, loss: 0.005619
2023-06-27 23:48:12.643633 Validation: avg loss: 1.8753, avg acc: 60.9524%
2023-06-27 23:48:12.676545 cuda
2023-06-27 23:48:26.762922 Train: epoch: 1 batch: 0/4, loss: 0.692719
2023-06-27 23:49:11.360796 Validation: avg loss: 0.6874, avg acc: 59.0476%
2023-06-27 23:49:25.567329 Train: epoch: 2 batch: 0/4, loss: 0.694902
2023-06-27 23:50:10.296476 Validation: avg loss: 0.6767, avg acc: 59.0476%
2023-06-27 23:50:24.635274 Train: epoch: 3 batch: 0/4, loss: 0.748423
2023-06-27 23:51:09.163575 Validation: avg loss: 0.6772, avg acc: 59.0476%
2023-06-27 23:51:23.470784 Train: epoch: 4 batch: 0/4, loss: 0.668663
2023-06-27 23:52:07.813810 Validation: avg loss: 0.6772, avg acc: 59.0476%
2023-06-27 23:52:21.899027 Train: epoch: 5 batch: 0/4, loss: 0.706061
2023-06-27 23:53:07.070617 Validation: avg loss: 0.6767, avg acc: 59.0476%
2023-06-27 23:53:21.447441 Train: epoch: 6 batch: 0/4, loss: 0.642712
2023-06-27 23:54:06.452342 Validation: avg loss: 0.6765, avg acc: 59.0476%
2023-06-27 23:54:20.942774 Train: epoch: 7 batch: 0/4, loss: 0.599767
2023-06-27 23:55:06.229018 Validation: avg loss: 0.6817, avg acc: 59.0476%
2023-06-27 23:55:20.888484 Train: epoch: 8 batch: 0/4, loss: 0.571694
2023-06-27 23:56:06.082262 Validation: avg loss: 0.7155, avg acc: 59.0476%
2023-06-27 23:56:20.563207 Train: epoch: 9 batch: 0/4, loss: 0.544862
2023-06-27 23:57:05.395680 Validation: avg loss: 0.6855, avg acc: 59.0476%
2023-06-27 23:57:19.790148 Train: epoch: 10 batch: 0/4, loss: 0.440930
2023-06-27 23:58:04.789425 Validation: avg loss: 0.6718, avg acc: 59.0476%
2023-06-27 23:58:19.325372 Train: epoch: 11 batch: 0/4, loss: 0.377782
2023-06-27 23:59:04.998068 Validation: avg loss: 0.6241, avg acc: 59.0476%
2023-06-27 23:59:19.486408 Train: epoch: 12 batch: 0/4, loss: 0.324883
2023-06-28 00:00:04.747729 Validation: avg loss: 0.6436, avg acc: 60.0000%
2023-06-28 00:00:19.341636 Train: epoch: 13 batch: 0/4, loss: 0.278890
2023-06-28 00:01:04.832568 Validation: avg loss: 0.5584, avg acc: 66.6667%
2023-06-28 00:01:19.377407 Train: epoch: 14 batch: 0/4, loss: 0.166610
2023-06-28 00:02:04.890031 Validation: avg loss: 0.5498, avg acc: 71.4286%
2023-06-28 00:02:19.093233 Train: epoch: 15 batch: 0/4, loss: 0.137812
2023-06-28 00:03:03.813998 Validation: avg loss: 0.5520, avg acc: 72.3810%
2023-06-28 00:03:17.912250 Train: epoch: 16 batch: 0/4, loss: 0.082624
2023-06-28 00:04:02.792006 Validation: avg loss: 0.5491, avg acc: 77.1429%
2023-06-28 00:04:17.202393 Train: epoch: 17 batch: 0/4, loss: 0.068832
2023-06-28 00:05:02.498994 Validation: avg loss: 0.5363, avg acc: 80.9524%
2023-06-28 00:05:16.720201 Train: epoch: 18 batch: 0/4, loss: 0.040624
2023-06-28 00:06:01.866280 Validation: avg loss: 0.5907, avg acc: 76.1905%
2023-06-28 00:06:16.861147 Train: epoch: 19 batch: 0/4, loss: 0.026078
2023-06-28 00:07:01.990593 Validation: avg loss: 0.7162, avg acc: 72.3810%
2023-06-28 00:07:16.178375 Train: epoch: 20 batch: 0/4, loss: 0.032948
2023-06-28 00:08:01.200749 Validation: avg loss: 0.8366, avg acc: 69.5238%
2023-06-28 00:08:15.625394 Train: epoch: 21 batch: 0/4, loss: 0.019867
2023-06-28 00:09:00.661095 Validation: avg loss: 0.8742, avg acc: 69.5238%
2023-06-28 00:09:14.899593 Train: epoch: 22 batch: 0/4, loss: 0.019119
2023-06-28 00:09:59.624718 Validation: avg loss: 0.8526, avg acc: 72.3810%
2023-06-28 00:10:13.976701 Train: epoch: 23 batch: 0/4, loss: 0.013461
2023-06-28 00:10:59.149665 Validation: avg loss: 0.8197, avg acc: 74.2857%
2023-06-28 00:11:13.582789 Train: epoch: 24 batch: 0/4, loss: 0.011778
2023-06-28 00:11:58.923315 Validation: avg loss: 0.8129, avg acc: 73.3333%
2023-06-28 00:12:13.196466 Train: epoch: 25 batch: 0/4, loss: 0.008090
2023-06-28 00:12:58.233841 Validation: avg loss: 0.8435, avg acc: 74.2857%
2023-06-28 00:13:12.497409 Train: epoch: 26 batch: 0/4, loss: 0.007485
2023-06-28 00:13:58.058072 Validation: avg loss: 0.8873, avg acc: 73.3333%
2023-06-28 00:14:12.557283 Train: epoch: 27 batch: 0/4, loss: 0.012831
2023-06-28 00:14:57.797601 Validation: avg loss: 0.9106, avg acc: 77.1429%
2023-06-28 00:15:12.113567 Train: epoch: 28 batch: 0/4, loss: 0.008352
2023-06-28 00:15:57.620574 Validation: avg loss: 0.8554, avg acc: 75.2381%
2023-06-28 00:16:11.834754 Train: epoch: 29 batch: 0/4, loss: 0.003746
2023-06-28 00:16:57.506909 Validation: avg loss: 0.8870, avg acc: 74.2857%
2023-06-28 00:17:11.843754 Train: epoch: 30 batch: 0/4, loss: 0.006036
2023-06-28 00:17:56.766811 Validation: avg loss: 0.7923, avg acc: 76.1905%
2023-06-28 00:18:10.978173 Train: epoch: 31 batch: 0/4, loss: 0.003501
2023-06-28 00:18:55.909253 Validation: avg loss: 0.7713, avg acc: 78.0952%
2023-06-28 00:19:10.124389 Train: epoch: 32 batch: 0/4, loss: 0.003706
2023-06-28 00:19:55.141533 Validation: avg loss: 0.7787, avg acc: 77.1429%
2023-06-28 00:20:09.240360 Train: epoch: 33 batch: 0/4, loss: 0.002162
2023-06-28 00:20:54.349713 Validation: avg loss: 0.7848, avg acc: 78.0952%
2023-06-28 00:21:08.623282 Train: epoch: 34 batch: 0/4, loss: 0.002487
2023-06-28 00:21:53.630017 Validation: avg loss: 0.7555, avg acc: 77.1429%
2023-06-28 00:22:07.706382 Train: epoch: 35 batch: 0/4, loss: 0.002501
2023-06-28 00:22:52.953638 Validation: avg loss: 0.7596, avg acc: 76.1905%
2023-06-28 00:23:07.442662 Train: epoch: 36 batch: 0/4, loss: 0.001733
2023-06-28 00:23:52.886656 Validation: avg loss: 0.7571, avg acc: 77.1429%
2023-06-28 00:24:07.114257 Train: epoch: 37 batch: 0/4, loss: 0.001552
2023-06-28 00:24:52.605825 Validation: avg loss: 0.7792, avg acc: 77.1429%
2023-06-28 00:25:06.795100 Train: epoch: 38 batch: 0/4, loss: 0.001730
2023-06-28 00:25:52.150007 Validation: avg loss: 0.8097, avg acc: 77.1429%
2023-06-28 00:26:06.181996 Train: epoch: 39 batch: 0/4, loss: 0.001733
2023-06-28 00:26:50.730113 Validation: avg loss: 0.8571, avg acc: 80.0000%
2023-06-28 00:27:04.861965 Train: epoch: 40 batch: 0/4, loss: 0.002980
2023-06-28 00:27:50.246158 Validation: avg loss: 0.9048, avg acc: 75.2381%
2023-06-28 00:28:05.091455 Train: epoch: 41 batch: 0/4, loss: 0.001818
2023-06-28 00:28:50.104469 Validation: avg loss: 0.9164, avg acc: 76.1905%
2023-06-28 00:29:04.291436 Train: epoch: 42 batch: 0/4, loss: 0.002385
2023-06-28 00:29:49.425707 Validation: avg loss: 0.9623, avg acc: 74.2857%
2023-06-28 00:30:03.823110 Train: epoch: 43 batch: 0/4, loss: 0.000947
2023-06-28 00:30:48.510228 Validation: avg loss: 0.9341, avg acc: 75.2381%
2023-06-28 00:31:02.610482 Train: epoch: 44 batch: 0/4, loss: 0.001757
2023-06-28 00:31:47.867238 Validation: avg loss: 0.9372, avg acc: 76.1905%
2023-06-28 00:32:02.121239 Train: epoch: 45 batch: 0/4, loss: 0.001092
2023-06-28 00:32:53.404032 Validation: avg loss: 0.9359, avg acc: 76.1905%
2023-06-28 00:33:07.542866 Train: epoch: 46 batch: 0/4, loss: 0.000694
2023-06-28 00:33:53.051343 Validation: avg loss: 0.9180, avg acc: 75.2381%
2023-06-28 00:34:07.450520 Train: epoch: 47 batch: 0/4, loss: 0.000740
2023-06-28 00:34:52.014761 Validation: avg loss: 0.9117, avg acc: 74.2857%
2023-06-28 00:35:06.127767 Train: epoch: 48 batch: 0/4, loss: 0.000983
2023-06-28 00:35:50.953911 Validation: avg loss: 0.9229, avg acc: 74.2857%
2023-06-28 00:36:05.429212 Train: epoch: 49 batch: 0/4, loss: 0.000797
2023-06-28 00:36:50.611333 Validation: avg loss: 0.9111, avg acc: 75.2381%
2023-06-28 00:37:04.781826 Train: epoch: 50 batch: 0/4, loss: 0.000721
2023-06-28 00:37:49.944599 Validation: avg loss: 0.8921, avg acc: 75.2381%