Commit 25ccd163 authored by Sugon_ldc

add model lstm

Pipeline #2078 failed with stages in 0 seconds

import numpy as np
import joblib
import statsmodels.tsa.api as smt
import random
import json
def label_one_left(labels):
    # Map labels to a binary vector: label 1 stays 1, everything else becomes 0.
    ret = []
    for lab in labels:
        if lab != 1:
            ret.append(0)
        else:
            ret.append(1)
    return ret
def print_label_sta(labels):
    # Print the count and proportion of each label value.
    sta = {}
    n = 0
    for lab in labels:
        if lab not in sta:
            sta[lab] = 1
        else:
            sta[lab] += 1
        n += 1
    for xkey in sta:
        print("lab:" + str(xkey) + ", n=" + str(sta[xkey]) + ", proportion:" + str(sta[xkey] / n))
def load_model(modelfile):
    return joblib.load(modelfile)
def save_json(entity, path):
    with open(path, 'w') as f:
        json.dump(entity, f)
def load_json(path):
    with open(path, 'r') as f:
        return json.load(f)
def save_joblib(entity, path):
    with open(path, 'wb') as f:
        joblib.dump(entity, f)
def load_joblib(path):
    with open(path, 'rb') as f:
        return joblib.load(f)
def quality_poor(seq, nrepeat=7):
    # Return True if the same value appears nrepeat or more times in a row.
    curNum = -1
    sameInRow = 1
    for x in seq:
        if x != curNum:
            sameInRow = 1
            curNum = x
        else:
            sameInRow = sameInRow + 1
        if sameInRow >= nrepeat:
            return True
    return False
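# Usage sketch (illustrative values, not part of the original module):
#   quality_poor([800] * 7 + [820])          -> True  (seven identical samples in a row)
#   quality_poor([800, 810, 800, 810, 800])  -> False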
def scale_by(featsets, scale_factors):
    # Standardise feature rows with precomputed factors:
    # scale_factors[0] holds the column means, scale_factors[1] the column stds.
    std_feats = []
    npfeats = np.array(featsets)
    np_sf = np.array(scale_factors)
    for one in npfeats:
        featbuf = (one - np_sf[0]) / np_sf[1]
        std_feats.append(featbuf)
    return std_feats
def decrease100(rris):
    # Fraction of successive differences rris[i] - rris[i - 1] that are less than 100.
    pnn100 = 0
    for i in range(1, len(rris)):
        if (rris[i] - rris[i - 1]) < 100:
            pnn100 += 1
    return pnn100 / (len(rris) - 1)
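# Usage sketch (illustrative values, not part of the original module):
#   decrease100([800, 850, 990, 980]) -> 2/3, since the diffs 50 and -10 are < 100
#   while 140 is not.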
def serial_dscp(ser):
    # Descriptive statistics of a series, returned in the order
    # [mean, max, min, range, std, median].
    ret = []
    # mean
    ret.append(np.mean(ser))  # 0
    # max
    smax = np.max(ser)
    ret.append(float(smax))  # 1
    # min
    smin = np.min(ser)
    ret.append(float(smin))  # 2
    # range (max - min)
    ret.append(float(smax - smin))  # 3
    # std
    ret.append(np.std(ser))  # 4
    # median
    ret.append(np.median(ser))  # 5
    return ret
def get_z_serial(ser, avg, std, maskval=None):
    # Z-score a series with the given mean/std, leaving masked samples unchanged.
    ret = []
    for i in range(0, len(ser)):
        if ser[i] != maskval:
            ret.append((ser[i] - avg) / std)
        else:
            ret.append(maskval)
    return ret
def auto_corr_ser(dat, nlag, maskval=-999999):
    # Autocorrelation of the unmasked samples; the masked samples are dropped
    # before computing the ACF, and the same number of maskval placeholders is
    # prepended so the output stays aligned with the input.
    subsec = []
    begmasklen = 0
    for one in dat:
        if one != maskval:
            subsec.append(one)
        else:
            begmasklen += 1
    autoreg = smt.stattools.acf(subsec, nlags=nlag - begmasklen)
    ret = [maskval] * begmasklen
    ret.extend(autoreg)
    return ret
def adj_diff(dat):
    # First-order differences of adjacent samples.
    ret = []
    for i in range(1, len(dat)):
        ret.append(dat[i] - dat[i - 1])
    return ret
def diff_rms(dat):
    # Square root of the mean of squared successive differences;
    # the mean is taken over len(dat) samples.
    diff = []
    for i in range(1, len(dat)):
        diff.append((dat[i] - dat[i - 1]) ** 2)
    dsum = 0
    for j in range(0, len(diff)):
        dsum += diff[j]
    dsum = dsum / len(dat)
    return dsum ** 0.5
def arrs_xtd_paras(arrs):
    # Concatenate several arrays and return the overall mean and std.
    long_arr = []
    for arr in arrs:
        long_arr.extend(arr)
    return np.mean(long_arr), np.std(long_arr)
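# Usage sketch (hypothetical segments, not part of the original module): pool
# several RR segments for a global mean/std, then z-score one segment while
# leaving masked samples (-1 here) untouched.
#   avg, std = arrs_xtd_paras([[800, 820], [780, 840]])
#   z = get_z_serial([800, -1, 840], avg, std, maskval=-1)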
def full_distance(rris):
    # Sum of absolute successive differences of the series.
    ret = 0
    for i in range(1, len(rris)):
        ret += np.abs(rris[i] - rris[i - 1])
    return ret
def scale(mat):
    # Column-wise z-scoring of a feature matrix; returns the scaled matrix plus
    # the per-column means and stds (a zero std is replaced by 1).
    avgs = []
    stds = []
    results = []
    ncase = len(mat)
    nfeat = len(mat[0])
    for j in range(0, nfeat):
        vals = []
        for i in range(0, ncase):
            vals.append(mat[i][j])
        avgs.append(np.mean(vals))
        xstd = np.std(vals)
        if xstd == 0:
            xstd = 1
        stds.append(xstd)
    for r in range(0, ncase):
        row = []
        for c in range(0, nfeat):
            row.append((mat[r][c] - avgs[c]) / stds[c])
        results.append(row)
    return results, avgs, stds
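# Usage sketch (hypothetical values, not part of the original module): fit the
# scaling on training features and reuse the factors on new samples.
#   train_std, avgs, stds = scale([[1, 2], [3, 4]])   # avgs=[2.0, 3.0], stds=[1.0, 1.0]
#   test_std = scale_by([[5, 6]], [avgs, stds])       # -> [array([3., 3.])]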
# tar_high is not included in the positive range
def binarilize(tar_low, tar_high, dat):
    # 1 if tar_low <= value < tar_high, else 0.
    ret = []
    for one in dat:
        if tar_low <= one < tar_high:
            ret.append(1)
        else:
            ret.append(0)
    return ret
def multiple_split(feats, labs2d, test_size):
    # Random train/test split that keeps several label lists (labs2d, one list
    # per label type) aligned with the feature rows.
    xlen = len(feats)
    idx = list(range(0, xlen))
    ntest = int(xlen * test_size)
    test_idx = set(random.sample(idx, ntest))
    ny = len(labs2d)
    X = []
    Xt = []
    y = []
    yt = []
    for i in range(0, ny):
        y.append([])
        yt.append([])
    for i in range(0, xlen):
        if i in test_idx:
            Xt.append(feats[i])
            for j in range(0, ny):
                yt[j].append(labs2d[j][i])
        else:
            X.append(feats[i])
            for k in range(0, ny):
                y[k].append(labs2d[k][i])
    return X, Xt, y, yt
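# Usage sketch (hypothetical data, not part of the original module): two label
# lists are split together with the features, 25% of the rows held out.
#   feats = [[0.1], [0.2], [0.3], [0.4]]
#   labs2d = [[0, 1, 0, 1], [2, 2, 3, 3]]
#   X, Xt, y, yt = multiple_split(feats, labs2d, 0.25)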
def linear_soomth(src, mask=-1):
    # Fill masked samples in place by linear interpolation between the nearest
    # valid neighbours; if only one side has a valid value, copy that value.
    for i in range(0, len(src)):
        if src[i] == mask:
            begi = -1
            endi = -1
            # search backward for the previous valid sample
            for j in range(i - 1, -1, -1):
                if src[j] != mask:
                    begi = j
                    break
            # search forward for the next valid sample
            for k in range(i + 1, len(src), 1):
                if src[k] != mask:
                    endi = k
                    break
            if begi != -1 and endi != -1:
                full_stride = endi - begi
                loc_stride = i - begi
                stepval = (src[endi] - src[begi]) / full_stride
                src[i] = src[begi] + (stepval * loc_stride)
            elif begi == -1 and endi != -1:
                src[i] = src[endi]
            elif begi != -1 and endi == -1:
                src[i] = src[begi]
    return src
def gen_rrix211():
    # Hard-coded RR-interval test sequence (values in ms).
    x = [730,720,790,810,760,690,710,660,670,690,720,750,
         750,800,820,760,710,730,780,750,760,790,1000,830,860,880,880,
         820,850,850,880,830,860,880,840,830,890,910,860,880,890,810,
         830,830,810,820,860,820,850,880,820,870,900,850,890,920,890,
         910,920,870,940,950,880,920,930,900,940,950,930,950,980,970,
         980,950,920,950,960,890,910,940,900,920,910,870,880,890,860,
         860,860,830,820,860,830,880,910,910,870,930,940,900,940,970,
         910,890,910,730,720,790,810,760,690,710,660,670,690,720,750,
         750,800,820,760,710,730,780,750,760,790,860,830,860,880,880,
         820,850,850,880,830,860,880,840,830,890,910,860,880,890,810,
         830,830,810,820,860,820,850,880,820,870,900,850,890,920,890,
         910,920,870,940,950,880,920,930,900,940,950,930,950,980,970,
         980,950,920,950,960,890,910,940,900,920,910,870,880,890,860,
         860,860,830,820,860,830,880,910,910,870,930,940,1200,940,970,
         910,890,910,1000]
    return x
if __name__ == "__main__":
    # print(len(gen_rrix211()))
    x = [-1, 800, 900, -1, -1, 1200, -1, 1500, -1]
    xsmooth = linear_soomth(x)
    print(xsmooth)
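    # Demo sketch: summarize the built-in RR test sequence with the helpers above.
    rris = gen_rrix211()
    print("n samples:", len(rris))
    print("descriptors [mean, max, min, range, std, median]:", serial_dscp(rris))
    print("fraction of successive diffs < 100 ms:", decrease100(rris))
    print("poor quality (>= 7 identical in a row):", quality_poor(rris))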