Commit 06924f5d authored by mayong's avatar mayong
Browse files

Add the src of libs.

parent 83ff3a7f
#ifndef WENETPARAMS_H
#define WENETPARAMS_H
// #pragma pack(1)
// Size of the output token vocabulary (dimension of the CTC head and the
// decoder output projection).
#define vocab_size 5538
// Convolutional subsampling front-end of the encoder: two conv layers
// followed by a linear projection into the 512-dim model space.
// NOTE(review): the "* 9" factors presumably encode 3x3 kernels and 9728
// the flattened post-subsampling feature size — confirm against the
// exporting script.
typedef struct {
float conv0_weight[512 * 9];
float conv0_bias[512];
float conv1_weight[512 * 512 * 9];
float conv1_bias[512];
float out0_weight[9728 * 512];
float out0_bias[512];
} EncEmbedParams;
// One multi-head attention block: Q/K/V input projections plus the output
// projection, all 512x512 with bias.
typedef struct {
float linear_q_weight[512 * 512];
float linear_q_bias[512];
float linear_k_weight[512 * 512];
float linear_k_bias[512];
float linear_v_weight[512 * 512];
float linear_v_bias[512];
float linear_out_weight[512 * 512];
float linear_out_bias[512];
} SelfAttnParams;
// Encoder self-attention with relative positional encoding: the base
// attention projections plus a positional projection and the two learned
// position-bias vectors (u, v).
typedef struct {
SelfAttnParams linear0;
float linear_pos_weight[512 * 512];
float pos_bias_u[512];
float pos_bias_v[512];
} EncSelfAttnParams;
// Position-wise feed-forward block: 512 -> 2048 -> 512 with biases.
typedef struct {
float w1_weight[512 * 2048];
float w1_bias[2048];
float w2_weight[2048 * 512];
float w2_bias[512];
} FeedForwardParams;
// Layer-norm style scale (weight) and shift (bias) over the 512-dim axis.
typedef struct {
float weight[512];
float bias[512];
} NormParams;
// Conformer convolution module: pointwise conv expanding 512 -> 1024
// (halved again by GLU), a depthwise conv (kernel length 15 per the array
// size), a pointwise conv back to 512, and an internal norm.
typedef struct {
float pointwise_conv1_weight[1024 * 512];
float pointwise_conv1_bias[1024];
float depthwise_conv_weight[512 * 15];
float depthwise_conv_bias[512];
float pointwise_conv2_weight[512 * 512];
float pointwise_conv2_bias[512];
NormParams norm;
} EncConvParams;
// One encoder layer. The macaron feed-forward + conv module + per-sublayer
// norms match the conformer layer layout.
typedef struct {
EncSelfAttnParams self_attn;
FeedForwardParams feedforward;
FeedForwardParams feedforward_macaron;
EncConvParams conv_module;
NormParams norm_ff;
NormParams norm_mha;
NormParams norm_macaron;
NormParams norm_conv;
NormParams norm_final;
// float concat_weight[1024 * 512];
// float concat_bias[512];
} SubEncoderParams;
// Full encoder: subsampling front-end, 12 conformer layers, final norm.
typedef struct {
EncEmbedParams embed;
SubEncoderParams sub_encoder[12];
NormParams after_norm;
} EncoderParams;
// One transformer decoder layer: masked self-attention, cross-attention
// over the encoder output, feed-forward, and the three sublayer norms.
typedef struct {
SelfAttnParams self_attn;
SelfAttnParams src_attn;
FeedForwardParams feedward;
NormParams norm1;
NormParams norm2;
NormParams norm3;
// float concat_weight1[1024 * 512];
// float concat_bias1[512];
// float concat_weight2[1024 * 512];
// float concat_bias2[512];
} SubDecoderParams;
// Full decoder: token embedding, 6 layers, final norm, and the output
// projection to vocabulary logits.
typedef struct {
float embed_weight[vocab_size * 512];
SubDecoderParams sub_decoder[6];
NormParams after_norm;
float output_weight[vocab_size * 512];
float output_bias[vocab_size];
} DecoderParams;
// Complete parameter blob. Field order defines the on-disk layout of the
// model file read by loadparams(), so it must not be reordered.
typedef struct {
EncoderParams encoder;
float ctc_weight[512 * vocab_size];
float ctc_bias[vocab_size];
DecoderParams decoder;
} WenetParams;
// #pragma pack()
#endif
#include "precomp.h"
/* Load a whole binary parameter file into a 32-byte-aligned float buffer.
 * Returns the buffer (ownership passes to the caller) or NULL on failure.
 * The original dereferenced a NULL FILE* when the file was missing and
 * ignored the fread result; both are now checked.
 * NOTE(review): buffer must be released with the partner of
 * aligned_malloc — confirm its contract in precomp.h. */
float *loadparams(const char *filename)
{
    FILE *fp = fopen(filename, "rb");
    if (fp == NULL)
        return NULL;
    if (fseek(fp, 0, SEEK_END) != 0) {
        fclose(fp);
        return NULL;
    }
    long nFileLen = ftell(fp);
    if (nFileLen < 0) {
        fclose(fp);
        return NULL;
    }
    fseek(fp, 0, SEEK_SET);
    float *params_addr = (float *)aligned_malloc(32, (uint32_t)nFileLen);
    if (params_addr == NULL) {
        fclose(fp);
        return NULL;
    }
    size_t nread = fread(params_addr, 1, (size_t)nFileLen, fp);
    fclose(fp);
    if (nread != (size_t)nFileLen) {
        /* Short read: warn rather than silently hand back a partially
         * filled buffer; the buffer is still returned because the matching
         * aligned free function is not visible here. */
        fprintf(stderr, "loadparams: short read on %s\n", filename);
    }
    return params_addr;
}
/* Round val up to the next multiple of align.
 * Pure integer version: the original float/ceil round-trip silently loses
 * precision once val exceeds 2^24 (float mantissa width).
 * Assumes val >= 0 and align > 0, matching all uses of alignment here. */
int val_align(int val, int align)
{
    return ((val + align - 1) / align) * align;
}
/* Debug helper: print the first `size` floats of din, space-separated,
 * terminated by a newline. */
void disp_params(float *din, int size)
{
    for (int idx = 0; idx < size; idx++)
        printf("%f ", din[idx]);
    printf("\n");
}
/* Dump len raw bytes from data into filename (binary mode, truncating any
 * existing content). The original dereferenced a NULL FILE* when fopen
 * failed and ignored short writes; failures are now reported on stderr. */
void SaveDataFile(const char *filename, void *data, uint32_t len)
{
    FILE *fp = fopen(filename, "wb+");
    if (fp == NULL) {
        fprintf(stderr, "SaveDataFile: cannot open %s\n", filename);
        return;
    }
    size_t written = fwrite(data, 1, len, fp);
    if (written != (size_t)len)
        fprintf(stderr, "SaveDataFile: short write to %s\n", filename);
    /* fclose flushes buffered data; a failure here also means data loss. */
    if (fclose(fp) != 0)
        fprintf(stderr, "SaveDataFile: close failed for %s\n", filename);
}
// RMS-normalise each 512-wide row of din in place:
// row[j] /= sqrt(mean(row^2) + norm).
// Row count is taken from size[2] (time axis); the feature width of 512
// is fixed by the model dimension.
void basic_norm(Tensor<float> *&din, float norm)
{
    int rows = din->size[2];
    for (int r = 0; r < rows; r++) {
        float *row = din->buff + r * 512;
        float sq_sum = 0;
        for (int c = 0; c < 512; c++)
            sq_sum += row[c] * row[c];
        float denom = sqrt(sq_sum / 512 + norm);
        for (int c = 0; c < 512; c++)
            row[c] = row[c] / denom;
    }
}
// Scan din[0..len) for the largest value, reporting it and its (first)
// index through the reference out-parameters. For len <= 0 the outputs
// are -INFINITY and -1.
void findmax(float *din, int len, float &max_val, int &max_idx)
{
    max_val = -INFINITY;
    max_idx = -1;
    for (int pos = 0; pos < len; pos++) {
        if (din[pos] > max_val) {
            max_idx = pos;
            max_val = din[pos];
        }
    }
}
// Join two path components with the platform separator, avoiding a
// doubled separator when p1 already ends with one.
// Fixes UB in the original: for an empty p1, p1[p1.length() - 1]
// underflows size_t and indexes out of range.
string pathAppend(const string &p1, const string &p2)
{
    char sep = '/';
#ifdef _WIN32
    sep = '\\';
#endif
    if (p1.empty())
        return p2;
    if (p1.back() != sep)
        return p1 + sep + p2; // need to insert a separator
    return p1 + p2;
}
// In-place ReLU over the whole tensor buffer: negative entries become 0.
void relu(Tensor<float> *din)
{
    float *p = din->buff;
    for (int k = 0; k < din->buff_size; k++) {
        if (p[k] < 0)
            p[k] = 0;
    }
}
// In-place Swish (SiLU) activation: x -> x * sigmoid(x),
// written as x / (1 + exp(-x)).
void swish(Tensor<float> *din)
{
    float *p = din->buff;
    int remain = din->buff_size;
    while (remain-- > 0) {
        float x = *p;
        *p++ = x / (1 + exp(-x));
    }
}
// In-place logistic sigmoid over the tensor buffer: x -> 1 / (1 + e^-x).
void sigmoid(Tensor<float> *din)
{
    float *p = din->buff;
    for (int remain = din->buff_size; remain > 0; remain--, p++) {
        float x = *p;
        *p = 1 / (1 + exp(-x));
    }
}
// In-place shifted swish: x -> x * sigmoid(x - 1),
// computed as x / (1 + exp(-x + 1)).
void doubleswish(Tensor<float> *din)
{
    for (int k = din->buff_size - 1; k >= 0; k--) {
        float x = din->buff[k];
        din->buff[k] = x / (1 + exp(-x + 1));
    }
}
/* In-place masked softmax: normalise din[0..mask) to sum to 1 (with the
 * usual max-subtraction for numerical stability) and zero the padding
 * region din[mask..len).
 * Rewritten to work fully in place, removing the original's unchecked
 * malloc of a temporary buffer (which would have crashed on OOM). */
void softmax(float *din, int mask, int len)
{
    int i;
    float max = -INFINITY;
    for (i = 0; i < mask; i++) {
        if (din[i] > max)
            max = din[i];
    }
    float sum = 0;
    for (i = 0; i < mask; i++) {
        din[i] = exp(din[i] - max);
        sum += din[i];
    }
    for (i = 0; i < mask; i++) {
        din[i] /= sum;
    }
    /* Masked-out tail is defined to be exactly zero. */
    for (i = mask; i < len; i++) {
        din[i] = 0;
    }
}
/* In-place log-softmax over din[0..len) using the max-shifted
 * log-sum-exp form: x_i -> x_i - max - log(sum(exp(x_j - max))).
 * Fixes two defects in the original: exp() of the raw inputs overflows to
 * inf for moderately large logits (yielding -inf/NaN outputs), and the
 * temporary malloc was unchecked. No temporary buffer is needed. */
void log_softmax(float *din, int len)
{
    int i;
    float max = -INFINITY;
    for (i = 0; i < len; i++) {
        if (din[i] > max)
            max = din[i];
    }
    float sum = 0;
    for (i = 0; i < len; i++) {
        sum += exp(din[i] - max);
    }
    float log_sum = log(sum);
    for (i = 0; i < len; i++) {
        din[i] = din[i] - max - log_sum;
    }
}
// Gated linear unit over 1024-wide rows of din: each row is split into a
// value half and a gate half of 512 each, and
// dout = value * sigmoid(gate), producing 512 outputs per row.
// Row count is derived from the flat buffer size (buff_size / 1024).
void glu(Tensor<float> *din, Tensor<float> *dout)
{
    int rows = din->buff_size / 1024;
    for (int r = 0; r < rows; r++) {
        const float *src = din->buff + r * 1024;
        float *dst = dout->buff + r * 512;
        for (int c = 0; c < 512; c++) {
            float value = src[c];
            float gate = src[c + 512];
            dst[c] = value / (1 + exp(-gate));
        }
    }
}
#ifndef UTIL_H
#define UTIL_H
#include "Tensor.h"
#include <iostream>
using namespace std;
// Loads a binary file of raw floats into an aligned buffer (caller owns it).
extern float *loadparams(const char *filename);
// Writes len raw bytes of data to filename in binary mode.
extern void SaveDataFile(const char *filename, void *data, uint32_t len);
// In-place element-wise activations over a tensor's flat buffer.
extern void relu(Tensor<float> *din);
extern void swish(Tensor<float> *din);
extern void sigmoid(Tensor<float> *din);
extern void doubleswish(Tensor<float> *din);
// Masked in-place softmax over din[0..mask); zeroes din[mask..len).
extern void softmax(float *din, int mask, int len);
// In-place log-softmax over din[0..len).
extern void log_softmax(float *din, int len);
// Rounds val up to the next multiple of align.
extern int val_align(int val, int align);
// Debug print of the first `size` floats of din.
extern void disp_params(float *din, int size);
// Per-row RMS normalisation (row width 512) with epsilon `norm`.
extern void basic_norm(Tensor<float> *&din, float norm);
// Returns the maximum value and its index via the reference out-params.
extern void findmax(float *din, int len, float &max_val, int &max_idx);
// Gated linear unit: halves the 1024-wide rows of din into dout.
extern void glu(Tensor<float> *din, Tensor<float> *dout);
// Joins two path components with the platform separator.
string pathAppend(const string &p1, const string &p2);
#endif
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment