/* encoding_lib.h */
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 * Created by: Hang Zhang
 * ECE Department, Rutgers University
 * Email: zhang.hang@rutgers.edu
 * Copyright (c) 2017
 *
 * This source code is licensed under the MIT-style license found in the
 * LICENSE file in the root directory of this source tree 
 *+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 */

/*
#include <THC/THC.h>

#ifdef __cplusplus
extern "C" {
#endif

#define Encoding_(NAME) TH_CONCAT_4(Encoding_, Real, _, NAME)
#define THCTensor        TH_CONCAT_3(TH,CReal,Tensor)
#define THCTensor_(NAME) TH_CONCAT_4(TH,CReal,Tensor_,NAME)

// float
#include "generic/encoding_generic.h"
#include "THC/THCGenerateFloatType.h"

#include "generic/syncbn_generic.h"
#include "THC/THCGenerateFloatType.h"

#ifdef __cplusplus
}
#endif
*/
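
/*
 * Note on the disabled block above: including "THC/THCGenerateFloatType.h"
 * defines Real as Float and CReal as Cuda, so a generic prototype written as,
 * for example,
 *
 *     int Encoding_(scaledl2_forward)(THCTensor *SL, ...);
 *
 * would expand to
 *
 *     int Encoding_Float_scaledl2_forward(THCudaTensor *SL, ...);
 *
 * Repeating the includes with THC/THCGenerateDoubleType.h would likewise
 * produce the Double variants. The prototypes below spell these expansions
 * out explicitly instead.
 */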

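/*
 * Scaled L2 distance used by the encoding layer: the forward pass fills
 * SL[b][i][k] = S[k] * ||X[b][i] - C[k]||^2 for inputs X, codewords C and
 * smoothing factors S; the backward pass takes the upstream gradient GSL and
 * fills GX and GC, the gradients with respect to X and C.
 */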
int Encoding_Float_scaledl2_forward(THCudaTensor *SL,  
    THCudaTensor *X, THCudaTensor *C, THCudaTensor *S);

int Encoding_Float_scaledl2_backward(
    THCudaTensor *GSL, THCudaTensor *GX, THCudaTensor *GC,
    THCudaTensor *X, THCudaTensor *C, THCudaTensor *S);

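/*
 * Illustrative sketch only (an assumption, not this library's CUDA kernel):
 * a plain-C reference for the scaled-L2 forward pass, assuming contiguous
 * buffers with X laid out as B x N x D, C as K x D, S as K and SL as
 * B x N x K. The helper name and layout are made up for illustration.
 */
#if 0
#include <stddef.h>

/* SL[b][i][k] = S[k] * ||X[b][i] - C[k]||^2 */
static void scaledl2_forward_ref(float *SL, const float *X, const float *C,
                                 const float *S, size_t B, size_t N,
                                 size_t K, size_t D)
{
    for (size_t b = 0; b < B; ++b)
        for (size_t i = 0; i < N; ++i)
            for (size_t k = 0; k < K; ++k) {
                float acc = 0.f;
                for (size_t d = 0; d < D; ++d) {
                    /* squared Euclidean distance between X[b][i] and C[k] */
                    float r = X[(b * N + i) * D + d] - C[k * D + d];
                    acc += r * r;
                }
                SL[(b * N + i) * K + k] = S[k] * acc;
            }
}
#endif
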
/*
 * Encoding-layer aggregation of residuals: forward fills
 * E[b][k] = sum_i A[b][i][k] * (X[b][i] - C[k]); backward takes the upstream
 * gradient GE and fills GA, the gradient w.r.t. the assignment weights A.
 */
int Encoding_Float_aggregate_forward(THCudaTensor *E, THCudaTensor *A,
    THCudaTensor *X, THCudaTensor *C);

int Encoding_Float_aggregate_backward(THCudaTensor *GA, THCudaTensor *GE,
    THCudaTensor *A, THCudaTensor *X, THCudaTensor *C);

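/*
 * Illustrative sketch only (an assumption, not this library's CUDA kernel):
 * a plain-C reference for the aggregation step, assuming contiguous buffers
 * with A laid out as B x N x K, X as B x N x D, C as K x D and E as
 * B x K x D. The helper name and layout are made up for illustration.
 */
#if 0
#include <stddef.h>

/* E[b][k] = sum_i A[b][i][k] * (X[b][i] - C[k]) */
static void aggregate_forward_ref(float *E, const float *A, const float *X,
                                  const float *C, size_t B, size_t N,
                                  size_t K, size_t D)
{
    for (size_t b = 0; b < B; ++b)
        for (size_t k = 0; k < K; ++k)
            for (size_t d = 0; d < D; ++d) {
                float acc = 0.f;
                for (size_t i = 0; i < N; ++i)
                    /* residual (X - C) weighted by the assignment A */
                    acc += A[(b * N + i) * K + k]
                         * (X[(b * N + i) * D + d] - C[k * D + d]);
                E[(b * K + k) * D + d] = acc;
            }
}
#endif
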
/*
 * Batch normalization (synchronized, cross-GPU variant): forward applies
 * output = gamma * (input - mean) * invstd + beta per channel with
 * caller-supplied mean_ and invstd_; backward propagates gradoutput_ into
 * gradinput_, gradgamma_, gradbeta_ and the statistics gradients
 * gradMean_/gradStd_ (train selects training vs. evaluation mode).
 */
int Encoding_Float_batchnorm_Forward(THCudaTensor *output_,
    THCudaTensor *input_, THCudaTensor *mean_,
    THCudaTensor *invstd_, THCudaTensor *gamma_, THCudaTensor *beta_);

int Encoding_Float_batchnorm_Backward(THCudaTensor *gradoutput_,
    THCudaTensor *input_, THCudaTensor *gradinput_,
    THCudaTensor *gradgamma_, THCudaTensor *gradbeta_,
    THCudaTensor *mean_, THCudaTensor *invstd_,
    THCudaTensor *gamma_, THCudaTensor *beta_,
    THCudaTensor *gradMean_, THCudaTensor *gradStd_, int train);

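/*
 * Illustrative sketch only (an assumption, not this library's CUDA kernel):
 * a plain-C reference for the batch-norm forward transform with per-channel
 * mean and inverse standard deviation supplied by the caller, assuming input
 * and output are contiguous N x C x L buffers (L = flattened spatial size).
 * The helper name and layout are made up for illustration.
 */
#if 0
#include <stddef.h>

/* output = gamma[c] * (input - mean[c]) * invstd[c] + beta[c] */
static void batchnorm_forward_ref(float *output, const float *input,
                                  const float *mean, const float *invstd,
                                  const float *gamma, const float *beta,
                                  size_t N, size_t C, size_t L)
{
    for (size_t n = 0; n < N; ++n)
        for (size_t c = 0; c < C; ++c)
            for (size_t l = 0; l < L; ++l) {
                size_t idx = (n * C + c) * L + l;
                output[idx] = gamma[c] * (input[idx] - mean[c]) * invstd[c]
                            + beta[c];
            }
}
#endif
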
/*
 * Per-channel reductions for the batch statistics: forward fills sum_ with
 * the per-channel sum of input_ and square_ with the per-channel sum of its
 * squares; backward maps gradSum_ and gradSquare_ back to gradInput.
 */
int Encoding_Float_sum_square_Forward(THCudaTensor *input_,
    THCudaTensor *sum_, THCudaTensor *square_);

int Encoding_Float_sum_square_Backward(
    THCudaTensor *gradInput, THCudaTensor *input_,
    THCudaTensor *gradSum_, THCudaTensor *gradSquare_);

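/*
 * Illustrative sketch only (an assumption about typical usage, not code from
 * this library): once sum_ = sum(x) and square_ = sum(x^2) have been
 * accumulated per channel (and, for synchronized BN, reduced across devices),
 * the normalization statistics follow directly. The helper name is made up
 * for illustration.
 */
#if 0
#include <math.h>

/* mean = E[x], var = E[x^2] - E[x]^2, invstd = 1 / sqrt(var + eps) */
static void stats_from_sums_ref(float *mean, float *invstd,
                                const float *sum, const float *square,
                                float count, float eps, int channels)
{
    for (int c = 0; c < channels; ++c) {
        mean[c] = sum[c] / count;
        float var = square[c] / count - mean[c] * mean[c];
        invstd[c] = 1.0f / sqrtf(var + eps);
    }
}
#endif
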
/*
 * 2-D average pooling with dilation: Y_ is pooled from X_ with a kH x kW
 * kernel, dH x dW stride, padH x padW padding and dilationH x dilationW
 * dilation; the backward pass distributes gradY_ back over the same windows
 * into gradX_.
 */
int Encoding_Float_DilatedAvgPool2d_Forward(
    THCudaTensor *X_, THCudaTensor *Y_, 
    int kH, int kW, int dH, int dW,
    int padH, int padW,
    int dilationH, int dilationW);

int Encoding_Float_DilatedAvgPool2d_Backward(
    THCudaTensor *gradX_, THCudaTensor *gradY_, 
    int kH, int kW, int dH, int dW,
    int padH, int padW,
    int dilationH, int dilationW);

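/*
 * Illustrative sketch only (an assumption, not this library's CUDA kernel):
 * a plain-C reference for dilated average pooling over an N x C x H x W
 * input, assuming padded positions are excluded from the average. The helper
 * name, layout and padding convention are assumptions for illustration.
 */
#if 0
static void dilated_avgpool2d_forward_ref(float *Y, const float *X,
    int N, int C, int H, int W, int outH, int outW,
    int kH, int kW, int dH, int dW, int padH, int padW,
    int dilationH, int dilationW)
{
    for (int n = 0; n < N; ++n)
      for (int c = 0; c < C; ++c)
        for (int oh = 0; oh < outH; ++oh)
          for (int ow = 0; ow < outW; ++ow) {
              float acc = 0.f;
              int count = 0;
              for (int i = 0; i < kH; ++i)
                  for (int j = 0; j < kW; ++j) {
                      int ih = oh * dH - padH + i * dilationH;
                      int iw = ow * dW - padW + j * dilationW;
                      if (ih >= 0 && ih < H && iw >= 0 && iw < W) {
                          acc += X[((n * C + c) * H + ih) * W + iw];
                          ++count;   /* only in-bounds samples contribute */
                      }
                  }
              Y[((n * C + c) * outH + oh) * outW + ow] =
                  count > 0 ? acc / (float)count : 0.f;
          }
}
#endif
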
/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++*/
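/* Double-precision counterparts of the single-precision declarations above. */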

int Encoding_Double_scaledl2_forward(THCudaDoubleTensor *SL,  
    THCudaDoubleTensor *X, THCudaDoubleTensor *C,  THCudaDoubleTensor *S);

int Encoding_Double_scaledl2_backward(
    THCudaDoubleTensor *GSL, THCudaDoubleTensor *GX, 
    THCudaDoubleTensor *GC, THCudaDoubleTensor *X, 
    THCudaDoubleTensor *C, THCudaDoubleTensor *S);

int Encoding_Double_aggregate_forward(THCudaDoubleTensor *E,
    THCudaDoubleTensor *A, THCudaDoubleTensor *X, THCudaDoubleTensor *C);

int Encoding_Double_aggregate_backward(THCudaDoubleTensor *GA,
    THCudaDoubleTensor *GE, THCudaDoubleTensor *A, THCudaDoubleTensor *X,
    THCudaDoubleTensor *C);

int Encoding_Double_batchnorm_Forward(THCudaDoubleTensor *output_,
    THCudaDoubleTensor *input_, THCudaDoubleTensor *mean_,
    THCudaDoubleTensor *invstd_, THCudaDoubleTensor *gamma_,
    THCudaDoubleTensor *beta_);

int Encoding_Double_batchnorm_Backward(THCudaDoubleTensor *gradoutput_,
    THCudaDoubleTensor *input_, THCudaDoubleTensor *gradinput_,
    THCudaDoubleTensor *gradgamma_, THCudaDoubleTensor *gradbeta_,
    THCudaDoubleTensor *mean_, THCudaDoubleTensor *invstd_,
    THCudaDoubleTensor *gamma_, THCudaDoubleTensor *beta_,
    THCudaDoubleTensor *gradMean_, THCudaDoubleTensor *gradStd_,
    int train);

int Encoding_Double_sum_square_Forward(THCudaDoubleTensor *input_,
    THCudaDoubleTensor *sum_, THCudaDoubleTensor *square_);

int Encoding_Double_sum_square_Backward(
    THCudaDoubleTensor *gradInput, THCudaDoubleTensor *input_,
    THCudaDoubleTensor *gradSum_, THCudaDoubleTensor *gradSquare_);

int Encoding_Double_DilatedAvgPool2d_Forward(
    THCudaDoubleTensor *X_, THCudaDoubleTensor *Y_, 
    int kH, int kW, int dH, int dW,
    int padH, int padW,
    int dilationH, int dilationW);

int Encoding_Double_DilatedAvgPool2d_Backward(
    THCudaDoubleTensor *gradX_, THCudaDoubleTensor *gradY_, 
    int kH, int kW, int dH, int dW,
    int padH, int padW,
    int dilationH, int dilationW);