#ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/cpu.c"
#else

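/* Scatter-multiply along `dim`: output[index[i]] *= input[i] for every
 * position i of the `dim` slice, with all remaining dimensions iterated in
 * lockstep by TH_TENSOR_DIM_APPLY3. Out-of-range indices are rejected by
 * assertIndexInBoundaries. */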
void scatter_(mul)(int dim, THTensor *output, THLongTensor *index, THTensor *input) {
  int64_t n, i, idx;
  n = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY3(real, output, int64_t, index, real, input, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
    for (i = 0; i < n; i++) {
      idx = *(index_data + i * index_stride);
      assertIndexInBoundaries(idx, output_size, TH_TENSOR_DIM_APPLY_counter);
      output_data[idx * output_stride] *= *(input_data + i * input_stride);
    })
}

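/* Scatter-divide along `dim`: output[index[i]] /= input[i], mirroring
 * scatter_(mul) above. */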
void scatter_(div)(int dim, THTensor *output, THLongTensor *index, THTensor *input) {
  int64_t n, i, idx;
  n = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY3(real, output, int64_t, index, real, input, dim, TH_TENSOR_DIM_APPLY3_SIZE_EQ_EXCEPT_DIM,
    for (i = 0; i < n; i++) {
      idx = *(index_data + i * index_stride);
      assertIndexInBoundaries(idx, output_size, TH_TENSOR_DIM_APPLY_counter);
      output_data[idx * output_stride] /= *(input_data + i * input_stride);
    })
}

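/* Scatter-mean, accumulation phase: sums input values into `output` and
 * per-index occurrence counts into `count`. The caller is expected to
 * divide `output` by `count` afterwards to obtain the mean. */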
void scatter_(mean)(int dim, THTensor *output, THLongTensor *index, THTensor *input, THTensor *count) {
  int64_t n, i, idx;
  n = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY4(real, output, int64_t, index, real, input, real, count, dim,
    for (i = 0; i < n; i++) {
      idx = *(index_data + i * index_stride);
      assertIndexInBoundaries(idx, output_size, TH_TENSOR_DIM_APPLY_counter);
      output_data[idx * output_stride] += *(input_data + i * input_stride);
      count_data[idx * count_stride]++;
    })
}

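/* Scatter-max along `dim`: keeps the running maximum in `output` and
 * records the winning source position i in `arg` (the argmax consumed by
 * the backward pass). The >= comparison makes later elements win ties. */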
void scatter_(max)(int dim, THTensor *output, THLongTensor *index, THTensor *input, THLongTensor *arg) {
  int64_t n, i, idx;
  n = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY4(real, output, int64_t, index, real, input, int64_t, arg, dim,
    for (i = 0; i < n; i++) {
      idx = *(index_data + i * index_stride);
      assertIndexInBoundaries(idx, output_size, TH_TENSOR_DIM_APPLY_counter);
      if (*(input_data + i * input_stride) >= *(output_data + idx * output_stride)) {
        output_data[idx * output_stride] = *(input_data + i * input_stride);
        arg_data[idx * arg_stride] = i;
      }
    })
}

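/* Scatter-min along `dim`: the mirror image of scatter_(max), using <= so
 * that later elements win ties; `arg` receives the argmin. */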
void scatter_(min)(int dim, THTensor *output, THLongTensor *index, THTensor *input, THLongTensor *arg) {
  int64_t n, i, idx;
  n = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY4(real, output, int64_t, index, real, input, int64_t, arg, dim,
    for (i = 0; i < n; i++) {
      idx = *(index_data + i * index_stride);
      assertIndexInBoundaries(idx, output_size, TH_TENSOR_DIM_APPLY_counter);
      if (*(input_data + i * input_stride) <= *(output_data + idx * output_stride)) {
        output_data[idx * output_stride] = *(input_data + i * input_stride);
        arg_data[idx * arg_stride] = i;
      }
    })
}

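/* Backward pass for scatter min/max: element i of `output` receives
 * grad[index[i]] only when arg[index[i]] == i, i.e. only the input element
 * that produced the forward result gets a gradient; all others stay zero.
 * Indices are not re-checked here; they are assumed to have been validated
 * in the forward pass. */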
void index_backward(int dim, THTensor *output, THLongTensor *index, THTensor *grad, THLongTensor *arg) {
  int64_t n, i, idx;
  n = THLongTensor_size(index, dim);
  TH_TENSOR_DIM_APPLY4(real, output, int64_t, index, real, grad, int64_t, arg, dim,
    for (i = 0; i < n; i++) {
      idx = *(index_data + i * index_stride);
      if (*(arg_data + idx * arg_stride) == i) output_data[i * output_stride] = *(grad_data + idx * grad_stride);
    })
}

#endif
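
/* Usage sketch (illustrative, not part of this file): with the usual TH
 * concatenation macros, scatter_(mul) expands to a per-type symbol such as
 * scatter_Float_mul when real == float. A caller holding THFloatTensor
 * handles would then write something like
 *
 *   // output pre-filled with the multiplicative identity (all ones);
 *   // index and input sized to match output except along dim 0
 *   scatter_Float_mul(0, output, index, input);
 *
 * The exact expanded name depends on how scatter_() is defined in the
 * including translation unit. */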