GPT2.h
#ifndef __GPT2_H__
#define __GPT2_H__

#include <cstdint>
#include <string>
#include <vector>

#include <tokenization.h>
#include <onnxruntime/core/session/onnxruntime_cxx_api.h>

namespace ortSamples
{
    // Status codes returned by the GPT2 wrapper.
    typedef enum _ErrorCode
    {
        SUCCESS = 0,
        MODEL_NOT_EXIST,
        CONFIG_FILE_NOT_EXIST,
        FAIL_TO_LOAD_MODEL,
        FAIL_TO_OPEN_CONFIG_FILE,
    } ErrorCode;

    // One prediction: the vocabulary index and its score.
    typedef struct _Predictions
    {
        long unsigned int index;
        float predictionvalue;
    } Predictions;

class GPT2
{
public:
    GPT2();

    ~GPT2();

    // Loads the ONNX model and prepares the inference session.
    ErrorCode Initialize();

    // Tokenizes the question text into a sequence of token ids.
    ErrorCode Preprocessing(cuBERT::FullTokenizer tokenizer,
                            char *question,
                            std::vector<long unsigned int> &input_id);

    // Runs the model on the token ids and returns the index of the top prediction.
    long unsigned int Inference(const std::vector<long unsigned int> &input_id);

private:
    std::vector<const char*> input_node_names;
    std::vector<const char*> output_node_names;
    Ort::Session *session = nullptr;
    Ort::Env env = Ort::Env(ORT_LOGGING_LEVEL_ERROR, "ONNXRuntime");
    Ort::SessionOptions sessionOptions = Ort::SessionOptions();
};

} // namespace ortSamples

#endif // __GPT2_H__
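
/*
 * Example usage: a minimal sketch of how the class above could be driven.
 * The vocab file path, the question text, and the cuBERT::FullTokenizer
 * constructor arguments shown here are assumptions, not defined by this header.
 *
 *     #include "GPT2.h"
 *
 *     int main()
 *     {
 *         ortSamples::GPT2 gpt2;
 *         if (gpt2.Initialize() != ortSamples::SUCCESS)   // load the ONNX model
 *             return 1;
 *
 *         cuBERT::FullTokenizer tokenizer("vocab.txt");   // assumed vocab file
 *
 *         char question[] = "What is ONNX Runtime?";
 *         std::vector<long unsigned int> input_id;
 *         if (gpt2.Preprocessing(tokenizer, question, input_id) != ortSamples::SUCCESS)
 *             return 1;                                   // text -> token ids
 *
 *         long unsigned int prediction = gpt2.Inference(input_id); // top prediction index
 *         return 0;
 *     }
 */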