#include "video_reader.h"

#ifdef USE_PYTHON
#include <Python.h>
#endif

#include "../decoder/memory_buffer.h"
#include "../decoder/sync_decoder.h"

#ifdef USE_PYTHON
// If we are in a Windows environment, we need to define
// initialization functions for the _custom_ops extension
#ifdef _WIN32
PyMODINIT_FUNC PyInit_video_reader(void) {
  // No need to do anything.
  return NULL;
}
#endif
#endif // USE_PYTHON

using namespace ffmpeg;

namespace vision {
namespace video_reader {

namespace {

const AVPixelFormat defaultVideoPixelFormat = AV_PIX_FMT_RGB24;
const AVSampleFormat defaultAudioSampleFormat = AV_SAMPLE_FMT_FLT;
const AVRational timeBaseQ = AVRational{1, AV_TIME_BASE};
const size_t decoderTimeoutMs = 600000;
// A jitter can be added to the end of the range to avoid conversion/rounding
// errors. A small value of 100us is not enough to select the next frame, but
// is enough to compensate for rounding errors due to the multiple conversions.
const size_t timeBaseJitterUs = 100;
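
// getDecoderParams packs the flat argument list shared by the TorchScript ops
// into a DecoderParameters struct. One MediaFormat entry is inserted per
// requested stream type, so the decoder only produces the outputs that were
// explicitly asked for.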

DecoderParameters getDecoderParams(
    int64_t videoStartUs,
    int64_t videoEndUs,
    double seekFrameMarginUs,
    int64_t getPtsOnly,
    int64_t readVideoStream,
    int videoWidth,
    int videoHeight,
    int videoMinDimension,
    int videoMaxDimension,
    int64_t readAudioStream,
    int audioSamples,
    int audioChannels) {
  DecoderParameters params;
  params.headerOnly = getPtsOnly != 0;
  params.seekAccuracy = seekFrameMarginUs;
  params.startOffset = videoStartUs;
  params.endOffset = videoEndUs;
  params.timeoutMs = decoderTimeoutMs;
  params.preventStaleness = false;

  if (readVideoStream == 1) {
    MediaFormat videoFormat(0);
    videoFormat.type = TYPE_VIDEO;
    videoFormat.format.video.format = defaultVideoPixelFormat;
    videoFormat.format.video.width = videoWidth;
    videoFormat.format.video.height = videoHeight;
    videoFormat.format.video.minDimension = videoMinDimension;
    videoFormat.format.video.maxDimension = videoMaxDimension;
    params.formats.insert(videoFormat);
  }

  if (readAudioStream == 1) {
    MediaFormat audioFormat;
    audioFormat.type = TYPE_AUDIO;
    audioFormat.format.audio.format = defaultAudioSampleFormat;
    audioFormat.format.audio.samples = audioSamples;
    audioFormat.format.audio.channels = audioChannels;
    params.formats.insert(audioFormat);
  }

  return params;
}

// Copies each decoded payload into the preallocated frame tensor and rescales
// every message's pts from microseconds back into the stream's original time
// base {num, den}. Returns the number of bytes written.
template <typename T>
size_t fillTensor(
    std::vector<DecoderOutputMessage>& msgs,
    torch::Tensor& frame,
    torch::Tensor& framePts,
    int64_t num,
    int64_t den) {
  if (msgs.empty()) {
    return 0;
  }
  T* frameData = frame.numel() > 0 ? frame.data_ptr<T>() : nullptr;
  int64_t* framePtsData = framePts.data_ptr<int64_t>();
  TORCH_CHECK_EQ(framePts.size(0), (int64_t)msgs.size());
  size_t avgElementsInFrame = frame.numel() / msgs.size();

  size_t offset = 0;
  for (size_t i = 0; i < msgs.size(); ++i) {
    const auto& msg = msgs[i];
    // convert pts into original time_base
    AVRational avr = AVRational{(int)num, (int)den};
    framePtsData[i] = av_rescale_q(msg.header.pts, timeBaseQ, avr);
    VLOG(2) << "PTS type: " << sizeof(T) << ", us: " << msg.header.pts
            << ", original: " << framePtsData[i];

    if (frameData) {
      auto sizeInBytes = msg.payload->length();
      memcpy(frameData + offset, msg.payload->data(), sizeInBytes);
      if (sizeof(T) == sizeof(uint8_t)) {
        // Video - move by allocated frame size
        offset += avgElementsInFrame / sizeof(T);
      } else {
        // Audio - move by number of samples
        offset += sizeInBytes / sizeof(T);
      }
    }
  }
  return offset * sizeof(T);
}

size_t fillVideoTensor(
    std::vector<DecoderOutputMessage>& msgs,
    torch::Tensor& videoFrame,
    torch::Tensor& videoFramePts,
    int64_t num,
    int64_t den) {
  return fillTensor<uint8_t>(msgs, videoFrame, videoFramePts, num, den);
}

size_t fillAudioTensor(
    std::vector<DecoderOutputMessage>& msgs,
    torch::Tensor& audioFrame,
    torch::Tensor& audioFramePts,
    int64_t num,
    int64_t den) {
  return fillTensor<float>(msgs, audioFrame, audioFramePts, num, den);
}
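
// offsetsToUs converts the user-supplied pts ranges, expressed in the stream's
// own time base {num, den}, into microseconds (AV_TIME_BASE units), which is
// what the decoder's startOffset/endOffset expect. As a worked example of the
// conversion: av_rescale_q(60000, {1, 30000}, {1, AV_TIME_BASE}) is
// 60000 * (1/30000) s = 2 s = 2000000 us. Note that when only the audio stream
// is requested, the audio range is still written into videoStartUs/videoEndUs,
// since those two act as the decoder's generic start/end offsets.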

void offsetsToUs(
    double& seekFrameMargin,
    int64_t readVideoStream,
    int64_t videoStartPts,
    int64_t videoEndPts,
    int64_t videoTimeBaseNum,
    int64_t videoTimeBaseDen,
    int64_t readAudioStream,
    int64_t audioStartPts,
    int64_t audioEndPts,
    int64_t audioTimeBaseNum,
    int64_t audioTimeBaseDen,
    int64_t& videoStartUs,
    int64_t& videoEndUs) {
  seekFrameMargin *= AV_TIME_BASE;
  videoStartUs = 0;
  videoEndUs = -1;

  if (readVideoStream) {
    AVRational vr = AVRational{(int)videoTimeBaseNum, (int)videoTimeBaseDen};
    if (videoStartPts > 0) {
      videoStartUs = av_rescale_q(videoStartPts, vr, timeBaseQ);
    }
    if (videoEndPts > 0) {
      // Add jitter to the end of the range to avoid conversion/rounding
      // errors. A small value of 100us is not enough to select the next frame,
      // but is enough to compensate for rounding errors due to the multiple
      // conversions.
      videoEndUs = timeBaseJitterUs + av_rescale_q(videoEndPts, vr, timeBaseQ);
    }
  } else if (readAudioStream) {
    AVRational ar = AVRational{(int)audioTimeBaseNum, (int)audioTimeBaseDen};
    if (audioStartPts > 0) {
      videoStartUs = av_rescale_q(audioStartPts, ar, timeBaseQ);
    }
    if (audioEndPts > 0) {
      // Add jitter to the end of the range to avoid conversion/rounding
      // errors. A small value of 100us is not enough to select the next frame,
      // but is enough to compensate for rounding errors due to the multiple
      // conversions.
      videoEndUs = timeBaseJitterUs + av_rescale_q(audioEndPts, ar, timeBaseQ);
    }
  }
}
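
// readVideo runs the full decode and returns a list of 10 tensors, in order:
// videoFrame {N, H, W, 3} uint8, videoFramePts {N} int64, videoTimeBase {2}
// int, videoFps {1} float, videoDuration {1} int64, followed by audioFrame
// {numSamples, numChannels} float, audioFramePts, audioTimeBase,
// audioSampleRate, and audioDuration with analogous shapes. Streams that were
// not requested (or failed to decode) come back as empty tensors.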

torch::List<torch::Tensor> readVideo(
    bool isReadFile,
    const torch::Tensor& input_video,
    std::string videoPath,
    double seekFrameMargin,
    int64_t getPtsOnly,
    int64_t readVideoStream,
    int64_t width,
    int64_t height,
    int64_t minDimension,
    int64_t maxDimension,
    int64_t videoStartPts,
    int64_t videoEndPts,
    int64_t videoTimeBaseNum,
    int64_t videoTimeBaseDen,
    int64_t readAudioStream,
    int64_t audioSamples,
    int64_t audioChannels,
    int64_t audioStartPts,
    int64_t audioEndPts,
    int64_t audioTimeBaseNum,
    int64_t audioTimeBaseDen) {
  int64_t videoStartUs, videoEndUs;

  offsetsToUs(
      seekFrameMargin,
      readVideoStream,
      videoStartPts,
      videoEndPts,
      videoTimeBaseNum,
      videoTimeBaseDen,
      readAudioStream,
      audioStartPts,
      audioEndPts,
      audioTimeBaseNum,
      audioTimeBaseDen,
      videoStartUs,
      videoEndUs);

  DecoderParameters params = getDecoderParams(
      videoStartUs, // videoStartUs
      videoEndUs, // videoEndUs
      seekFrameMargin, // seekFrameMarginUs
      getPtsOnly, // getPtsOnly
      readVideoStream, // readVideoStream
      width, // width
      height, // height
      minDimension, // minDimension
      maxDimension, // maxDimension
      readAudioStream, // readAudioStream
      audioSamples, // audioSamples
      audioChannels // audioChannels
  );

  SyncDecoder decoder;
  std::vector<DecoderOutputMessage> audioMessages, videoMessages;
  DecoderInCallback callback = nullptr;
  std::string logMessage, logType;
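  // A file input is handed to the decoder as a URI; an in-memory input is
  // wrapped in a read callback over the tensor's raw bytes.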
  if (isReadFile) {
    params.uri = videoPath;
    logType = "file";
    logMessage = videoPath;
  } else {
    callback = MemoryBuffer::getCallback(
        input_video.data_ptr<uint8_t>(), input_video.size(0));
    logType = "memory";
    logMessage = std::to_string(input_video.size(0));
  }

  VLOG(1) << "Video decoding from " << logType << " [" << logMessage
          << "] has started";

  const auto now = std::chrono::system_clock::now();

  // Initialize the decoder and capture per-stream metadata, then drain decoded
  // frames until decode() returns non-zero (end of stream, timeout, or error).
  bool succeeded;
  DecoderMetadata audioMetadata, videoMetadata;
  std::vector<DecoderMetadata> metadata;
  if ((succeeded = decoder.init(params, std::move(callback), &metadata))) {
    for (const auto& header : metadata) {
      if (header.format.type == TYPE_VIDEO) {
        videoMetadata = header;
      } else if (header.format.type == TYPE_AUDIO) {
        audioMetadata = header;
      }
    }
    int res;
    DecoderOutputMessage msg;
    while (0 == (res = decoder.decode(&msg, decoderTimeoutMs))) {
      if (msg.header.format.type == TYPE_VIDEO) {
        videoMessages.push_back(std::move(msg));
      }
      if (msg.header.format.type == TYPE_AUDIO) {
        audioMessages.push_back(std::move(msg));
      }
      msg.payload.reset();
    }
  } else {
    LOG(ERROR) << "Decoder initialization has failed";
  }
  const auto then = std::chrono::system_clock::now();
  VLOG(1) << "Video decoding from " << logType << " [" << logMessage
          << "] has finished, "
          << std::chrono::duration_cast<std::chrono::microseconds>(then - now)
                 .count()
          << " us";

  decoder.shutdown();

  // video section: frames come back as a {N, H, W, 3} uint8 tensor in RGB24;
  // the defaults below are the empty tensors returned when there is no video
  torch::Tensor videoFrame = torch::zeros({0}, torch::kByte);
  torch::Tensor videoFramePts = torch::zeros({0}, torch::kLong);
  torch::Tensor videoTimeBase = torch::zeros({0}, torch::kInt);
  torch::Tensor videoFps = torch::zeros({0}, torch::kFloat);
  torch::Tensor videoDuration = torch::zeros({0}, torch::kLong);

  if (succeeded && readVideoStream == 1) {
    if (!videoMessages.empty()) {
      const auto& header = videoMetadata;
      const auto& format = header.format.format.video;
      int numVideoFrames = videoMessages.size();
      int outHeight = format.height;
      int outWidth = format.width;
      int numChannels = 3; // decoder guarantees the default AV_PIX_FMT_RGB24

      size_t expectedWrittenBytes = 0;
      if (getPtsOnly == 0) {
        videoFrame = torch::zeros(
            {numVideoFrames, outHeight, outWidth, numChannels}, torch::kByte);
        expectedWrittenBytes =
            (size_t)numVideoFrames * outHeight * outWidth * numChannels;
      }

      videoFramePts = torch::zeros({numVideoFrames}, torch::kLong);

      VLOG(2) << "video duration: " << header.duration
              << ", fps: " << header.fps << ", num: " << header.num
              << ", den: " << header.den << ", num frames: " << numVideoFrames;

      auto numberWrittenBytes = fillVideoTensor(
          videoMessages, videoFrame, videoFramePts, header.num, header.den);

      TORCH_CHECK_EQ(numberWrittenBytes, expectedWrittenBytes);

      videoTimeBase = torch::zeros({2}, torch::kInt);
      int* videoTimeBaseData = videoTimeBase.data_ptr<int>();
      videoTimeBaseData[0] = header.num;
      videoTimeBaseData[1] = header.den;

      videoFps = torch::zeros({1}, torch::kFloat);
      float* videoFpsData = videoFps.data_ptr<float>();
      videoFpsData[0] = header.fps;

      videoDuration = torch::zeros({1}, torch::kLong);
      int64_t* videoDurationData = videoDuration.data_ptr<int64_t>();
      AVRational vr = AVRational{(int)header.num, (int)header.den};
      videoDurationData[0] = av_rescale_q(header.duration, timeBaseQ, vr);
      VLOG(1) << "Video decoding from " << logType << " [" << logMessage
              << "] filled video tensors";
    } else {
      VLOG(1) << "Miss video stream";
    }
  }

  // audio section: samples come back as a {numSamples, numChannels} float
  // tensor; the defaults below are the empty tensors returned when there is no
  // audio
  torch::Tensor audioFrame = torch::zeros({0}, torch::kFloat);
  torch::Tensor audioFramePts = torch::zeros({0}, torch::kLong);
  torch::Tensor audioTimeBase = torch::zeros({0}, torch::kInt);
  torch::Tensor audioSampleRate = torch::zeros({0}, torch::kInt);
  torch::Tensor audioDuration = torch::zeros({0}, torch::kLong);
  if (succeeded && readAudioStream == 1) {
    if (!audioMessages.empty()) {
      const auto& header = audioMetadata;
      const auto& format = header.format.format.audio;

      int64_t outAudioChannels = format.channels;
      int bytesPerSample =
          av_get_bytes_per_sample(static_cast<AVSampleFormat>(format.format));

      int numAudioFrames = audioMessages.size();
      int64_t numAudioSamples = 0;
      if (getPtsOnly == 0) {
        int64_t frameSizeTotal = 0;
        for (auto const& audioMessage : audioMessages) {
          frameSizeTotal += audioMessage.payload->length();
        }

        TORCH_CHECK_EQ(frameSizeTotal % (outAudioChannels * bytesPerSample), 0);
        numAudioSamples = frameSizeTotal / (outAudioChannels * bytesPerSample);

        audioFrame =
            torch::zeros({numAudioSamples, outAudioChannels}, torch::kFloat);
      }
      audioFramePts = torch::zeros({numAudioFrames}, torch::kLong);

      VLOG(2) << "audio duration: " << header.duration
              << ", channels: " << format.channels
              << ", sample rate: " << format.samples << ", num: " << header.num
              << ", den: " << header.den;

      auto numberWrittenBytes = fillAudioTensor(
          audioMessages, audioFrame, audioFramePts, header.num, header.den);
      TORCH_CHECK_EQ(
          numberWrittenBytes,
          numAudioSamples * outAudioChannels * sizeof(float));

      audioTimeBase = torch::zeros({2}, torch::kInt);
      int* audioTimeBaseData = audioTimeBase.data_ptr<int>();
      audioTimeBaseData[0] = header.num;
      audioTimeBaseData[1] = header.den;

      audioSampleRate = torch::zeros({1}, torch::kInt);
      int* audioSampleRateData = audioSampleRate.data_ptr<int>();
      audioSampleRateData[0] = format.samples;

      audioDuration = torch::zeros({1}, torch::kLong);
      int64_t* audioDurationData = audioDuration.data_ptr<int64_t>();
      AVRational ar = AVRational{(int)header.num, (int)header.den};
      audioDurationData[0] = av_rescale_q(header.duration, timeBaseQ, ar);
      VLOG(1) << "Video decoding from " << logType << " [" << logMessage
              << "] filled audio tensors";
    } else {
      VLOG(1) << "Miss audio stream";
    }
  }

  torch::List<torch::Tensor> result;
  result.push_back(std::move(videoFrame));
  result.push_back(std::move(videoFramePts));
  result.push_back(std::move(videoTimeBase));
  result.push_back(std::move(videoFps));
  result.push_back(std::move(videoDuration));
  result.push_back(std::move(audioFrame));
  result.push_back(std::move(audioFramePts));
  result.push_back(std::move(audioTimeBase));
  result.push_back(std::move(audioSampleRate));
  result.push_back(std::move(audioDuration));

  VLOG(1) << "Video decoding from " << logType << " [" << logMessage
          << "] about to return";

  return result;
}
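
// probeVideo opens the input only to read stream metadata (getPtsOnly = 1, so
// no frame payloads are materialized) and returns 6 tensors: videoTimeBase,
// videoFps, videoDuration, audioTimeBase, audioSampleRate, audioDuration.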

torch::List<torch::Tensor> probeVideo(
    bool isReadFile,
    const torch::Tensor& input_video,
    std::string videoPath) {
  DecoderParameters params = getDecoderParams(
      0, // videoStartUs
      -1, // videoEndUs
      0, // seekFrameMarginUs
      1, // getPtsOnly
      1, // readVideoStream
      0, // width
      0, // height
      0, // minDimension
      0, // maxDimension
      1, // readAudioStream
      0, // audioSamples
      0 // audioChannels
  );

  SyncDecoder decoder;
  DecoderInCallback callback = nullptr;
  std::string logMessage, logType;
  if (isReadFile) {
    params.uri = videoPath;
    logType = "file";
    logMessage = videoPath;
  } else {
    callback = MemoryBuffer::getCallback(
        input_video.data_ptr<uint8_t>(), input_video.size(0));
    logType = "memory";
    logMessage = std::to_string(input_video.size(0));
  }

  VLOG(1) << "Video probing from " << logType << " [" << logMessage
          << "] has started";

  const auto now = std::chrono::system_clock::now();

  bool succeeded;
  bool gotAudio = false, gotVideo = false;
  DecoderMetadata audioMetadata, videoMetadata;
  std::vector<DecoderMetadata> metadata;
  if ((succeeded = decoder.init(params, std::move(callback), &metadata))) {
    for (const auto& header : metadata) {
      if (header.format.type == TYPE_VIDEO) {
        gotVideo = true;
        videoMetadata = header;
      } else if (header.format.type == TYPE_AUDIO) {
        gotAudio = true;
        audioMetadata = header;
      }
    }
    const auto then = std::chrono::system_clock::now();
    VLOG(1) << "Video probing from " << logType << " [" << logMessage
            << "] has finished, "
            << std::chrono::duration_cast<std::chrono::microseconds>(then - now)
                   .count()
            << " us";
  } else {
    LOG(ERROR) << "Decoder initialization has failed";
  }

  decoder.shutdown();

  // video section
  torch::Tensor videoTimeBase = torch::zeros({0}, torch::kInt);
  torch::Tensor videoFps = torch::zeros({0}, torch::kFloat);
  torch::Tensor videoDuration = torch::zeros({0}, torch::kLong);

  if (succeeded && gotVideo) {
    videoTimeBase = torch::zeros({2}, torch::kInt);
    int* videoTimeBaseData = videoTimeBase.data_ptr<int>();
    const auto& header = videoMetadata;

    videoTimeBaseData[0] = header.num;
    videoTimeBaseData[1] = header.den;

    videoFps = torch::zeros({1}, torch::kFloat);
    float* videoFpsData = videoFps.data_ptr<float>();
    videoFpsData[0] = header.fps;

    videoDuration = torch::zeros({1}, torch::kLong);
    int64_t* videoDurationData = videoDuration.data_ptr<int64_t>();
    AVRational avr = AVRational{(int)header.num, (int)header.den};
    videoDurationData[0] = av_rescale_q(header.duration, timeBaseQ, avr);

    VLOG(2) << "Prob fps: " << header.fps << ", duration: " << header.duration
            << ", num: " << header.num << ", den: " << header.den;

    VLOG(1) << "Video probing from " << logType << " [" << logMessage
            << "] filled video tensors";
  } else {
    LOG(ERROR) << "Miss video stream";
  }

  // audio section
  torch::Tensor audioTimeBase = torch::zeros({0}, torch::kInt);
  torch::Tensor audioSampleRate = torch::zeros({0}, torch::kInt);
  torch::Tensor audioDuration = torch::zeros({0}, torch::kLong);

  if (succeeded && gotAudio) {
    audioTimeBase = torch::zeros({2}, torch::kInt);
    int* audioTimeBaseData = audioTimeBase.data_ptr<int>();
    const auto& header = audioMetadata;
    const auto& media = header.format;
    const auto& format = media.format.audio;

    audioTimeBaseData[0] = header.num;
    audioTimeBaseData[1] = header.den;

    audioSampleRate = torch::zeros({1}, torch::kInt);
    int* audioSampleRateData = audioSampleRate.data_ptr<int>();
    audioSampleRateData[0] = format.samples;

    audioDuration = torch::zeros({1}, torch::kLong);
    int64_t* audioDurationData = audioDuration.data_ptr<int64_t>();
    AVRational avr = AVRational{(int)header.num, (int)header.den};
    audioDurationData[0] = av_rescale_q(header.duration, timeBaseQ, avr);

    VLOG(2) << "Prob sample rate: " << format.samples
            << ", duration: " << header.duration << ", num: " << header.num
            << ", den: " << header.den;

    VLOG(1) << "Video probing from " << logType << " [" << logMessage
            << "] filled audio tensors";
  } else {
    VLOG(1) << "Miss audio stream";
  }

  torch::List<torch::Tensor> result;
  result.push_back(std::move(videoTimeBase));
  result.push_back(std::move(videoFps));
  result.push_back(std::move(videoDuration));
  result.push_back(std::move(audioTimeBase));
  result.push_back(std::move(audioSampleRate));
  result.push_back(std::move(audioDuration));

  VLOG(1) << "Video probing from " << logType << " [" << logMessage
          << "] is about to return";

  return result;
}

} // namespace
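
// Public entry points. These forward to the helpers in the anonymous namespace
// above and are registered with the dispatcher in TORCH_LIBRARY_FRAGMENT at
// the bottom of this file.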

torch::List<torch::Tensor> read_video_from_memory(
    torch::Tensor input_video,
    double seekFrameMargin,
    int64_t getPtsOnly,
    int64_t readVideoStream,
    int64_t width,
    int64_t height,
    int64_t minDimension,
    int64_t maxDimension,
    int64_t videoStartPts,
    int64_t videoEndPts,
    int64_t videoTimeBaseNum,
    int64_t videoTimeBaseDen,
    int64_t readAudioStream,
    int64_t audioSamples,
    int64_t audioChannels,
    int64_t audioStartPts,
    int64_t audioEndPts,
    int64_t audioTimeBaseNum,
    int64_t audioTimeBaseDen) {
  C10_LOG_API_USAGE_ONCE(
      "torchvision.csrc.io.video_reader.video_reader.read_video_from_memory");
  return readVideo(
      false,
      input_video,
      "", // videoPath
      seekFrameMargin,
      getPtsOnly,
      readVideoStream,
      width,
      height,
      minDimension,
      maxDimension,
      videoStartPts,
      videoEndPts,
      videoTimeBaseNum,
      videoTimeBaseDen,
      readAudioStream,
      audioSamples,
      audioChannels,
      audioStartPts,
      audioEndPts,
      audioTimeBaseNum,
      audioTimeBaseDen);
}

torch::List<torch::Tensor> read_video_from_file(
    std::string videoPath,
    double seekFrameMargin,
    int64_t getPtsOnly,
    int64_t readVideoStream,
    int64_t width,
    int64_t height,
    int64_t minDimension,
    int64_t maxDimension,
    int64_t videoStartPts,
    int64_t videoEndPts,
    int64_t videoTimeBaseNum,
    int64_t videoTimeBaseDen,
    int64_t readAudioStream,
    int64_t audioSamples,
    int64_t audioChannels,
    int64_t audioStartPts,
    int64_t audioEndPts,
    int64_t audioTimeBaseNum,
    int64_t audioTimeBaseDen) {
  C10_LOG_API_USAGE_ONCE(
      "torchvision.csrc.io.video_reader.video_reader.read_video_from_file");
  torch::Tensor dummy_input_video = torch::ones({0});
  return readVideo(
      true,
      dummy_input_video,
      videoPath,
      seekFrameMargin,
      getPtsOnly,
      readVideoStream,
      width,
      height,
      minDimension,
      maxDimension,
      videoStartPts,
      videoEndPts,
      videoTimeBaseNum,
      videoTimeBaseDen,
      readAudioStream,
      audioSamples,
      audioChannels,
      audioStartPts,
      audioEndPts,
      audioTimeBaseNum,
      audioTimeBaseDen);
}

torch::List<torch::Tensor> probe_video_from_memory(torch::Tensor input_video) {
  C10_LOG_API_USAGE_ONCE(
      "torchvision.csrc.io.video_reader.video_reader.probe_video_from_memory");
  return probeVideo(false, input_video, "");
}

torch::List<torch::Tensor> probe_video_from_file(std::string videoPath) {
  C10_LOG_API_USAGE_ONCE(
      "torchvision.csrc.io.video_reader.video_reader.probe_video_from_file");
  torch::Tensor dummy_input_video = torch::ones({0});
  return probeVideo(true, dummy_input_video, videoPath);
}

TORCH_LIBRARY_FRAGMENT(video_reader, m) {
  m.def("read_video_from_memory", read_video_from_memory);
  m.def("read_video_from_file", read_video_from_file);
  m.def("probe_video_from_memory", probe_video_from_memory);
  m.def("probe_video_from_file", probe_video_from_file);
}
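
// The ops registered above are reachable from Python through torch.ops once
// the extension is loaded, e.g. (illustrative only; arguments abbreviated):
//   torch.ops.video_reader.probe_video_from_file("video.mp4")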

} // namespace video_reader
} // namespace vision