Commit af6ebbae authored by John Lu, committed by Facebook GitHub Bot

Replace `runtime_error` exception with `TORCH_CHECK` in TorchAudio ffmpeg dir (1/2) (#2550)

Summary:
`std::runtime_error` does not preserve the C++ stack trace, so it is unclear to users what went wrong internally.

PyTorch's `TORCH_CHECK` macro can print the C++ stack trace when the `TORCH_SHOW_CPP_STACKTRACES` environment variable is set to 1.
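
For illustration, here is a minimal sketch of the pattern applied throughout this diff; the `get_av_frame_checked` helper and the includes below are hypothetical, not part of the change:

```cpp
#include <c10/util/Exception.h> // provides TORCH_CHECK (also available via torch headers)

extern "C" {
#include <libavutil/frame.h>
}

// Hypothetical helper mirroring the allocation checks updated in this diff.
AVFrame* get_av_frame_checked() {
  AVFrame* pFrame = av_frame_alloc();
  // Before: if (!pFrame) throw std::runtime_error("Failed to allocate AVFrame object.");
  // After: TORCH_CHECK throws c10::Error when the condition is false; if the
  // process runs with TORCH_SHOW_CPP_STACKTRACES=1, the C++ stack trace is
  // appended to the error message.
  TORCH_CHECK(pFrame, "Failed to allocate AVFrame object.");
  return pFrame;
}
```

`TORCH_CHECK` also concatenates any extra message arguments (e.g. `TORCH_CHECK(chunk_len <= request, "Requested up to ", request, " bytes")`), so the `std::ostringstream` / string-concatenation error-message code can be simplified at the same time.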

Pull Request resolved: https://github.com/pytorch/audio/pull/2550

Improves assertions in the TorchAudio ffmpeg directory.

Reviewed By: mthrok

Differential Revision: D37914953

fbshipit-source-id: 7704c41bb88b0616ae2e73961a5496bc0d95cf13
parent b53ff1b9
@@ -68,8 +68,7 @@ void AVPacketDeleter::operator()(AVPacket* p) {
 namespace {
 AVPacket* get_av_packet() {
   AVPacket* pPacket = av_packet_alloc();
-  if (!pPacket)
-    throw std::runtime_error("Failed to allocate AVPacket object.");
+  TORCH_CHECK(pPacket, "Failed to allocate AVPacket object.");
   return pPacket;
 }
 } // namespace
@@ -97,8 +96,7 @@ void AVFrameDeleter::operator()(AVFrame* p) {
 namespace {
 AVFrame* get_av_frame() {
   AVFrame* pFrame = av_frame_alloc();
-  if (!pFrame)
-    throw std::runtime_error("Failed to allocate AVFrame object.");
+  TORCH_CHECK(pFrame, "Failed to allocate AVFrame object.");
   return pFrame;
 }
 } // namespace
@@ -142,8 +140,7 @@ void AVFilterGraphDeleter::operator()(AVFilterGraph* p) {
 namespace {
 AVFilterGraph* get_filter_graph() {
   AVFilterGraph* ptr = avfilter_graph_alloc();
-  if (!ptr)
-    throw std::runtime_error("Failed to allocate resouce.");
+  TORCH_CHECK(ptr, "Failed to allocate resouce.");
   return ptr;
 }
 } // namespace
@@ -10,7 +10,7 @@ FilterGraph::FilterGraph(AVMediaType media_type) : media_type(media_type) {
     case AVMEDIA_TYPE_VIDEO:
       break;
     default:
-      throw std::runtime_error("Only audio and video type is supported.");
+      TORCH_CHECK(false, "Only audio and video type is supported.");
   }
 }
@@ -89,17 +89,14 @@ void FilterGraph::add_src(const std::string& args) {
       media_type == AVMEDIA_TYPE_AUDIO ? "abuffer" : "buffer");
   int ret = avfilter_graph_create_filter(
       &buffersrc_ctx, buffersrc, "in", args.c_str(), NULL, pFilterGraph);
-  if (ret < 0) {
-    throw std::runtime_error(
-        "Failed to create input filter: \"" + args + "\" (" +
-        av_err2string(ret) + ")");
-  }
+  TORCH_CHECK(
+      ret >= 0,
+      "Failed to create input filter: \"" + args + "\" (" + av_err2string(ret) +
+          ")");
 }
 
 void FilterGraph::add_sink() {
-  if (buffersink_ctx) {
-    throw std::runtime_error("Sink buffer is already allocated.");
-  }
+  TORCH_CHECK(!buffersink_ctx, "Sink buffer is already allocated.");
   const AVFilter* buffersink = avfilter_get_by_name(
       media_type == AVMEDIA_TYPE_AUDIO ? "abuffersink" : "buffersink");
   // Note
@@ -113,9 +110,7 @@ void FilterGraph::add_sink() {
   // `abuffersink` should not take options, and this resolved issue.
   int ret = avfilter_graph_create_filter(
       &buffersink_ctx, buffersink, "out", nullptr, nullptr, pFilterGraph);
-  if (ret < 0) {
-    throw std::runtime_error("Failed to create output filter.");
-  }
+  TORCH_CHECK(ret >= 0, "Failed to create output filter.");
 }
 
 namespace {
@@ -131,9 +126,7 @@ class InOuts {
  public:
   InOuts(const char* name, AVFilterContext* pCtx) {
     p = avfilter_inout_alloc();
-    if (!p) {
-      throw std::runtime_error("Failed to allocate AVFilterInOut.");
-    }
+    TORCH_CHECK(p, "Failed to allocate AVFilterInOut.");
     p->name = av_strdup(name);
     p->filter_ctx = pCtx;
     p->pad_idx = 0;
@@ -160,19 +153,15 @@ void FilterGraph::add_process(const std::string& filter_description) {
   int ret = avfilter_graph_parse_ptr(
       pFilterGraph, filter_description.c_str(), out, in, nullptr);
-  if (ret < 0) {
-    throw std::runtime_error(
-        "Failed to create the filter from \"" + filter_description + "\" (" +
-        av_err2string(ret) + ".)");
-  }
+  TORCH_CHECK(
+      ret >= 0,
+      "Failed to create the filter from \"" + filter_description + "\" (" +
+          av_err2string(ret) + ".)");
 }
 
 void FilterGraph::create_filter() {
   int ret = avfilter_graph_config(pFilterGraph, nullptr);
-  if (ret < 0) {
-    throw std::runtime_error(
-        "Failed to configure the graph: " + av_err2string(ret));
-  }
+  TORCH_CHECK(ret >= 0, "Failed to configure the graph: " + av_err2string(ret));
 
   // char* desc = avfilter_graph_dump(pFilterGraph.get(), NULL);
   // std::cerr << "Filter created:\n" << desc << std::endl;
   // av_free(static_cast<void*>(desc));
@@ -17,14 +17,13 @@ static int read_function(void* opaque, uint8_t* buf, int buf_size) {
     if (chunk_len == 0) {
       break;
     }
-    if (chunk_len > request) {
-      std::ostringstream message;
-      message
-          << "Requested up to " << request << " bytes but, "
-          << "received " << chunk_len << " bytes. "
-          << "The given object does not confirm to read protocol of file object.";
-      throw std::runtime_error(message.str());
-    }
+    TORCH_CHECK(
+        chunk_len <= request,
+        "Requested up to ",
+        request,
+        " bytes but, received ",
+        chunk_len,
+        " bytes. The given object does not confirm to read protocol of file object.");
     memcpy(buf, chunk.data(), chunk_len);
     buf += chunk_len;
     num_read += static_cast<int>(chunk_len);
@@ -43,9 +42,7 @@ static int64_t seek_function(void* opaque, int64_t offset, int whence) {
 AVIOContextPtr get_io_context(FileObj* opaque, int buffer_size) {
   uint8_t* buffer = static_cast<uint8_t*>(av_malloc(buffer_size));
-  if (!buffer) {
-    throw std::runtime_error("Failed to allocate buffer.");
-  }
+  TORCH_CHECK(buffer, "Failed to allocate buffer.");
 
   // If avio_alloc_context succeeds, then buffer will be cleaned up by
   // AVIOContextPtr destructor.
@@ -61,7 +58,7 @@ AVIOContextPtr get_io_context(FileObj* opaque, int buffer_size) {
   if (!av_io_ctx) {
     av_freep(&buffer);
-    throw std::runtime_error("Failed to allocate AVIO context.");
+    TORCH_CHECK(false, "Failed to allocate AVIO context.");
   }
 
   return AVIOContextPtr{av_io_ctx};
 }
@@ -94,9 +94,10 @@ torch::Tensor convert_audio_tensor(AVFrame* pFrame) {
       break;
     }
     default:
-      throw std::runtime_error(
+      TORCH_CHECK(
+          false,
           "Unsupported audio format: " +
-              std::string(av_get_sample_fmt_name(format)));
+          std::string(av_get_sample_fmt_name(format)));
   }
   for (int i = 0; i < num_planes; ++i) {
     memcpy(ptr, pFrame->extended_data[i], plane_size);
@@ -323,34 +324,34 @@ torch::Tensor convert_nv12_cuda(AVFrame* pFrame, const torch::Device& device) {
     uint8_t* tgt = y.data_ptr<uint8_t>();
     CUdeviceptr src = (CUdeviceptr)pFrame->data[0];
     int linesize = pFrame->linesize[0];
-    if (cudaSuccess !=
-        cudaMemcpy2D(
-            (void*)tgt,
-            width,
-            (const void*)src,
-            linesize,
-            width,
-            height,
-            cudaMemcpyDeviceToDevice)) {
-      throw std::runtime_error("Failed to copy Y plane to Cuda tensor.");
-    }
+    TORCH_CHECK(
+        cudaSuccess ==
+            cudaMemcpy2D(
+                (void*)tgt,
+                width,
+                (const void*)src,
+                linesize,
+                width,
+                height,
+                cudaMemcpyDeviceToDevice),
+        "Failed to copy Y plane to Cuda tensor.");
   }
   torch::Tensor uv = torch::empty({1, height / 2, width / 2, 2}, options);
   {
     uint8_t* tgt = uv.data_ptr<uint8_t>();
     CUdeviceptr src = (CUdeviceptr)pFrame->data[1];
     int linesize = pFrame->linesize[1];
-    if (cudaSuccess !=
-        cudaMemcpy2D(
-            (void*)tgt,
-            width,
-            (const void*)src,
-            linesize,
-            width,
-            height / 2,
-            cudaMemcpyDeviceToDevice)) {
-      throw std::runtime_error("Failed to copy UV plane to Cuda tensor.");
-    }
+    TORCH_CHECK(
+        cudaSuccess ==
+            cudaMemcpy2D(
+                (void*)tgt,
+                width,
+                (const void*)src,
+                linesize,
+                width,
+                height / 2,
+                cudaMemcpyDeviceToDevice),
+        "Failed to copy UV plane to Cuda tensor.");
   }
   // Upsample width and height
   uv = uv.repeat_interleave(2, -2).repeat_interleave(2, -3);
@@ -394,20 +395,23 @@ torch::Tensor convert_image_tensor(
           return convert_nv12_cuda(pFrame, device);
         case AV_PIX_FMT_P010:
         case AV_PIX_FMT_P016:
-          throw std::runtime_error(
+          TORCH_CHECK(
+              false,
               "Unsupported video format found in CUDA HW: " +
-                  std::string(av_get_pix_fmt_name(sw_format)));
+              std::string(av_get_pix_fmt_name(sw_format)));
         default:
-          throw std::runtime_error(
+          TORCH_CHECK(
+              false,
               "Unexpected video format found in CUDA HW: " +
-                  std::string(av_get_pix_fmt_name(sw_format)));
+              std::string(av_get_pix_fmt_name(sw_format)));
       }
     }
 #endif
     default:
-      throw std::runtime_error(
+      TORCH_CHECK(
+          false,
           "Unexpected video format: " +
-              std::string(av_get_pix_fmt_name(format)));
+          std::string(av_get_pix_fmt_name(format)));
   }
 }
 } // namespace
@@ -22,13 +22,11 @@ AVCodecContextPtr get_decode_context(
     } else {
       ss << "Unsupported codec: \"" << decoder_name.value() << "\".";
     }
-    throw std::runtime_error(ss.str());
+    TORCH_CHECK(pCodec, ss.str());
   }
 
   AVCodecContext* pCodecContext = avcodec_alloc_context3(pCodec);
-  if (!pCodecContext) {
-    throw std::runtime_error("Failed to allocate CodecContext.");
-  }
+  TORCH_CHECK(pCodecContext, "Failed to allocate CodecContext.");
   return AVCodecContextPtr(pCodecContext);
 }
@@ -58,10 +56,11 @@ const AVCodecHWConfig* get_cuda_config(const AVCodec* pCodec) {
       return config;
     }
   }
-  std::stringstream ss;
-  ss << "CUDA device was requested, but the codec \"" << pCodec->name
-     << "\" is not supported.";
-  throw std::runtime_error(ss.str());
+  TORCH_CHECK(
+      false,
+      "CUDA device was requested, but the codec \"",
+      pCodec->name,
+      "\" is not supported.");
 }
 #endif
@@ -72,10 +71,8 @@ void init_codec_context(
     const torch::Device& device,
     enum AVPixelFormat* pHwFmt) {
   int ret = avcodec_parameters_to_context(pCodecContext, pParams);
-  if (ret < 0) {
-    throw std::runtime_error(
-        "Failed to set CodecContext parameter: " + av_err2string(ret));
-  }
+  TORCH_CHECK(
+      ret >= 0, "Failed to set CodecContext parameter: " + av_err2string(ret));
 
 #ifdef USE_CUDA
   // Enable HW Acceleration
@@ -94,10 +91,8 @@ void init_codec_context(
   ret = avcodec_open2(pCodecContext, pCodecContext->codec, &opts);
   clean_up_dict(opts);
-  if (ret < 0) {
-    throw std::runtime_error(
-        "Failed to initialize CodecContext: " + av_err2string(ret));
-  }
+  TORCH_CHECK(
+      ret >= 0, "Failed to initialize CodecContext: " + av_err2string(ret));
 
   if (pParams->codec_type == AVMEDIA_TYPE_AUDIO && !pParams->channel_layout)
     pParams->channel_layout =
@@ -18,9 +18,10 @@ std::unique_ptr<Buffer> get_buffer(
       return std::unique_ptr<Buffer>(
          new VideoBuffer(frames_per_chunk, num_chunks, device));
     default:
-      throw std::runtime_error(
+      TORCH_CHECK(
+          false,
           std::string("Unsupported media type: ") +
-              av_get_media_type_string(type));
+          av_get_media_type_string(type));
   }
 }
@@ -47,7 +48,7 @@ std::unique_ptr<FilterGraph> get_filter_graph(
           codecpar->sample_aspect_ratio);
       break;
     default:
-      throw std::runtime_error("Only audio/video are supported.");
+      TORCH_CHECK(false, "Only audio/video are supported.");
   }
   p->add_sink();
   p->add_process(filter_description);
@@ -28,7 +28,7 @@ KeyType StreamProcessor::add_stream(
     case AVMEDIA_TYPE_VIDEO:
      break;
     default:
-      throw std::runtime_error("Only Audio and Video are supported");
+      TORCH_CHECK(false, "Only Audio and Video are supported");
   }
   KeyType key = current_key++;
   sinks.emplace(
sinks.emplace(