Commit 72404de9 authored by moto's avatar moto Committed by Facebook GitHub Bot
Browse files

Add StreamWriter (#2628)

Summary:
This commit adds FFmpeg-based encoder StreamWriter class.
StreamWriter is pretty much the opposite of the StreamReader class, and
it supports:

* Encoding audio / still image / video
* Exporting to local file / streaming protocol / devices etc...
* File-like object support (in later commit)
* HW video encoding (in later commit)

See also: https://fburl.com/gslide/z85kn5a9 (Meta internal)

Pull Request resolved: https://github.com/pytorch/audio/pull/2628

Reviewed By: nateanl

Differential Revision: D38816650

Pulled By: mthrok

fbshipit-source-id: a9343b0d55755e186971dc96fb86eb52daa003c8
parent 068fc29c
......@@ -80,7 +80,7 @@ fi
(
set -x
conda install -y -c conda-forge ${NUMBA_DEV_CHANNEL} 'librosa>=0.8.0' parameterized 'requests>=2.20'
pip install kaldi-io SoundFile coverage pytest pytest-cov 'scipy==1.7.3' transformers expecttest unidecode inflect Pillow sentencepiece pytorch-lightning 'protobuf<4.21.0' demucs
pip install kaldi-io SoundFile coverage pytest pytest-cov 'scipy==1.7.3' transformers expecttest unidecode inflect Pillow sentencepiece pytorch-lightning 'protobuf<4.21.0' demucs tinytag
)
# Install fairseq
git clone https://github.com/pytorch/fairseq
......
......@@ -88,7 +88,8 @@ esac
transformers \
unidecode \
'protobuf<4.21.0' \
demucs
demucs \
tinytag
)
# Install fairseq
git clone https://github.com/pytorch/fairseq
......
......@@ -33,3 +33,10 @@ StreamReaderOutputStream
.. autoclass:: StreamReaderOutputStream
:members:
StreamWriter
------------
.. autoclass:: StreamWriter
:members:
import torch
import torchaudio
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_asset_path,
is_ffmpeg_available,
nested_params,
rgb_to_yuv_ccir,
skipIfNoFFmpeg,
skipIfNoModule,
TempDirMixin,
TorchaudioTestCase,
)
if is_ffmpeg_available():
from torchaudio.io import StreamReader, StreamWriter
# TODO:
# Get rid of StreamReader and use synthetic data.
def get_audio_chunk(fmt, sample_rate, num_channels):
path = get_asset_path("nasa_13013.mp4")
s = StreamReader(path)
for _ in range(num_channels):
s.add_basic_audio_stream(-1, -1, format=fmt, sample_rate=sample_rate)
s.stream()
s.process_all_packets()
chunks = [chunk[:, :1] for chunk in s.pop_chunks()]
return torch.cat(chunks, 1)
def get_video_chunk(fmt, frame_rate, *, width, height):
path = get_asset_path("nasa_13013_no_audio.mp4")
s = StreamReader(path)
s.add_basic_video_stream(-1, -1, format=fmt, frame_rate=frame_rate, width=width, height=height)
s.stream()
s.process_all_packets()
(chunk,) = s.pop_chunks()
return chunk
@skipIfNoFFmpeg
class StreamWriterInterfaceTest(TempDirMixin, TorchaudioTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
torchaudio.utils.ffmpeg_utils.set_log_level(32)
@classmethod
def tearDownClass(cls):
torchaudio.utils.ffmpeg_utils.set_log_level(8)
super().tearDownClass()
def get_dst(self, path):
return self.get_temp_path(path)
def get_buf(self, path):
with open(self.get_temp_path(path), "rb") as fileobj:
return fileobj.read()
@skipIfNoModule("tinytag")
def test_metadata_overwrite(self):
"""When set_metadata is called multiple times, only entries from the last call are saved"""
from tinytag import TinyTag
src_fmt = "s16"
sample_rate = 8000
num_channels = 1
path = self.get_dst("test.mp3")
s = StreamWriter(path, format="mp3")
s.set_metadata(metadata={"artist": "torchaudio", "title": "foo"})
s.set_metadata(metadata={"title": self.id()})
s.add_audio_stream(sample_rate, num_channels, format=src_fmt)
chunk = get_audio_chunk(src_fmt, sample_rate, num_channels)
with s.open():
s.write_audio_chunk(0, chunk)
tag = TinyTag.get(path)
assert tag.artist is None
assert tag.title == self.id()
@nested_params(
# Note: "s64" causes UB (left shift of 1 by 63 places cannot be represented in type 'long')
# thus it's omitted.
["u8", "s16", "s32", "flt", "dbl"],
[8000, 16000, 44100],
[1, 2, 4],
)
def test_valid_audio_muxer_and_codecs_wav(self, src_fmt, sample_rate, num_channels):
"""Tensor of various dtypes can be saved as wav format."""
path = self.get_dst("test.wav")
s = StreamWriter(path, format="wav")
s.set_metadata(metadata={"artist": "torchaudio", "title": self.id()})
s.add_audio_stream(sample_rate, num_channels, format=src_fmt)
chunk = get_audio_chunk(src_fmt, sample_rate, num_channels)
with s.open():
s.write_audio_chunk(0, chunk)
@parameterized.expand(
[
("mp3", 8000, 1, "s32p", None),
("mp3", 16000, 2, "fltp", None),
("mp3", 44100, 1, "s16p", {"abr": "true"}),
("flac", 8000, 1, "s16", None),
("flac", 16000, 2, "s32", None),
("opus", 48000, 2, None, {"strict": "experimental"}),
("adts", 8000, 1, "fltp", None), # AAC format
]
)
def test_valid_audio_muxer_and_codecs(self, ext, sample_rate, num_channels, encoder_format, encoder_option):
"""Tensor of various dtypes can be saved as given format."""
path = self.get_dst(f"test.{ext}")
s = StreamWriter(path, format=ext)
s.set_metadata(metadata={"artist": "torchaudio", "title": self.id()})
s.add_audio_stream(sample_rate, num_channels, encoder_option=encoder_option, encoder_format=encoder_format)
chunk = get_audio_chunk("flt", sample_rate, num_channels)
with s.open():
s.write_audio_chunk(0, chunk)
@nested_params(
[
"gray8",
"rgb24",
"bgr24",
"yuv444p",
],
[(128, 64), (720, 576)],
)
def test_valid_video_muxer_and_codecs(self, src_format, size):
"""Image tensors of various formats can be saved as mp4"""
ext = "mp4"
frame_rate = 10
width, height = size
path = self.get_dst(f"test.{ext}")
s = StreamWriter(path, format=ext)
s.add_video_stream(frame_rate, width, height, format=src_format)
chunk = get_video_chunk(src_format, frame_rate, width=width, height=height)
with s.open():
s.write_video_chunk(0, chunk)
def test_valid_audio_video_muxer(self):
"""Audio/image tensors are saved as single video"""
ext = "mp4"
sample_rate = 16000
num_channels = 3
frame_rate = 30000 / 1001
width, height = 720, 576
video_fmt = "yuv444p"
path = self.get_dst(f"test.{ext}")
s = StreamWriter(path, format=ext)
s.set_metadata({"artist": "torchaudio", "title": self.id()})
s.add_audio_stream(sample_rate, num_channels)
s.add_video_stream(frame_rate, width, height, format=video_fmt)
audio = get_audio_chunk("flt", sample_rate, num_channels)
video = get_video_chunk(video_fmt, frame_rate, height=height, width=width)
with s.open():
s.write_audio_chunk(0, audio)
s.write_video_chunk(1, video)
@nested_params(
[
("gray8", "gray8"),
("rgb24", "rgb24"),
("bgr24", "bgr24"),
("yuv444p", "yuv444p"),
("rgb24", "yuv444p"),
("bgr24", "yuv444p"),
],
)
def test_video_raw_out(self, formats):
"""Verify that viedo out is correct with/without color space conversion"""
filename = "test.rawvideo"
frame_rate = 30000 / 1001
width, height = 720, 576
src_fmt, encoder_fmt = formats
frames = int(frame_rate * 2)
channels = 1 if src_fmt == "gray8" else 3
# Generate data
src_size = (frames, channels, height, width)
chunk = torch.randint(low=0, high=255, size=src_size, dtype=torch.uint8)
# Write data
dst = self.get_dst(filename)
s = StreamWriter(dst, format="rawvideo")
s.add_video_stream(frame_rate, width, height, format=src_fmt, encoder_format=encoder_fmt)
with s.open():
s.write_video_chunk(0, chunk)
# Fetch the written data
buf = self.get_buf(filename)
result = torch.frombuffer(buf, dtype=torch.uint8)
if encoder_fmt.endswith("p"):
result = result.reshape(src_size)
else:
result = result.reshape(frames, height, width, channels).permute(0, 3, 1, 2)
# check that they are same
if src_fmt == encoder_fmt:
expected = chunk
else:
if src_fmt == "bgr24":
chunk = chunk[:, [2, 1, 0], :, :]
expected = rgb_to_yuv_ccir(chunk)
self.assertEqual(expected, result, atol=1, rtol=0)
......@@ -149,6 +149,9 @@ if(USE_FFMPEG)
ffmpeg/stream_reader/stream_reader.cpp
ffmpeg/stream_reader/stream_reader_wrapper.cpp
ffmpeg/stream_reader/stream_reader_binding.cpp
ffmpeg/stream_writer/stream_writer.cpp
ffmpeg/stream_writer/stream_writer_wrapper.cpp
ffmpeg/stream_writer/stream_writer_binding.cpp
ffmpeg/utils.cpp
)
message(STATUS "FFMPEG_ROOT=$ENV{FFMPEG_ROOT}")
......
......@@ -47,10 +47,18 @@ void AVFormatInputContextDeleter::operator()(AVFormatContext* p) {
AVFormatInputContextPtr::AVFormatInputContextPtr(AVFormatContext* p)
: Wrapper<AVFormatContext, AVFormatInputContextDeleter>(p) {}
void AVFormatOutputContextDeleter::operator()(AVFormatContext* p) {
avformat_free_context(p);
};
AVFormatOutputContextPtr::AVFormatOutputContextPtr(AVFormatContext* p)
: Wrapper<AVFormatContext, AVFormatOutputContextDeleter>(p) {}
////////////////////////////////////////////////////////////////////////////////
// AVIO
////////////////////////////////////////////////////////////////////////////////
void AVIOContextDeleter::operator()(AVIOContext* p) {
avio_flush(p);
av_freep(&p->buffer);
av_freep(&p);
};
......
......@@ -93,6 +93,15 @@ struct AVFormatInputContextPtr
explicit AVFormatInputContextPtr(AVFormatContext* p);
};
struct AVFormatOutputContextDeleter {
void operator()(AVFormatContext* p);
};
struct AVFormatOutputContextPtr
: public Wrapper<AVFormatContext, AVFormatOutputContextDeleter> {
explicit AVFormatOutputContextPtr(AVFormatContext* p);
};
////////////////////////////////////////////////////////////////////////////////
// AVIO
////////////////////////////////////////////////////////////////////////////////
......
This diff is collapsed.
#pragma once
#include <torch/torch.h>
#include <torchaudio/csrc/ffmpeg/ffmpeg.h>
#include <torchaudio/csrc/ffmpeg/filter_graph.h>
namespace torchaudio {
namespace ffmpeg {
struct OutputStream {
AVStream* stream;
AVCodecContextPtr codec_ctx;
std::unique_ptr<FilterGraph> filter;
AVFramePtr src_frame;
AVFramePtr dst_frame;
// The number of samples written so far
int64_t num_frames;
// Audio-only: The maximum frames that frame can hold
int64_t frame_capacity;
};
class StreamWriter {
AVFormatOutputContextPtr pFormatContext;
std::vector<OutputStream> streams;
AVPacketPtr pkt;
public:
explicit StreamWriter(AVFormatOutputContextPtr&& p);
// Non-copyable
StreamWriter(const StreamWriter&) = delete;
StreamWriter& operator=(const StreamWriter&) = delete;
//////////////////////////////////////////////////////////////////////////////
// Query methods
//////////////////////////////////////////////////////////////////////////////
public:
// Print the configured outputs
void dump_format(int64_t i);
//////////////////////////////////////////////////////////////////////////////
// Configure methods
//////////////////////////////////////////////////////////////////////////////
public:
void add_audio_stream(
int64_t sample_rate,
int64_t num_channels,
const std::string& format,
const c10::optional<std::string>& encoder,
const c10::optional<OptionDict>& encoder_option,
const c10::optional<std::string>& encoder_format);
void add_video_stream(
double frame_rate,
int64_t width,
int64_t height,
const std::string& format,
const c10::optional<std::string>& encoder,
const c10::optional<OptionDict>& encoder_option,
const c10::optional<std::string>& encoder_format);
void set_metadata(const OptionDict& metadata);
private:
AVStream* add_stream(AVCodecContextPtr& ctx);
//////////////////////////////////////////////////////////////////////////////
// Write methods
//////////////////////////////////////////////////////////////////////////////
public:
void open(const c10::optional<OptionDict>& opt);
void close();
void write_audio_chunk(int i, const torch::Tensor& chunk);
void write_video_chunk(int i, const torch::Tensor& chunk);
void flush();
private:
void validate_stream(int i, enum AVMediaType);
void write_planar_video(
OutputStream& os,
const torch::Tensor& chunk,
int num_planes);
void write_interlaced_video(OutputStream& os, const torch::Tensor& chunk);
void process_frame(
AVFrame* src_frame,
std::unique_ptr<FilterGraph>& filter,
AVFrame* dst_frame,
AVCodecContextPtr& c,
AVStream* st);
void encode_frame(AVFrame* dst_frame, AVCodecContextPtr& c, AVStream* st);
void flush_stream(OutputStream& os);
};
} // namespace ffmpeg
} // namespace torchaudio
#include <torch/script.h>
#include <torchaudio/csrc/ffmpeg/stream_writer/stream_writer_wrapper.h>
namespace torchaudio {
namespace ffmpeg {
namespace {
c10::intrusive_ptr<StreamWriterBinding> init(
const std::string& dst,
const c10::optional<std::string>& format) {
return c10::make_intrusive<StreamWriterBinding>(
get_output_format_context(dst, format));
}
using S = const c10::intrusive_ptr<StreamWriterBinding>&;
TORCH_LIBRARY_FRAGMENT(torchaudio, m) {
m.class_<StreamWriterBinding>("ffmpeg_StreamWriter")
.def(torch::init<>(init))
.def(
"add_audio_stream",
[](S s,
int64_t sample_rate,
int64_t num_channels,
const std::string& format,
const c10::optional<std::string>& encoder,
const c10::optional<OptionDict>& encoder_option,
const c10::optional<std::string>& encoder_format) {
s->add_audio_stream(
sample_rate,
num_channels,
format,
encoder,
encoder_option,
encoder_format);
})
.def(
"add_video_stream",
[](S s,
double frame_rate,
int64_t width,
int64_t height,
const std::string& format,
const c10::optional<std::string>& encoder,
const c10::optional<OptionDict>& encoder_option,
const c10::optional<std::string>& encoder_format) {
s->add_video_stream(
frame_rate,
width,
height,
format,
encoder,
encoder_option,
encoder_format);
})
.def(
"set_metadata",
[](S s, const OptionDict& metadata) { s->set_metadata(metadata); })
.def("dump_format", [](S s, int64_t i) { s->dump_format(i); })
.def(
"open",
[](S s, const c10::optional<OptionDict>& option) { s->open(option); })
.def("close", [](S s) { s->close(); })
.def(
"write_audio_chunk",
[](S s, int64_t i, const torch::Tensor& chunk) {
s->write_audio_chunk(static_cast<int>(i), chunk);
})
.def(
"write_video_chunk",
[](S s, int64_t i, const torch::Tensor& chunk) {
s->write_video_chunk(static_cast<int>(i), chunk);
})
.def("flush", [](S s) { s->flush(); });
}
} // namespace
} // namespace ffmpeg
} // namespace torchaudio
#include <torchaudio/csrc/ffmpeg/stream_writer/stream_writer_wrapper.h>
namespace torchaudio {
namespace ffmpeg {
AVFormatOutputContextPtr get_output_format_context(
const std::string& dst,
const c10::optional<std::string>& format) {
AVFormatContext* p = avformat_alloc_context();
TORCH_CHECK(p, "Failed to allocate AVFormatContext.");
int ret = avformat_alloc_output_context2(
&p, nullptr, format ? format.value().c_str() : nullptr, dst.c_str());
TORCH_CHECK(
ret >= 0,
"Failed to open output \"",
dst,
"\" (",
av_err2string(ret),
").");
return AVFormatOutputContextPtr(p);
}
StreamWriterBinding::StreamWriterBinding(AVFormatOutputContextPtr&& p)
: StreamWriter(std::move(p)) {}
} // namespace ffmpeg
} // namespace torchaudio
#pragma once
#include <torchaudio/csrc/ffmpeg/stream_writer/stream_writer.h>
namespace torchaudio {
namespace ffmpeg {
// create format context for writing media
AVFormatOutputContextPtr get_output_format_context(
const std::string& dst,
const c10::optional<std::string>& format);
class StreamWriterBinding : public StreamWriter,
public torch::CustomClassHolder {
public:
explicit StreamWriterBinding(AVFormatOutputContextPtr&& p);
};
} // namespace ffmpeg
} // namespace torchaudio
import torchaudio
_LAZILY_IMPORTED = [
_STREAM_READER = [
"StreamReader",
"StreamReaderSourceStream",
"StreamReaderSourceAudioStream",
......@@ -8,15 +8,28 @@ _LAZILY_IMPORTED = [
"StreamReaderOutputStream",
]
_STREAM_WRITER = [
"StreamWriter",
]
_LAZILY_IMPORTED = _STREAM_READER + _STREAM_WRITER
def __getattr__(name: str):
if name in _LAZILY_IMPORTED:
torchaudio._extension._init_ffmpeg()
if name in _STREAM_READER:
from . import _stream_reader
item = getattr(_stream_reader, name)
else:
from . import _stream_writer
item = getattr(_stream_writer, name)
globals()[name] = item
return item
raise AttributeError(f"module {__name__} has no attribute {name}")
......
from typing import Dict, Optional
import torch
def _format_doc(**kwargs):
def decorator(obj):
obj.__doc__ = obj.__doc__.format(**kwargs)
return obj
return decorator
_encoder = """The name of the encoder to be used.
When provided, use the specified encoder instead of the default one.
To list the available encoders, you can use ``ffmpeg -h encoders`` command.
Default: ``None``."""
_encoder_option = """Options passed to encoder.
Mapping from str to str.
To list encoder options for a encoder, you can use
``ffmpeg -h encoder=<ENCODER>`` command.
Default: ``None``."""
_encoder_format = """Format used to encode media.
When encoder supports multiple formats, passing this argument will override
the format used for encoding.
To list supported formats for the encoder, you can use
``ffmpeg -h encoder=<ENCODER>`` command.
Default: ``None``."""
_format_common_args = _format_doc(
encoder=_encoder,
encoder_option=_encoder_option,
encoder_format=_encoder_format,
)
class StreamWriter:
"""Encode and write audio/video streams chunk by chunk
Args:
dst (str): The destination where the encoded data are written.
The supported value depends on the FFmpeg found in the system.
format (str or None, optional):
Override the output format, or specify the output media device.
Default: ``None`` (no override nor device output).
This argument serves two different use cases.
1) Override the output format.
This is useful when writing raw data or in a format different from the extension.
2) Specify the output device.
This allows to output media streams to hardware devices,
such as speaker and video screen.
.. note::
This option roughly corresponds to ``-f`` option of ``ffmpeg`` command.
Please refer to the ffmpeg documentations for possible values.
https://ffmpeg.org/ffmpeg-formats.html#Muxers
Use `ffmpeg -muxers` to list the values available in the current environment.
For device access, the available values vary based on hardware (AV device) and
software configuration (ffmpeg build).
Please refer to the ffmpeg documentations for possible values.
https://ffmpeg.org/ffmpeg-devices.html#Output-Devices
Use `ffmpeg -devices` to list the values available in the current environment.
"""
def __init__(
self,
dst: str,
format: Optional[str] = None,
):
self._s = torch.classes.torchaudio.ffmpeg_StreamWriter(dst, format)
self._is_open = False
@_format_common_args
def add_audio_stream(
self,
sample_rate: int,
num_channels: int,
format: str = "flt",
encoder: Optional[str] = None,
encoder_option: Optional[Dict[str, str]] = None,
encoder_format: Optional[str] = None,
):
"""Add an output audio stream.
Args:
sample_rate (int): The sample rate.
num_channels (int): The number of channels.
format (str, optional): Input sample format, which determines the dtype
of the input tensor.
- ``"u8"``: The input tensor must be ``torch.uint8`` type.
- ``"s16"``: The input tensor must be ``torch.int16`` type.
- ``"s32"``: The input tensor must be ``torch.int32`` type.
- ``"s64"``: The input tensor must be ``torch.int64`` type.
- ``"flt"``: The input tensor must be ``torch.float32`` type.
- ``"dbl"``: The input tensor must be ``torch.float64`` type.
Default: ``"flt"``.
encoder (str or None, optional): {encoder}
encoder_option (dict or None, optional): {encoder_option}
encoder_format (str or None, optional): {encoder_format}
"""
self._s.add_audio_stream(sample_rate, num_channels, format, encoder, encoder_option, encoder_format)
@_format_common_args
def add_video_stream(
self,
frame_rate: float,
width: int,
height: int,
format: str = "rgb24",
encoder: Optional[str] = None,
encoder_option: Optional[Dict[str, str]] = None,
encoder_format: Optional[str] = None,
):
"""Add an output video stream.
This method has to be called before `open` is called.
Args:
frame_rate (float): Frame rate of the video.
width (int): Width of the video frame.
height (int): Height of the video frame.
format (str, optional): Input pixel format, which determines the
color channel order of the input tensor.
- ``"gray8"``: One channel, grayscale.
- ``"rgb24"``: Three channels in the order of RGB.
- ``"bgr24"``: Three channels in the order of BGR.
- ``"yuv444p"``: Three channels in the order of YUV.
Default: ``"rgb24"``.
In either case, the input tensor has to be ``torch.uint8`` type and
the shape must be (frame, channel, height, width).
encoder (str or None, optional): {encoder}
encoder_option (dict or None, optional): {encoder_option}
encoder_format (str or None, optional): {encoder_format}
"""
self._s.add_video_stream(frame_rate, width, height, format, encoder, encoder_option, encoder_format)
def set_metadata(self, metadata: Dict[str, str]):
"""Set file-level metadata
Args:
metadata (dict or None, optional): File-level metadata.
"""
self._s.set_metadata(metadata)
def _print_output_stream(self, i: int):
"""[debug] Print the registered stream information to stdout."""
self._s.dump_format(i)
def open(self, option: Optional[Dict[str, str]] = None):
"""Open the output file / device and write the header.
Args:
option (dict or None, optional): Private options for protocol, device and muxer. See example.
Example - Protocol option
>>> s = StreamWriter(dst="rtmp://localhost:1234/live/app", format="flv")
>>> s.add_video_stream(...)
>>> # Passing protocol option `listen=1` makes StreamWriter act as RTMP server.
>>> with s.open(option={"listen": "1"}) as f:
>>> f.write_video_chunk(...)
Example - Device option
>>> s = StreamWriter("-", format="sdl")
>>> s.add_video_stream(..., encoder_format="rgb24")
>>> # Open SDL video player with fullscreen
>>> with s.open(option={"window_fullscreen": "1"}):
>>> f.write_video_chunk(...)
Example - Muxer option
>>> s = StreamWriter("foo.flac")
>>> s.add_audio_stream(...)
>>> s.set_metadata({"artist": "torchaudio contributors"})
>>> # FLAC muxer has a private option to not write the header.
>>> # The resulting file does not contain the above metadata.
>>> with s.open(option={"write_header": "false"}) as f:
>>> f.write_audio_chunk(...)
"""
if not self._is_open:
self._s.open(option)
self._is_open = True
return self
def close(self):
"""Close the output"""
if self._is_open:
self._s.close()
self._is_open = False
def write_audio_chunk(self, i: int, chunk: torch.Tensor):
"""Write the audio data
Args:
i (int): Stream index.
chunk (Tensor): Waveform tensor. Shape: `(frame, channel)`.
The ``dtype`` must match what was passed to :py:func:`add_audio_stream` method.
"""
self._s.write_audio_chunk(i, chunk)
def write_video_chunk(self, i: int, chunk: torch.Tensor):
"""Write the audio data
Args:
i (int): Stream index.
chunk (Tensor): Waveform tensor. Shape: `(frame, channel, height, width)`.
``dtype``: ``torch.uint8``.
"""
self._s.write_video_chunk(i, chunk)
def flush(self):
"""Flush the frames from encoders and write the frames to the destination."""
self._s.flush()
def __enter__(self):
"""Context manager so that the destination is closed and data are flushed automatically."""
return self
def __exit__(self, exception_type, exception_value, traceback):
"""Context manager so that the destination is closed and data are flushed automatically."""
self.flush()
self.close()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment