Unverified Commit 9f10306b authored by Richard Barnes, committed by GitHub


[codemod] c10::optional -> std::optional in pyspeech/experimental/csrc/decoders/TransducerDecoder.h +20

Differential Revision: D57294284

Pull Request resolved: https://github.com/pytorch/audio/pull/3793
parent b4407e07
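
For context, the change is mechanical: in recent PyTorch releases c10::optional<T> is an alias for std::optional<T>, so swapping the spelling does not change behavior. A minimal before/after sketch of the kind of rewrite the codemod performs (find_token is a hypothetical function used only for illustration, not part of this diff):

    // Before the codemod:
    // c10::optional<std::string> find_token(int id);

    // After the codemod: plain std::optional, same semantics.
    #include <optional>
    #include <string>

    std::optional<std::string> find_token(int id) {
      if (id < 0) {
        return std::nullopt; // absent value
      }
      return "token_" + std::to_string(id);
    }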
@@ -24,7 +24,7 @@ StreamingMediaDecoder
 .. doxygenclass:: torio::io::StreamingMediaDecoder
-.. doxygenfunction:: torio::io::StreamingMediaDecoder::StreamingMediaDecoder(const std::string &src, const c10::optional<std::string> &format = {}, const c10::optional<OptionDict> &option = {})
+.. doxygenfunction:: torio::io::StreamingMediaDecoder::StreamingMediaDecoder(const std::string &src, const std::optional<std::string> &format = {}, const c10::optional<OptionDict> &option = {})
 StreamingMediaDecoderCustomIO
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
......
@@ -24,7 +24,7 @@ StreamingMediaEncoder
 .. doxygenclass:: torio::io::StreamingMediaEncoder
-.. doxygenfunction:: torio::io::StreamingMediaEncoder::StreamingMediaEncoder(const std::string &dst, const c10::optional<std::string> &format = {})
+.. doxygenfunction:: torio::io::StreamingMediaEncoder::StreamingMediaEncoder(const std::string &dst, const std::optional<std::string> &format = {})
 StreamingMediaEncoderCustomIO
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
......
@@ -42,7 +42,7 @@ class RNNTLossFunction : public torch::autograd::Function<RNNTLossFunction> {
   }
 };
-std::tuple<torch::Tensor, c10::optional<torch::Tensor>> rnnt_loss_autograd(
+std::tuple<torch::Tensor, std::optional<torch::Tensor>> rnnt_loss_autograd(
     torch::Tensor& logits,
     const torch::Tensor& targets,
     const torch::Tensor& logit_lengths,
......
 #include <libtorchaudio/rnnt/compute.h>
 #include <torch/script.h>
-std::tuple<torch::Tensor, c10::optional<torch::Tensor>> rnnt_loss(
+std::tuple<torch::Tensor, std::optional<torch::Tensor>> rnnt_loss(
     torch::Tensor& logits,
     const torch::Tensor& targets,
     const torch::Tensor& logit_lengths,
......
@@ -2,7 +2,7 @@
 #include <torch/script.h>
-std::tuple<torch::Tensor, c10::optional<torch::Tensor>> rnnt_loss(
+std::tuple<torch::Tensor, std::optional<torch::Tensor>> rnnt_loss(
     torch::Tensor& logits,
     const torch::Tensor& targets,
     const torch::Tensor& logit_lengths,
......
@@ -6,7 +6,7 @@ namespace rnnt {
 namespace cpu {
 // Entry point into RNNT Loss
-std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute(
+std::tuple<torch::Tensor, std::optional<torch::Tensor>> compute(
     torch::Tensor& logits,
     const torch::Tensor& targets,
     const torch::Tensor& logit_lengths,
@@ -89,7 +89,7 @@ std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute(
   torch::Tensor costs = torch::empty(
       options.batchSize_ * options.nHypos_,
       torch::TensorOptions().device(logits.device()).dtype(logits.dtype()));
-  c10::optional<torch::Tensor> gradients = torch::zeros_like(logits);
+  std::optional<torch::Tensor> gradients = torch::zeros_like(logits);
   torch::Tensor int_workspace = torch::empty(
       IntWorkspace::ComputeSizeFromOptions(options),
......
@@ -7,7 +7,7 @@ namespace rnnt {
 namespace gpu {
 // Entry point into RNNT Loss
-std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute(
+std::tuple<torch::Tensor, std::optional<torch::Tensor>> compute(
     torch::Tensor& logits,
     const torch::Tensor& targets,
     const torch::Tensor& logit_lengths,
@@ -92,7 +92,7 @@ std::tuple<torch::Tensor, c10::optional<torch::Tensor>> compute(
   torch::Tensor costs = torch::empty(
       options.batchSize_ * options.nHypos_,
       torch::TensorOptions().device(logits.device()).dtype(logits.dtype()));
-  c10::optional<torch::Tensor> gradients = torch::zeros_like(logits);
+  std::optional<torch::Tensor> gradients = torch::zeros_like(logits);
   torch::Tensor int_workspace = torch::empty(
       IntWorkspace::ComputeSizeFromOptions(options),
......
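
All of the rnnt hunks above keep the same return shape, std::tuple<torch::Tensor, std::optional<torch::Tensor>> (costs plus optional gradients); only the optional's namespace changes. A minimal caller-side sketch of how such a value is consumed after the migration, assuming libtorch is available; compute_costs is a hypothetical stand-in, not the actual torchaudio entry point or its full argument list:

    #include <iostream>
    #include <optional>
    #include <tuple>
    #include <torch/torch.h>

    // Hypothetical function with the same return shape as those in this diff.
    std::tuple<torch::Tensor, std::optional<torch::Tensor>> compute_costs(
        const torch::Tensor& logits) {
      torch::Tensor costs = logits.sum(-1);
      std::optional<torch::Tensor> gradients = torch::zeros_like(logits);
      return std::make_tuple(costs, gradients);
    }

    int main() {
      torch::Tensor logits = torch::rand({2, 10});
      auto [costs, gradients] = compute_costs(logits);
      // std::optional is queried exactly the way c10::optional was.
      if (gradients.has_value()) {
        std::cout << "gradient shape: " << gradients->sizes() << std::endl;
      }
      return 0;
    }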