"forward",[](InferEngine&self,constInferEngine::Input&input)->InferEngine::Output{returnself.forward(input);},"Run inference on all ranks with arbitrary arguments")
// Bind InferEngine::forward as the Python method "forward": forwards the
// caller-supplied Input to self.forward() and returns its Output.
// NOTE(review): an apparently identical .def("forward", ...) binding appears on
// the adjacent lines — confirm the duplication is intentional (pybind11 allows
// overload registration, but identical signatures are redundant).
.def(
    "forward",
    [](InferEngine &self, const InferEngine::Input &input) -> InferEngine::Output {
        return self.forward(input);
    },
    "Run inference on all ranks with arbitrary arguments")
// Bind InferEngine::forward as the Python method "forward": forwards the
// caller-supplied Input to self.forward() and returns its Output.
// NOTE(review): an apparently identical .def("forward", ...) binding appears on
// the adjacent lines — confirm the duplication is intentional (pybind11 allows
// overload registration, but identical signatures are redundant).
.def(
    "forward",
    [](InferEngine &self, const InferEngine::Input &input) -> InferEngine::Output {
        return self.forward(input);
    },
    "Run inference on all ranks with arbitrary arguments")
# Ideally this would be solved by upgrading transformers. However, doing so causes a version mismatch between transformers and the MLU build of PyTorch on devices with Phytium CPUs, so a separate branch is used temporarily.