#define NVDR_CHECK_DEVICE(...) do { TORCH_CHECK(at::cuda::check_device({__VA_ARGS__}), __func__, "(): Inputs " #__VA_ARGS__ " must reside on the current GPU device"); } while(0)
#define NVDR_CHECK_CPU(...) do { nvdr_check_cpu({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must reside on CPU"); } while(0)
#define NVDR_CHECK_CONTIGUOUS(...) do { nvdr_check_contiguous({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must be contiguous tensors"); } while(0)
#define NVDR_CHECK_F32(...) do { nvdr_check_f32({__VA_ARGS__}, __func__, "(): Inputs " #__VA_ARGS__ " must be float32 tensors"); } while(0)
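// The check macros above rely on a generic NVDR_CHECK macro and small helpers
// that walk a list of tensors. If these are not already defined elsewhere in
// this file, a minimal sketch (assuming the usual <torch/extension.h> and
// <ATen/cuda/CUDAUtils.h> includes) could look like this:
#ifndef NVDR_CHECK
#define NVDR_CHECK(x, str) do { TORCH_CHECK(x, __func__, "(): ", str); } while(0)
#endif
inline void nvdr_check_cpu(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.device().type() == c10::DeviceType::CPU, func, err_msg); }
inline void nvdr_check_contiguous(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.is_contiguous(), func, err_msg); }
inline void nvdr_check_f32(at::ArrayRef<at::Tensor> ts, const char* func, const char* err_msg) { for (const at::Tensor& t : ts) TORCH_CHECK(t.dtype() == torch::kFloat32, func, err_msg); }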
        if (!cube_mode)
            NVDR_CHECK(uv_da.sizes().size() == 4 && uv_da.size(0) == p.n && uv_da.size(1) == p.imgHeight && uv_da.size(2) == p.imgWidth && uv_da.size(3) == 4, "uv_da must have shape [minibatch_size, height, width, 4]");
        else
            NVDR_CHECK(uv_da.sizes().size() == 4 && uv_da.size(0) == p.n && uv_da.size(1) == p.imgHeight && uv_da.size(2) == p.imgWidth && uv_da.size(3) == 6, "uv_da must have shape [minibatch_size, height, width, 6] in cube map mode");
    }
    if (has_mip_level_bias)
        NVDR_CHECK(mip_level_bias.sizes().size() == 3 && mip_level_bias.size(0) == p.n && mip_level_bias.size(1) == p.imgHeight && mip_level_bias.size(2) == p.imgWidth, "mip_level_bias must have shape [minibatch_size, height, width]");
}
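// Note on the channel counts checked above: in the standard 2D mode, uv has
// two channels, so its image-space derivative tensor uv_da carries the 2x2
// Jacobian (du/dX, du/dY, dv/dX, dv/dY), i.e. 4 channels per pixel. In cube
// map mode the lookup coordinate is a 3D direction vector, so its
// screen-space derivatives need 3x2 = 6 channels. mip_level_bias is a
// per-pixel scalar offset applied to the computed mip level, hence the
// [minibatch_size, height, width] shape.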
NVDR_CHECK(dy.sizes().size() == 4 && dy.size(0) == p.n && dy.size(1) == p.imgHeight && dy.size(2) == p.imgWidth && dy.size(3) == p.channels, "dy must have shape [minibatch_size, height, width, channels]");
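// A minimal usage sketch (hypothetical function and tensor names) showing how
// the macros above typically combine at the top of an op entry point, before
// per-tensor shape checks like the ones in this file:
static void check_example_inputs(torch::Tensor tex, torch::Tensor uv, torch::Tensor dy)
{
    NVDR_CHECK_DEVICE(tex, uv, dy);     // all inputs on the current GPU device
    NVDR_CHECK_CONTIGUOUS(tex, uv, dy); // contiguous memory layout
    NVDR_CHECK_F32(tex, uv, dy);        // float32 dtype
    NVDR_CHECK(tex.sizes().size() == 4, "tex must have shape [minibatch_size, height, width, channels]");
}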