diff --git a/moai/conf/model/parameters/groups/model1.yaml b/moai/conf/model/parameters/groups/model1.yaml
new file mode 100644
index 0000000..fed13cd
--- /dev/null
+++ b/moai/conf/model/parameters/groups/model1.yaml
@@ -0,0 +1,7 @@
+# @package model.parameters.groups.model1
+
+_target_: moai.parameters.selectors.model.ModelParameterSelector
+modules: null # optional
+monads: null # optional
+parameters: null # optional
+force_grad: true
\ No newline at end of file
diff --git a/moai/conf/model/parameters/groups/model2.yaml b/moai/conf/model/parameters/groups/model2.yaml
new file mode 100644
index 0000000..c5ea1cd
--- /dev/null
+++ b/moai/conf/model/parameters/groups/model2.yaml
@@ -0,0 +1,7 @@
+# @package model.parameters.groups.model2
+
+_target_: moai.parameters.selectors.model.ModelParameterSelector
+modules: null # optional
+monads: null # optional
+parameters: null # optional
+force_grad: true
\ No newline at end of file
diff --git a/moai/monads/render/nvdiffrast/render.py b/moai/monads/render/nvdiffrast/render.py
index ddee422..0ef1269 100644
--- a/moai/monads/render/nvdiffrast/render.py
+++ b/moai/monads/render/nvdiffrast/render.py
@@ -22,6 +22,7 @@ def __init__(
         self.resolution = [height, width]
         self.decomposed = decomposed

+    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
     def forward(
         self,
         ndc_vertices: torch.Tensor,
@@ -90,6 +91,7 @@ def __init__(
     ):
         super().__init__()

+    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
     def forward(
         self,
         attributes: torch.Tensor,
@@ -114,6 +116,7 @@ def __init__(
         super().__init__()
         self.pos_grad_boost = position_gradient_scale

+    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
     def forward(
         self,
         attributes: torch.Tensor,
diff --git a/moai/supervision/objectives/mesh/laplacian.py b/moai/supervision/objectives/mesh/laplacian.py
index 3cbd858..ccc38c5 100644
--- a/moai/supervision/objectives/mesh/laplacian.py
+++ b/moai/supervision/objectives/mesh/laplacian.py
@@ -89,6 +89,7 @@ def __init__(self, method: str = "uniform"):
         super().__init__()
         self.method = method

+    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
     def forward(
         self,
         vertices: torch.Tensor, # [B, V, 3]
@@ -109,9 +110,9 @@ def forward(
         # We don't want to backprop through the computation of the Laplacian;
         # just treat it as a magic constant matrix that is used to transform
         # verts into normals
-        with torch.no_grad():
+        with torch.no_grad(), torch.amp.autocast(device_type="cuda", enabled=False):
             if self.method == "uniform":
-                L = laplacian(verts_packed, faces_packed)
+                L = laplacian(verts_packed.float(), faces_packed)
             elif self.method in ["cot", "cotcurv"]:
                 L, inv_areas = cot_laplacian(verts_packed, faces_packed)
             if self.method == "cot":
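
Notes on the patch: the two new YAML files register Hydra config-group options that instantiate `moai.parameters.selectors.model.ModelParameterSelector`, apparently so that per-model optimizer parameter groups can be composed via the `@package` directive. The Python changes all serve mixed-precision safety: under autocast, these `forward` methods may receive fp16/bf16 tensors, while the nvdiffrast rasterization kernels and the sparse Laplacian construction expect float32. `torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)` casts floating-point tensor arguments to fp32 and runs the decorated body with autocast disabled.

The sketch below is a minimal, self-contained illustration of that mechanism, not moai code; `Fp32Only` and its `scale` argument are hypothetical. It uses the documented `torch.autograd.Function` form of the decorator and assumes a CUDA device and a PyTorch version where `torch.amp.custom_fwd` takes the `device_type` keyword (2.4+):

```python
import torch


class Fp32Only(torch.autograd.Function):
    """Hypothetical op that must run in full precision, mirroring the
    cast_inputs=torch.float32 pattern this patch applies to the
    nvdiffrast monads and the Laplacian objective."""

    @staticmethod
    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
    def forward(ctx, x: torch.Tensor, scale: float) -> torch.Tensor:
        # Inside an autocast region, custom_fwd has already cast any
        # fp16/bf16 tensor arguments to fp32 and disabled autocast here.
        assert x.dtype == torch.float32
        ctx.scale = scale
        return x * scale

    @staticmethod
    @torch.amp.custom_bwd(device_type="cuda")
    def backward(ctx, grad_out: torch.Tensor):
        # custom_bwd runs the backward under the same autocast state the
        # forward used (disabled here, since cast_inputs was given).
        return grad_out * ctx.scale, None


if torch.cuda.is_available():
    x = torch.randn(8, device="cuda", requires_grad=True)
    with torch.amp.autocast(device_type="cuda", dtype=torch.float16):
        h = (x * 1.0).half()        # pretend an earlier layer produced fp16
        y = Fp32Only.apply(h, 2.0)  # arrives inside forward as float32
    y.sum().backward()
    print(x.grad.dtype)             # torch.float32
```

Applied directly to an `nn.Module.forward`, as in this patch, the decorator performs the same input casting; the extra `torch.amp.autocast(..., enabled=False)` context in `laplacian.py` additionally shields the sparse Laplacian construction, where half-precision kernels are generally unavailable.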