fix: add autocast for float32 only ops
nmvrs committed Jan 22, 2025
1 parent f658c99 commit 18b5381
Showing 4 changed files with 20 additions and 2 deletions.
7 changes: 7 additions & 0 deletions moai/conf/model/parameters/groups/model1.yaml
@@ -0,0 +1,7 @@
# @package model.parameters.groups.model1

_target_: moai.parameters.selectors.model.ModelParameterSelector
modules: null # optional
monads: null # optional
parameters: null # optional
force_grad: true
7 changes: 7 additions & 0 deletions moai/conf/model/parameters/groups/model2.yaml
@@ -0,0 +1,7 @@
# @package model.parameters.groups.model2

_target_: moai.parameters.selectors.model.ModelParameterSelector
modules: null # optional
monads: null # optional
parameters: null # optional
force_grad: true
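
Both group configs use Hydra's _target_ convention, so a group can be built straight from its YAML. The snippet below is a minimal sketch of that instantiation; the idea that the resulting selector is then called with the model to pick out its parameters is an assumption about ModelParameterSelector, not something shown in this diff.

from hydra.utils import instantiate
from omegaconf import OmegaConf

# Mirrors model1.yaml above; loading the YAML file directly would work the same way.
cfg = OmegaConf.create({
    "_target_": "moai.parameters.selectors.model.ModelParameterSelector",
    "modules": None,
    "monads": None,
    "parameters": None,
    "force_grad": True,
})

selector = instantiate(cfg)   # Hydra imports the target and passes the remaining keys as kwargs
# group = selector(model)     # assumed usage: select (and force grads on) this model's parameters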
3 changes: 3 additions & 0 deletions moai/monads/render/nvdiffrast/render.py
@@ -22,6 +22,7 @@ def __init__(
self.resolution = [height, width]
self.decomposed = decomposed

@torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
def forward(
self,
ndc_vertices: torch.Tensor,
@@ -90,6 +91,7 @@ def __init__(
):
super().__init__()

@torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
def forward(
self,
attributes: torch.Tensor,
@@ -114,6 +116,7 @@ def __init__(
super().__init__()
self.pos_grad_boost = position_gradient_scale

@torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
def forward(
self,
attributes: torch.Tensor,
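
The decorator added in each of these forwards is PyTorch's escape hatch for float32-only ops: when the surrounding code runs under torch.amp.autocast on CUDA, torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32) casts the incoming floating-point tensors to float32 and disables autocast for the body of the method, so the nvdiffrast kernels never receive half-precision inputs. A minimal, self-contained sketch of the same pattern (the module below is hypothetical, not part of this repository):

import torch

class Float32OnlyOp(torch.nn.Module):
    # Hypothetical stand-in for the nvdiffrast-backed monads patched above.

    @torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Under an active CUDA autocast region, x arrives here already cast to
        # float32 and autocast is disabled for the body of this method.
        return x @ x.transpose(-1, -2)

if torch.cuda.is_available():
    op = Float32OnlyOp().cuda()
    x = torch.randn(4, 8, device="cuda")
    with torch.amp.autocast(device_type="cuda", dtype=torch.float16):
        y = op(x)
    print(y.dtype)  # torch.float32, i.e. the op was kept out of the half-precision path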
5 changes: 3 additions & 2 deletions moai/supervision/objectives/mesh/laplacian.py
@@ -89,6 +89,7 @@ def __init__(self, method: str = "uniform"):
super().__init__()
self.method = method

@torch.amp.custom_fwd(device_type="cuda", cast_inputs=torch.float32)
def forward(
self,
vertices: torch.Tensor, # [B, V, 3]
@@ -109,9 +110,9 @@ def forward(
# We don't want to backprop through the computation of the Laplacian;
# just treat it as a magic constant matrix that is used to transform
# verts into normals
- with torch.no_grad():
+ with torch.no_grad(), torch.amp.autocast(device_type="cuda", enabled=False):
if self.method == "uniform":
- L = laplacian(verts_packed, faces_packed)
+ L = laplacian(verts_packed.float(), faces_packed)
elif self.method in ["cot", "cotcurv"]:
L, inv_areas = cot_laplacian(verts_packed, faces_packed)
if self.method == "cot":
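
The laplacian objective uses the complementary pattern: rather than decorating the forward, the construction of the Laplacian is wrapped in an autocast-disabled region and the packed vertices are explicitly cast to float32, since the matrix is treated as a constant anyway (it already sat under torch.no_grad()). A minimal standalone sketch of that pattern follows; the toy dense uniform Laplacian and the helper names are illustrative only, not moai's or any library's API.

import torch

def uniform_laplacian(verts: torch.Tensor, faces: torch.Tensor) -> torch.Tensor:
    # Toy dense uniform Laplacian built from triangle connectivity.
    V = verts.shape[0]
    edges = torch.cat([faces[:, [0, 1]], faces[:, [1, 2]], faces[:, [2, 0]]], dim=0)
    adj = torch.zeros(V, V, device=verts.device, dtype=verts.dtype)
    adj[edges[:, 0], edges[:, 1]] = 1.0
    adj[edges[:, 1], edges[:, 0]] = 1.0
    degree = adj.sum(dim=1, keepdim=True).clamp(min=1.0)
    return adj / degree - torch.eye(V, device=verts.device, dtype=verts.dtype)

def laplacian_smoothing(verts: torch.Tensor, faces: torch.Tensor) -> torch.Tensor:
    # Build the (constant) Laplacian with autograd and autocast both disabled,
    # casting the vertices to float32, exactly as the diff above does.
    with torch.no_grad(), torch.amp.autocast(device_type="cuda", enabled=False):
        L = uniform_laplacian(verts.float(), faces)
    return L.mm(verts).norm(dim=1).mean()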
