@@ -45,18 +45,19 @@ def packed_to_padded_python(inputs, first_idxs, max_size, device):
         PyTorch implementation of packed_to_padded function.
         """
         num_meshes = first_idxs.size(0)
-        D = inputs.shape[1] if inputs.dim() == 2 else 0
-        if D == 0:
+        if inputs.dim() == 1:
             inputs_padded = torch.zeros((num_meshes, max_size), device=device)
         else:
-            inputs_padded = torch.zeros((num_meshes, max_size, D), device=device)
+            inputs_padded = torch.zeros(
+                (num_meshes, max_size, *inputs.shape[1:]), device=device
+            )
         for m in range(num_meshes):
             s = first_idxs[m]
             if m == num_meshes - 1:
                 f = inputs.shape[0]
             else:
                 f = first_idxs[m + 1]
-            inputs_padded[m, :f] = inputs[s:f]
+            inputs_padded[m, : f - s] = inputs[s:f]

         return inputs_padded

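For reference, a minimal usage sketch of the reference helper above (not part of the diff; the values, shapes, and the assumption that packed_to_padded_python is callable directly are purely illustrative): two meshes with 2 and 3 faces, one scalar per face, packed into a single tensor and expanded to a zero-padded (num_meshes, max_size) tensor.

import torch

# Hypothetical inputs: 5 packed face values for two meshes (2 faces + 3 faces).
values_packed = torch.arange(5.0)      # shape (5,)
first_idxs = torch.tensor([0, 2])      # mesh 0 starts at index 0, mesh 1 at index 2
padded = packed_to_padded_python(values_packed, first_idxs, max_size=3, device="cpu")
# padded has shape (2, 3): [[0., 1., 0.], [2., 3., 4.]] -- unused slots stay zero.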
@@ -66,22 +67,21 @@ def padded_to_packed_python(inputs, first_idxs, num_inputs, device):
         PyTorch implementation of padded_to_packed function.
         """
         num_meshes = inputs.size(0)
-        D = inputs.shape[2] if inputs.dim() == 3 else 0
-        if D == 0:
+        if inputs.dim() == 2:
             inputs_packed = torch.zeros((num_inputs,), device=device)
         else:
-            inputs_packed = torch.zeros((num_inputs, D), device=device)
+            inputs_packed = torch.zeros((num_inputs, *inputs.shape[2:]), device=device)
         for m in range(num_meshes):
             s = first_idxs[m]
             if m == num_meshes - 1:
                 f = num_inputs
             else:
                 f = first_idxs[m + 1]
-            inputs_packed[s:f] = inputs[m, :f]
+            inputs_packed[s:f] = inputs[m, : f - s]

         return inputs_packed

-    def _test_packed_to_padded_helper(self, D, device):
+    def _test_packed_to_padded_helper(self, dims, device):
         """
         Check the results from packed_to_padded and PyTorch implementations
         are the same.
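padded_to_packed_python is the inverse mapping; a round-trip sketch under the same made-up shapes as above (again assuming the helper is callable directly):

import torch

# Hypothetical round trip: the padded tensor from the previous sketch, re-packed.
padded = torch.tensor([[0.0, 1.0, 0.0], [2.0, 3.0, 4.0]])   # (num_meshes, max_size)
first_idxs = torch.tensor([0, 2])
packed = padded_to_packed_python(padded, first_idxs, num_inputs=5, device="cpu")
# packed is tensor([0., 1., 2., 3., 4.]) -- the original packed layout is recovered.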
@@ -91,10 +91,12 @@ def _test_packed_to_padded_helper(self, D, device):
         mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
         max_faces = meshes.num_faces_per_mesh().max().item()

-        if D == 0:
+        if len(dims) == 0:
             values = torch.rand((faces.shape[0],), device=device, requires_grad=True)
         else:
-            values = torch.rand((faces.shape[0], D), device=device, requires_grad=True)
+            values = torch.rand(
+                (faces.shape[0], *dims), device=device, requires_grad=True
+            )
         values_torch = values.detach().clone()
         values_torch.requires_grad = True
         values_padded = packed_to_padded(
@@ -107,10 +109,10 @@ def _test_packed_to_padded_helper(self, D, device):
         self.assertClose(values_padded, values_padded_torch)

         # check backward
-        if D == 0:
+        if len(dims) == 0:
             grad_inputs = torch.rand((len(meshes), max_faces), device=device)
         else:
-            grad_inputs = torch.rand((len(meshes), max_faces, D), device=device)
+            grad_inputs = torch.rand((len(meshes), max_faces, *dims), device=device)
         values_padded.backward(grad_inputs)
         grad_outputs = values.grad
         values_padded_torch.backward(grad_inputs)
@@ -122,27 +124,41 @@ def _test_packed_to_padded_helper(self, D, device):
         self.assertClose(grad_outputs, grad_outputs_torch2)

     def test_packed_to_padded_flat_cpu(self):
-        self._test_packed_to_padded_helper(0, "cpu")
+        self._test_packed_to_padded_helper([], "cpu")

     def test_packed_to_padded_D1_cpu(self):
-        self._test_packed_to_padded_helper(1, "cpu")
+        self._test_packed_to_padded_helper([1], "cpu")

     def test_packed_to_padded_D16_cpu(self):
-        self._test_packed_to_padded_helper(16, "cpu")
+        self._test_packed_to_padded_helper([16], "cpu")
+
+    def test_packed_to_padded_D16_9_cpu(self):
+        self._test_packed_to_padded_helper([16, 9], "cpu")
+
+    def test_packed_to_padded_D16_3_2_cpu(self):
+        self._test_packed_to_padded_helper([16, 3, 2], "cpu")

     def test_packed_to_padded_flat_cuda(self):
         device = get_random_cuda_device()
-        self._test_packed_to_padded_helper(0, device)
+        self._test_packed_to_padded_helper([], device)

     def test_packed_to_padded_D1_cuda(self):
         device = get_random_cuda_device()
-        self._test_packed_to_padded_helper(1, device)
+        self._test_packed_to_padded_helper([1], device)

     def test_packed_to_padded_D16_cuda(self):
         device = get_random_cuda_device()
-        self._test_packed_to_padded_helper(16, device)
+        self._test_packed_to_padded_helper([16], device)
+
+    def test_packed_to_padded_D16_9_cuda(self):
+        device = get_random_cuda_device()
+        self._test_packed_to_padded_helper([16, 9], device)
+
+    def test_packed_to_padded_D16_3_2_cuda(self):
+        device = get_random_cuda_device()
+        self._test_packed_to_padded_helper([16, 3, 2], device)

-    def _test_padded_to_packed_helper(self, D, device):
+    def _test_padded_to_packed_helper(self, dims, device):
         """
         Check the results from packed_to_padded and PyTorch implementations
         are the same.
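The new D16_9 and D16_3_2 tests pass a list of trailing dims instead of a single scalar D, so the conversions are now exercised on inputs whose per-face features are themselves multi-dimensional. A small sketch of that case with made-up shapes (again assuming the reference helper is callable directly):

import torch

# Each packed face carries a (3, 2) feature block rather than a flat D-vector.
values = torch.rand(5, 3, 2)           # 5 packed faces for two meshes
first_idxs = torch.tensor([0, 2])
padded = packed_to_padded_python(values, first_idxs, max_size=3, device="cpu")
print(padded.shape)                    # torch.Size([2, 3, 3, 2])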
@@ -151,10 +167,10 @@ def _test_padded_to_packed_helper(self, D, device):
         mesh_to_faces_packed_first_idx = meshes.mesh_to_faces_packed_first_idx()
         num_faces_per_mesh = meshes.num_faces_per_mesh()
         max_faces = num_faces_per_mesh.max().item()
-        if D == 0:
+        if len(dims) == 0:
             values = torch.rand((len(meshes), max_faces), device=device)
         else:
-            values = torch.rand((len(meshes), max_faces, D), device=device)
+            values = torch.rand((len(meshes), max_faces, *dims), device=device)
         for i, num in enumerate(num_faces_per_mesh):
             values[i, num:] = 0
         values.requires_grad = True
@@ -173,11 +189,11 @@ def _test_padded_to_packed_helper(self, D, device):
         self.assertClose(values_packed, values_packed_torch)

         # check backward
-        if D == 0:
+        if len(dims) == 0:
             grad_inputs = torch.rand((num_faces_per_mesh.sum().item()), device=device)
         else:
             grad_inputs = torch.rand(
-                (num_faces_per_mesh.sum().item(), D), device=device
+                (num_faces_per_mesh.sum().item(), *dims), device=device
             )
         values_packed.backward(grad_inputs)
         grad_outputs = values.grad
@@ -190,41 +206,39 @@ def _test_padded_to_packed_helper(self, D, device):
         self.assertClose(grad_outputs, grad_outputs_torch2)

     def test_padded_to_packed_flat_cpu(self):
-        self._test_padded_to_packed_helper(0, "cpu")
+        self._test_padded_to_packed_helper([], "cpu")

     def test_padded_to_packed_D1_cpu(self):
-        self._test_padded_to_packed_helper(1, "cpu")
+        self._test_padded_to_packed_helper([1], "cpu")

     def test_padded_to_packed_D16_cpu(self):
-        self._test_padded_to_packed_helper(16, "cpu")
+        self._test_padded_to_packed_helper([16], "cpu")
+
+    def test_padded_to_packed_D16_9_cpu(self):
+        self._test_padded_to_packed_helper([16, 9], "cpu")
+
+    def test_padded_to_packed_D16_3_2_cpu(self):
+        self._test_padded_to_packed_helper([16, 3, 2], "cpu")

     def test_padded_to_packed_flat_cuda(self):
         device = get_random_cuda_device()
-        self._test_padded_to_packed_helper(0, device)
+        self._test_padded_to_packed_helper([], device)

     def test_padded_to_packed_D1_cuda(self):
         device = get_random_cuda_device()
-        self._test_padded_to_packed_helper(1, device)
+        self._test_padded_to_packed_helper([1], device)

     def test_padded_to_packed_D16_cuda(self):
         device = get_random_cuda_device()
-        self._test_padded_to_packed_helper(16, device)
-
-    def test_invalid_inputs_shapes(self, device="cuda:0"):
-        with self.assertRaisesRegex(ValueError, "input can only be 2-dimensional."):
-            values = torch.rand((100, 50, 2), device=device)
-            first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
-            packed_to_padded(values, first_idxs, 100)
-
-        with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
-            values = torch.rand((100,), device=device)
-            first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
-            padded_to_packed(values, first_idxs, 20)
-
-        with self.assertRaisesRegex(ValueError, "input can only be 3-dimensional."):
-            values = torch.rand((100, 50, 2, 2), device=device)
-            first_idxs = torch.tensor([0, 80], dtype=torch.int64, device=device)
-            padded_to_packed(values, first_idxs, 20)
+        self._test_padded_to_packed_helper([16], device)
+
+    def test_padded_to_packed_D16_9_cuda(self):
+        device = get_random_cuda_device()
+        self._test_padded_to_packed_helper([16, 9], device)
+
+    def test_padded_to_packed_D16_3_2_cuda(self):
+        device = get_random_cuda_device()
+        self._test_padded_to_packed_helper([16, 3, 2], device)

     @staticmethod
     def packed_to_padded_with_init(