
Commit

Polish var name.
JackyTown committed Aug 22, 2020
1 parent 46c57fa commit 32e4aa2
Showing 2 changed files with 21 additions and 21 deletions.
38 changes: 19 additions & 19 deletions mmaction/models/heads/ssn_head.py
@@ -63,7 +63,7 @@ def forward(self, x, scale_factors):
         Args:
             x (torch.Tensor): The input data.
             scale_factors (list): Ratios of the effective sampling lengths
-                to augment lengths.
+                to augmented lengths.

         Returns:
             tuple[torch.Tensor, torch.Tensor]:
@@ -90,12 +90,12 @@ def extract_stage_feature(stage_feat, stage_parts, num_multiplier,
                 stage_parts (tuple): Config of STPP.
                 num_multiplier (int): Total number of parts in the stage.
                 scale_factors (list): Ratios of the effective sampling lengths
-                    to augment lengths.
+                    to augmented lengths.

             Returns:
                 torch.Tensor: Features of the stage.
             """
-            stage_stpp = []
+            stage_stpp_feat = []
             stage_len = stage_feat.size(1)
             for stage_part in stage_parts:
                 ticks = torch.arange(0, stage_len + 1e-5,
@@ -108,22 +108,22 @@ def extract_stage_feature(stage_feat, stage_parts, num_multiplier,
                     if scale_factors is not None:
                         part_feat = (
                             part_feat * scale_factors.view(num_samples, 1))
-                    stage_stpp.append(part_feat)
-            return stage_stpp
+                    stage_stpp_feat.append(part_feat)
+            return stage_stpp_feat

-        feature_parts = []
-        feature_parts.extend(
+        stage_stpp_feats = []
+        stage_stpp_feats.extend(
             extract_stage_feature(x[:, :x0, :], self.stpp_stages[0],
                                   self.multiplier_list[0], scale_factors[:,
                                                                          0]))
-        feature_parts.extend(
+        stage_stpp_feats.extend(
             extract_stage_feature(x[:, x0:x1, :], self.stpp_stages[1],
                                   self.multiplier_list[1], None))
-        feature_parts.extend(
+        stage_stpp_feats.extend(
             extract_stage_feature(x[:, x1:, :], self.stpp_stages[2],
                                   self.multiplier_list[2], scale_factors[:,
                                                                          1]))
-        stpp_feat = torch.cat(feature_parts, dim=1)
+        stpp_feat = torch.cat(stage_stpp_feats, dim=1)

         if self.with_context:
             return stpp_feat, stpp_feat
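
Editor's note: a small illustrative sketch of what stpp_feat = torch.cat(stage_stpp_feats, dim=1) in the hunk above yields. Each entry of stage_stpp_feats is a per-part pooled feature of shape (num_samples, feat_dim), so concatenating along dim=1 gives one flat STPP feature per sample. The sizes below are assumptions chosen only for illustration, not values from the repository.

import torch

num_samples = 4    # assumed number of proposals in a batch
feat_dim = 16      # assumed per-part feature dimension
num_parts = 4      # assumed total number of parts across the three STPP stages

# Stand-ins for the per-part features collected into stage_stpp_feats.
stage_stpp_feats = [torch.randn(num_samples, feat_dim) for _ in range(num_parts)]

# As in the hunk above: concatenate every part along the feature dimension.
stpp_feat = torch.cat(stage_stpp_feats, dim=1)

assert stpp_feat.shape == (num_samples, num_parts * feat_dim)
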
@@ -190,7 +190,7 @@ def forward(self, x, proposal_ticks, scale_factors):
             x (torch.Tensor): The input data.
             proposal_ticks (list): Ticks of proposals to be STPP.
             scale_factors (list): Ratios of the effective sampling lengths
-                to augment lengths.
+                to augmented lengths.

         Returns:
             tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
@@ -227,7 +227,7 @@ def pyramids_pooling(out_scores, index, raw_scores, ticks,
                 raw_scores (torch.Tensor): Raw scores before STPP.
                 ticks (int): Ticks of raw scores.
                 scale_factors (list): Ratios of the effective sampling lengths
-                    to augment lengths.
+                    to augmented lengths.
                 score_len
                 stpp_stage (tuple): Config of STPP.
             """
@@ -240,13 +240,13 @@
                 else:
                     scale_factor = 1.0

-                num_stages = sum(stage_cfg)
+                sum_parts = sum(stage_cfg)
                 tick_left = float(ticks[stage_idx])
                 tick_right = float(
                     max(ticks[stage_idx] + 1, ticks[stage_idx + 1]))

                 if tick_right <= 0 or tick_left >= raw_scores.size(0):
-                    offset += num_stages
+                    offset += sum_parts
                     continue
                 for num_parts in stage_cfg:
                     part_ticks = torch.arange(
@@ -413,12 +413,12 @@ def forward(self, x, test_mode=False):
             activity_scores = self.activity_fc(activity_feat)
             complete_scores = self.completeness_fc(completeness_feat)
             if self.with_regression:
-                bbox_pred = self.regressor_fc(completeness_feat)
+                bbox_preds = self.regressor_fc(completeness_feat)
             else:
-                bbox_pred = None
-            bbox_pred = bbox_pred.view(-1, self.completeness_fc.out_features,
-                                       2)
-            return activity_scores, complete_scores, bbox_pred
+                bbox_preds = None
+            bbox_preds = bbox_preds.view(-1, self.completeness_fc.out_features,
+                                         2)
+            return activity_scores, complete_scores, bbox_preds
         else:
             x, proposal_tick_list, scale_factor_list = x
             test_scores = self.test_fc(x)
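
Editor's note: as context for the .view call in the hunk above, the regressor output for each proposal is a flat vector that gets reshaped into two regression values per completeness class. Below is a minimal sketch of that shape arithmetic; num_proposals and num_classes are assumptions for illustration, not values from the repository.

import torch

num_proposals = 8    # assumed number of proposals in a batch
num_classes = 20     # assumed completeness_fc.out_features

# Stand-in for self.regressor_fc(completeness_feat): a flat vector of
# 2 * num_classes regression values per proposal.
bbox_preds = torch.randn(num_proposals, num_classes * 2)

# As in the hunk above: view as (-1, out_features, 2), i.e. two regression
# values per class per proposal (in SSN these are typically interpreted as
# location and duration offsets).
bbox_preds = bbox_preds.view(-1, num_classes, 2)

assert bbox_preds.shape == (num_proposals, num_classes, 2)
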
4 changes: 2 additions & 2 deletions mmaction/models/localizers/ssn.py
@@ -67,10 +67,10 @@ def forward_train(self, imgs, proposal_scale_factor, proposal_type,
         if self.dropout is not None:
             x = self.dropout(x)

-        activity_score, completeness_score, bbox_pred = self.cls_head(
+        activity_scores, completeness_scores, bbox_preds = self.cls_head(
             (x, proposal_scale_factor))

-        loss = self.loss_cls(activity_score, completeness_score, bbox_pred,
+        loss = self.loss_cls(activity_scores, completeness_scores, bbox_preds,
                              proposal_type, proposal_labels, reg_targets,
                              self.train_cfg)
         loss_dict = dict(**loss)
