[Fix] remove tqdm & fix posec3d #1009

Merged · 1 commit · Jul 11, 2021

3 changes: 2 additions & 1 deletion demo/demo_posec3d.py
@@ -224,7 +224,8 @@ def main():
         modality='Pose',
         total_frames=num_frame)
     num_person = max([len(x) for x in pose_results])
-    num_keypoint = pose_results[0][0]['keypoints'].shape[0]
+    # Current PoseC3D models are trained on COCO-keypoints (17 keypoints)
+    num_keypoint = 17
     keypoint = np.zeros((num_person, num_frame, num_keypoint, 2),
                         dtype=np.float16)
     keypoint_score = np.zeros((num_person, num_frame, num_keypoint),

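Note: the removed line inferred the keypoint count from the first person in the first frame, so it can raise an IndexError when that frame holds no pose detections. A minimal sketch (not part of the diff) of how the fixed COCO-17 count feeds the annotation arrays; pose_results is assumed to be a per-frame list of person dicts whose 'keypoints' arrays have shape (17, 3) holding (x, y, score), as in MMPose's top-down output:

import numpy as np

# Hypothetical stand-in for MMPose output: 2 frames, 1 person per frame,
# each person a dict with a (17, 3) 'keypoints' array of (x, y, score).
pose_results = [[{'keypoints': np.random.rand(17, 3)}] for _ in range(2)]

num_frame = len(pose_results)
num_person = max(len(frame) for frame in pose_results)
num_keypoint = 17  # COCO layout; what current PoseC3D checkpoints expect

keypoint = np.zeros((num_person, num_frame, num_keypoint, 2), dtype=np.float16)
keypoint_score = np.zeros((num_person, num_frame, num_keypoint), dtype=np.float16)
for f_idx, frame in enumerate(pose_results):
    for p_idx, person in enumerate(frame):
        keypoint[p_idx, f_idx] = person['keypoints'][:, :2]
        keypoint_score[p_idx, f_idx] = person['keypoints'][:, 2]
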
10 changes: 7 additions & 3 deletions demo/demo_spatiotemporal_det.py
@@ -10,7 +10,6 @@
 import torch
 from mmcv import DictAction
 from mmcv.runner import load_checkpoint
-from tqdm import tqdm
 
 from mmaction.models import build_detector
 from mmaction.utils import import_module_error_func
@@ -225,11 +224,13 @@ def detection_inference(args, frame_paths):
                                           'trained on COCO')
     results = []
     print('Performing Human Detection for each frame')
-    for frame_path in tqdm(frame_paths):
+    prog_bar = mmcv.ProgressBar(len(frame_paths))
+    for frame_path in frame_paths:
         result = inference_detector(model, frame_path)
         # We only keep human detections with score larger than det_score_thr
         result = result[0][result[0][:, 4] >= args.det_score_thr]
         results.append(result)
+        prog_bar.update()
     return results
 
 
@@ -355,7 +356,9 @@ def main():
     predictions = []
 
     print('Performing SpatioTemporal Action Detection for each clip')
-    for timestamp, proposal in tqdm(zip(timestamps, human_detections)):
+    assert len(timestamps) == len(human_detections)
+    prog_bar = mmcv.ProgressBar(len(timestamps))
+    for timestamp, proposal in zip(timestamps, human_detections):
         if proposal.shape[0] == 0:
             predictions.append(None)
             continue
@@ -389,6 +392,7 @@
                     prediction[j].append((label_map[i + 1], result[i][j,
                                                                       4]))
         predictions.append(prediction)
+        prog_bar.update()
 
     results = []
     for human_detection, prediction in zip(human_detections, predictions):

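Every loop touched by this PR follows the same replacement idiom. For reference, a minimal standalone sketch of the mmcv.ProgressBar pattern (assuming classic mmcv 1.x, where ProgressBar is exposed at the top level); unlike tqdm, the total must be supplied at construction rather than inferred from the iterable, which is why the zip() loop above also gains an explicit length assert:

import time

import mmcv

items = list(range(5))
prog_bar = mmcv.ProgressBar(len(items))  # total is required up front
for item in items:
    time.sleep(0.1)  # stand-in for the real per-item work
    prog_bar.update()  # advance the bar by one completed task
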
2 changes: 1 addition & 1 deletion setup.cfg
@@ -19,6 +19,6 @@ line_length = 79
 multi_line_output = 0
 known_standard_library = pkg_resources,setuptools
 known_first_party = mmaction
-known_third_party = cv2,decord,einops,joblib,matplotlib,mmcv,numpy,pandas,pytest,scipy,seaborn,titlecase,torch,tqdm,webcolors
+known_third_party = cv2,decord,einops,joblib,matplotlib,mmcv,numpy,pandas,pytest,scipy,seaborn,titlecase,torch,webcolors
 no_lines_before = STDLIB,LOCALFOLDER
 default_section = THIRDPARTY

7 changes: 4 additions & 3 deletions tools/analysis/check_videos.py
@@ -4,9 +4,9 @@
 from functools import partial
 from multiprocessing import Manager, Pool, cpu_count
 
+import mmcv
 import numpy as np
 from mmcv import Config, DictAction
-from tqdm import tqdm
 
 from mmaction.datasets import PIPELINES, build_dataset
 
@@ -134,8 +134,9 @@ def _do_check_videos(lock, dataset, output_file, idx):
     ids = range(len(dataset))
 
     # start checking
-    for _ in tqdm(pool.imap_unordered(worker_fn, ids), total=len(ids)):
-        pass
+    prog_bar = mmcv.ProgressBar(len(dataset))
+    for _ in pool.imap_unordered(worker_fn, ids):
+        prog_bar.update()
     pool.close()
     pool.join()

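Since Pool.imap_unordered yields results in completion order, ticking the bar in the consuming loop still reports accurate overall progress. A small self-contained sketch of this pattern (the worker function and task count are illustrative, not from the repo):

from multiprocessing import Pool

import mmcv

def check(idx):
    return idx % 2 == 0  # illustrative per-task work

if __name__ == '__main__':
    ids = range(100)
    pool = Pool(4)
    prog_bar = mmcv.ProgressBar(len(ids))
    for _ in pool.imap_unordered(check, ids):
        prog_bar.update()  # one tick per finished task, regardless of order
    pool.close()
    pool.join()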