-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtest.py
More file actions
125 lines (100 loc) · 3.51 KB
/
test.py
File metadata and controls
125 lines (100 loc) · 3.51 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import argparse
from pathlib import Path
import pickle
import numpy as np
import sys
import traceback
def load_model_file(path: Path):
    """Load a pickled (.pkl) or NumPy (.npz) model file.

    Parameters
    ----------
    path : Path
        Location of the model file; the format is chosen by file suffix
        (case-insensitive).

    Returns
    -------
    dict
        Mapping of model entry names to their stored values.

    Raises
    ------
    RuntimeError
        If the suffix is unsupported or the file fails to load; the
        original exception is chained as the cause.
    """
    suffix = path.suffix.lower()  # computed once instead of per-branch
    if suffix == ".npz":
        try:
            npz = np.load(path, allow_pickle=True)
            return {k: npz[k] for k in npz.files}
        except Exception as e:
            # Explicit `from e` keeps the original traceback chained.
            raise RuntimeError(f"Failed loading NPZ file: {e}") from e
    elif suffix == ".pkl":
        try:
            with open(path, "rb") as f:
                # SECURITY NOTE: pickle.load can execute arbitrary code;
                # only run this on model files from a trusted source.
                return pickle.load(f, encoding="latin1")
        except Exception as e:
            raise RuntimeError(f"Failed loading PKL file: {e}") from e
    else:
        raise RuntimeError(f"Unsupported model format: {path.name}")
def validate_model(model_dict, path: Path):
    """Run basic structural checks on a loaded model.

    Accepted value types: numpy arrays; numeric scalars; lists of
    numerics; dict/str/bytes/None metadata (some MANO/SMPL meta
    entries); and values whose type name contains 'csc_matrix'
    (scipy sparse matrices, matched by name so scipy need not be
    imported here).

    Parameters
    ----------
    model_dict : dict
        The loaded model contents.
    path : Path
        Source file; used only in the error message.

    Returns
    -------
    tuple[int, dict]
        Count of unrecognized entries and a histogram of their type
        names. (Backward-compatible addition: the previous version
        returned None, which callers ignored.)

    Raises
    ------
    ValueError
        If the loaded object is not a dict.
    """
    if not isinstance(model_dict, dict):
        raise ValueError(f"{path.name} → Loaded object is NOT a dict!")
    count = 0
    types = {}
    for k, v in model_dict.items():
        # Accept any np.array or list of numeric
        if isinstance(v, np.ndarray):
            continue
        # Accept scalar numeric types
        if isinstance(v, (int, float, np.integer, np.floating)):
            continue
        # Accept lists of numerics
        if isinstance(v, list) and all(isinstance(x, (int, float)) for x in v):
            continue
        # Accept dict metadata or strings (some MANO/SMPL meta entries)
        if isinstance(v, (dict, str, bytes, type(None))):
            continue
        # Accept scipy CSC sparse matrices without importing scipy
        if 'csc_matrix' in type(v).__name__:
            continue
        # Other type: record its name and count it as unrecognized.
        # BUG FIX: a leftover debugger breakpoint() was removed here --
        # it halted the program on every unrecognized entry.
        t = type(v).__name__
        types[t] = types.get(t, 0) + 1
        count += 1
    print(f" → {count} unrecognized entries with types: {types}")
    return count, types
if __name__ == "__main__":
    # CLI entry point: recursively scan a directory for converted model
    # files (.pkl / .npz), attempt to load and validate each one, and
    # print a pass/fail summary at the end.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--models_path",
        type=str,
        default='models',
        help="Path to the directory containing the converted models.",
    )
    args = parser.parse_args()

    root = Path(args.models_path)
    if not root.exists():
        print(f"[ERROR] Path does not exist: {root}")
        sys.exit(1)

    print(f"\n=== TESTING MODELS IN: {root} ===\n")

    # Gather every candidate model file in a single comprehension pass.
    model_files = [p for pattern in ("*.pkl", "*.npz") for p in root.rglob(pattern)]
    if not model_files:
        print("[WARN] No .pkl or .npz model files found.")
        sys.exit(0)

    total = len(model_files)
    failures = []
    for candidate in model_files:
        print(f"→ Testing: {candidate.relative_to(root)}")
        try:
            validate_model(load_model_file(candidate), candidate)
        except Exception:
            # Keep the full traceback text for the summary section below.
            failures.append((candidate, traceback.format_exc()))
            print(f" ✗ ERROR loading {candidate.name}")
        else:
            print(f" ✓ OK")

    # ---------- Summary ----------
    print("\n================================")
    print(" SUMMARY")
    print("================================\n")
    print(f"Total model files tested: {total}")
    print(f"Load/structure failures: {len(failures)}")
    if failures:
        print("\n--- FAILURES ---")
        for failed_file, tb_text in failures:
            print(f"\nFile: {failed_file}\n{tb_text}")
        print("\nSome models failed. Review logs above.")
    else:
        print("\nAll models loaded correctly.")
    print("\n=== DONE ===")