forked from hed-standard/hed-python
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathschema_script_util.py
More file actions
404 lines (320 loc) · 17.3 KB
/
schema_script_util.py
File metadata and controls
404 lines (320 loc) · 17.3 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
import os.path
from collections import defaultdict
from hed.schema import from_string, load_schema, from_dataframes
from hed.schema import hed_cache
from hed.errors import get_printable_issue_string, separate_issues, HedFileError
from hed.schema.schema_comparer import SchemaComparer
# The four schema serialization formats handled by the helpers in this module
# (used when enumerating roundtrip formats and filling in missing extensions).
all_extensions = [".tsv", ".mediawiki", ".xml", ".json"]
def _is_prerelease_partner(base_schema) -> bool:
    """Return True if base_schema's withStandard partner resolves only via the prerelease cache.

    Reloading a library schema that was saved with ``save_merged=False`` makes the
    loader fetch the standard schema version named in the ``withStandard`` header
    attribute. When that version exists only in the prerelease subdirectory of the
    cache, the reload fails unless ``check_prerelease=True`` is forwarded. This
    helper probes the cache without the prerelease flag: a miss means the
    prerelease flag is required.

    Parameters:
        base_schema (HedSchema): The schema to inspect.

    Returns:
        bool: True if ``withStandard`` is set and that version is not present in
            the regular (non-prerelease) cache directory.
    """
    partner_version = base_schema.with_standard
    if not partner_version:
        # No withStandard partner means no partner lookup happens on reload.
        return False
    regular_path = hed_cache.get_hed_version_path(partner_version, check_prerelease=False)
    return regular_path is None
def validate_schema_object(base_schema, schema_name, check_for_warnings=False):
    """Validate a schema object for compliance and roundtrip conversion.

    Runs the schema's compliance check, then (if no errors were found) verifies
    the schema survives conversion to and reloading from all four formats
    (MEDIAWIKI, XML, JSON, TSV), in both merged and unmerged form.

    Parameters:
        base_schema (HedSchema): The schema object to validate.
        schema_name (str): The name/path of the schema for error reporting.
        check_for_warnings (bool): If True, include warnings in the validation. Default is False.

    Returns:
        list: A list of validation issue strings. Empty if no issues found.
    """
    found_issues = []
    try:
        issues = base_schema.check_compliance(check_for_warnings=check_for_warnings)
        if check_for_warnings and issues:
            errors, warnings = separate_issues(issues)
            # Report errors ahead of warnings in the printable output.
            issues = errors + warnings
        else:
            errors = issues
        if issues:
            found_issues.append(get_printable_issue_string(issues, title=schema_name))
        if errors:
            # Compliance errors make roundtrip checks pointless; stop here.
            return found_issues
        # When the withStandard partner only exists in the prerelease cache,
        # every unmerged reload must pass check_prerelease=True or partner
        # resolution will fail.
        needs_prerelease = _is_prerelease_partner(base_schema)
        for merged in (True, False):
            variant_name = f"{schema_name} ({'merged' if merged else 'unmerged'})"
            found_issues += _roundtrip_all_formats(
                base_schema, variant_name, save_merged=merged, check_prerelease=needs_prerelease
            )
    except HedFileError as e:
        print(f"Saving/loading error: {schema_name} {e.message}")
        error_text = e.message
        if e.issues:
            error_text = get_printable_issue_string(e.issues, title=schema_name)
        found_issues.append(error_text)
    return found_issues
def validate_schema(file_path, check_for_warnings=False):
    """Validate a schema file, ensuring it can save/load and passes validation.

    Rejects files whose extension is not fully lowercase, then loads the schema
    and delegates to validate_schema_object for compliance and roundtrip checks.

    Parameters:
        file_path (str): The path to the schema file to validate.
            If loading a TSV file, this should be a single filename where:
            Template: basename.tsv, where files are named basename_Struct.tsv, basename_Tag.tsv, etc.
            Alternatively, you can point to a directory containing the .tsv files.
        check_for_warnings (bool): If True, include warnings in the validation. Default is False.

    Returns:
        list: A list of validation issue strings. Empty if no issues found.
    """
    extension = os.path.splitext(file_path)[1]
    if extension != extension.lower():
        return [
            f"Only fully lowercase extensions are allowed for schema files. Invalid extension on file: {file_path}"
        ]
    try:
        schema = load_schema(file_path)
    except HedFileError as e:
        print(f"Saving/loading error: {file_path} {e.message}")
        if e.issues:
            return [get_printable_issue_string(e.issues, title=file_path)]
        return [e.message]
    return validate_schema_object(schema, file_path, check_for_warnings=check_for_warnings)
def add_extension(basename, extension):
    """Generate the final filename for a given extension.

    TSV schemas live in a 'hedtsv' subdirectory named after the schema, while
    every other format simply appends the extension to the basename.

    Note: the extension's case is preserved in the returned path to stay
    compatible with case-sensitive filesystems; lowercasing is used only to
    decide which branch applies.

    Parameters:
        basename (str): The base path/name of the schema file without extension.
        extension (str): The file extension including the dot (e.g., '.xml', '.tsv').
            Case is preserved as-is.

    Returns:
        str: The complete file path with extension applied.

    Raises:
        TypeError: If extension is not a string.
    """
    if not isinstance(extension, str):
        raise TypeError(f"extension must be a string, got {type(extension).__name__}")
    # Lowercase only for the comparison, never for path construction.
    if extension.lower() != ".tsv":
        return basename + extension
    parent_dir, schema_dir = os.path.split(basename)
    return os.path.join(parent_dir, "hedtsv", schema_dir)
def sort_base_schemas(filenames, add_all_extensions=False):
    """Sort and group changed schema files by their basename.

    Groups schema files by base name, recording which formats (extensions) were
    modified. TSV files must follow the special 'hedtsv/<name>/<name>_*.tsv'
    layout; other files are ignored with a printed message.

    The result maps basename -> normalized_extension -> actual filepath, which
    keeps the original file casing for case-sensitive filesystems while still
    allowing normalized extension comparisons.

    Example input:
        ["test_schema.mediawiki", "hedtsv/test_schema/test_schema_Tag.tsv", "other_schema.XML"]
    Example output:
        {
            "test_schema": {".mediawiki": "test_schema.mediawiki", ".tsv": "hedtsv/.../test_schema_Tag.tsv"},
            "other_schema": {".xml": "other_schema.XML"}
        }

    Parameters:
        filenames (list or container): The changed filenames to process.
        add_all_extensions (bool): If True, always return all 4 extensions for any schemas found.
            Default is False.

    Returns:
        dict: A nested dictionary where keys are basenames (str), values are dicts mapping
            normalized extensions (str, lowercase) to actual file paths (str, preserving case).
            Can include .tsv, .mediawiki, .xml, and .json as keys.
    """
    grouped = defaultdict(dict)
    for file_path in filenames:
        if not os.path.exists(file_path):
            print(f"Ignoring deleted file {file_path}.")
            continue
        stem, raw_extension = os.path.splitext(file_path)
        normalized = raw_extension.lower()  # normalize for comparison only
        if normalized in (".xml", ".mediawiki"):
            grouped[stem][normalized] = file_path
        elif normalized == ".tsv":
            # Strip the trailing "_Suffix" fragment, then walk up the path:
            # <root>/hedtsv/<name>/<name>_*.tsv
            trimmed = stem.rpartition("_")[0]
            subfolder_path, leaf_name = os.path.split(trimmed)
            container_path, folder_name = os.path.split(subfolder_path)
            root_path, marker = os.path.split(container_path)
            if marker != "hedtsv":
                print(f"Ignoring file {file_path}. .tsv files must be in an 'hedtsv' subfolder.")
                continue
            if leaf_name != folder_name:
                print(f"Ignoring file {file_path}. .tsv files must be in a subfolder with the same name.")
                continue
            schema_key = os.path.join(root_path, leaf_name)
            # Store the TSV directory (not the individual file) because
            # load_schema expects the directory containing all TSV files.
            grouped[schema_key][normalized] = add_extension(schema_key, ".tsv")
        else:
            print(f"Ignoring file {file_path}")
    if add_all_extensions:
        for schema_key in grouped:
            for candidate in all_extensions:
                # Keep any real path already recorded; synthesize the rest.
                grouped[schema_key].setdefault(candidate, add_extension(schema_key, candidate))
    return grouped
def validate_all_schema_formats(basename):
    """Validate that all 4 format versions of a schema are identical.

    Loads the schema from all four formats (MEDIAWIKI, XML, JSON, TSV) and
    verifies they are equivalent. Used when multiple formats are modified
    simultaneously to ensure consistency.

    Parameters:
        basename (str): The base path/name of the schema (without extension) to check.

    Returns:
        list: A list of issue strings if formats differ or loading fails. Empty if all identical.
    """
    # Note: if more than one format is changed, this intentionally checks all 4,
    # even formats that weren't changed.
    paths = [add_extension(basename, extension) for extension in all_extensions]
    try:
        # Keep the try body minimal: only loading can raise HedFileError.
        schemas = [load_schema(path) for path in paths]
    except HedFileError as e:
        return [f"Error loading schema: {e.message}"]
    if any(schema != schemas[0] for schema in schemas[1:]):
        # all_extensions covers 4 formats (.tsv, .mediawiki, .xml, .json); the
        # message previously claimed only 3, omitting json.
        return [
            f"Multiple schemas of type {basename} were modified, and are not equal.\n"
            f"Only modify one source schema type at a time (mediawiki, xml, json, tsv), "
            f"or modify all 4 at once."
        ]
    return []
def validate_all_schemas(schema_files):
    """Validate all schema files and formats in the schema dictionary.

    Validates each changed file individually and, when several formats of a
    prerelease schema were edited, additionally checks that all formats exist
    and are identical.

    Parameters:
        schema_files (dict): Dictionary mapping basenames (str) to dicts of
            {normalized_extension (str) -> actual_filepath (str)} representing
            all files changed.

    Returns:
        list: A list of all validation issues found across all schemas.
    """
    all_issues = []
    for basename, extension_paths in schema_files.items():
        schema_issues = []
        # Validate via the recorded file paths to preserve their original case.
        for file_path in extension_paths.values():
            schema_issues += validate_schema(file_path)
        edited_multiple_formats = len(extension_paths) > 1
        if edited_multiple_formats and not schema_issues and "prerelease" in basename:
            schema_issues += validate_all_schema_formats(basename)
        print(f"Validating: {basename}...")
        print(f"Extensions: {set(extension_paths.keys())}")
        for issue in schema_issues:
            print(issue)
        all_issues += schema_issues
    return all_issues
def get_schema_filename(schema_name, schema_version):
    """Assemble the standard filename for a schema given its name and version.

    Builds the conventional HED schema filename without extension or folder
    path: "HED<version>" for the standard schema, "HED_<name>_<version>" for
    library schemas. The name is lowercased first.

    Example:
        get_schema_filename("standard", "8.3.0") returns "HED8.3.0"
        get_schema_filename("score", "1.0.0") returns "HED_score_1.0.0"

    Parameters:
        schema_name (str): The name of the schema. Use "standard" or "" for the standard schema.
        schema_version (str): The semantic version number (e.g., "8.3.0").

    Returns:
        str: The assembled filename without extension or folder path.
    """
    normalized_name = schema_name.lower()
    if normalized_name in ("standard", ""):
        return f"HED{schema_version}"
    return f"HED_{normalized_name}_{schema_version}"
def get_prerelease_path(repo_path, schema_name, schema_version):
    """Get the full path to a prerelease schema's TSV directory in the repository.

    Builds the conventional hed-schemas repository path for prerelease TSV
    files: <repo>/<standard_schema|library_schemas/<name>>/prerelease/hedtsv/<filename>.

    Parameters:
        repo_path (str): The path to the hed-schemas folder. Should point into the repository.
        schema_name (str): The name of the schema. Use "standard" or "" for the standard schema.
        schema_version (str): The semantic version number (e.g., "8.3.0").

    Returns:
        str: The fully assembled path to the schema's TSV directory.
    """
    normalized_name = schema_name.lower()
    if normalized_name in ("", "standard"):
        schema_folder = "standard_schema"
    else:
        schema_folder = os.path.join("library_schemas", normalized_name)
    prerelease_dir = os.path.join(repo_path, schema_folder, "prerelease")
    schema_filename = get_schema_filename(normalized_name, schema_version)
    return os.path.join(prerelease_dir, "hedtsv", schema_filename)
def _roundtrip_all_formats(base_schema, schema_name, save_merged=True, check_prerelease=False):
    """Roundtrip a schema through all four formats and compare to the original.

    Serializes the schema to mediawiki, XML, JSON, and TSV, reloads each, and
    verifies the reloaded schema matches the original.

    Parameters:
        base_schema (HedSchema): The schema object to roundtrip.
        schema_name (str): Label for error reporting (should include merge context).
        save_merged (bool): If True, save the merged (with-standard) form.
            If False, save only the library-specific content.
        check_prerelease (bool): If True, pass check_prerelease=True to all reload
            calls. Required when the schema's withStandard partner exists only in
            the prerelease cache directory; otherwise unmerged reloads will fail
            partner resolution. Has no effect when save_merged=True because the
            merged serialisation embeds the full standard content and no partner
            lookup is performed on reload.

    Returns:
        list: A list of validation issue strings. Empty if no issues found.
    """
    issues = []
    # The three string-based formats share the same serialize/reload/compare shape.
    string_roundtrips = (
        (".mediawiki", "mediawiki", base_schema.get_as_mediawiki_string),
        (".xml", "xml", base_schema.get_as_xml_string),
        (".json", "json", base_schema.get_as_json_string),
    )
    for schema_format, format_label, serialize in string_roundtrips:
        serialized = serialize(save_merged=save_merged)
        reloaded = from_string(serialized, schema_format=schema_format, check_prerelease=check_prerelease)
        issues += _get_schema_comparison(base_schema, reloaded, schema_name, format_label)
    # TSV roundtrips through dataframes rather than a single string.
    dataframes = base_schema.get_as_dataframes(save_merged=save_merged)
    reloaded = from_dataframes(dataframes, check_prerelease=check_prerelease)
    issues += _get_schema_comparison(base_schema, reloaded, schema_name, "tsv")
    return issues
def _get_schema_comparison(schema, schema_reload, file_path, file_format):
"""Compare two schema objects and generate error message if they differ.
Private helper function for validating schema roundtrip conversion.
Uses SchemaComparer to identify differences when schemas don't match.
Parameters:
schema (HedSchema): The original schema object.
schema_reload (HedSchema): The reloaded schema object to compare against.
file_path (str): The file path being validated (for error messages).
file_format (str): The format being tested (e.g., "xml", "mediawiki").
Returns:
list: A list containing an error message if schemas differ, empty list if identical.
"""
if schema_reload != schema:
error_text = (
f"Failed to reload {file_path} as {file_format}. "
f"There is either a problem with the source file, or the saving/loading code."
)
title_prompt = (
"If the problem is in the schema file, "
"the following comparison should indicate the approximate source of the issues:"
)
error_text += "\n" + SchemaComparer(schema, schema_reload).compare_differences(title=title_prompt)
return [error_text]
return []