
Everywhere: Format all python files with black

Timothy Flynn 2025-05-22 07:30:45 -04:00 committed by Jelle Raaijmakers
parent 9e8336c04f
commit 2f9957c618
Notes: github-actions[bot] 2025-05-22 14:22:50 +00:00
18 changed files with 338 additions and 350 deletions
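
Every hunk below follows from a handful of mechanical black rules: string quotes are normalized to double quotes, call sites already split across lines gain a trailing "magic" comma, and expressions are joined or exploded purely by line length. A minimal, self-contained sketch of those rules (the names are illustrative, not from the repository, and the ~120-column limit is an assumption inferred from the lines black chose to join below):

from collections import namedtuple

# 1. String quotes are normalized to double quotes.
SOURCE_EXTENSIONS = [".cpp", ".c"]  # black rewrote: ['.cpp', '.c']

# 2. A call already split across lines gains a trailing "magic" comma,
#    which pins the one-argument-per-line layout in future runs.
fields = ["id", "name"]
Tag = namedtuple(
    "Tag",
    field_names=fields,
    defaults=(None,) * len(fields),  # trailing comma added by black
)

# 3. Anything that fits within the line-length limit is joined onto one
#    line; anything that does not is exploded, one element per line.
tag = Tag("256", "ImageWidth")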


@@ -35,14 +35,14 @@ import os
import ycm_core
DIR_OF_THIS_SCRIPT = os.path.abspath(os.path.dirname(__file__))
SOURCE_EXTENSIONS = ['.cpp', '.c']
SOURCE_EXTENSIONS = [".cpp", ".c"]
database = ycm_core.CompilationDatabase(os.path.join(DIR_OF_THIS_SCRIPT, 'Build/ladybird'))
database = ycm_core.CompilationDatabase(os.path.join(DIR_OF_THIS_SCRIPT, "Build/ladybird"))
def is_header_file(filename):
extension = os.path.splitext(filename)[1]
return extension in ['.h', '.hxx', '.hpp', '.hh']
return extension in [".h", ".hxx", ".hpp", ".hh"]
def find_corresponding_source_file(filename):
@@ -56,7 +56,7 @@ def find_corresponding_source_file(filename):
def Settings(**kwargs): # noqa: N802
if kwargs['language'] != 'cfamily':
if kwargs["language"] != "cfamily":
return {}
# If the file is a header, try to find the corresponding source file and
# retrieve its flags from the compilation database if using one. This is
@@ -64,14 +64,14 @@ def Settings(**kwargs): # noqa: N802
# In addition, use this source file as the translation unit. This makes it
# possible to jump from a declaration in the header file to its definition
# in the corresponding source file.
filename = find_corresponding_source_file(kwargs['filename'])
filename = find_corresponding_source_file(kwargs["filename"])
compilation_info = database.GetCompilationInfoForFile(filename)
if not compilation_info.compiler_flags_:
return {}
return {
'flags': list(compilation_info.compiler_flags_),
'include_paths_relative_to_dir': DIR_OF_THIS_SCRIPT,
'override_filename': filename
"flags": list(compilation_info.compiler_flags_),
"include_paths_relative_to_dir": DIR_OF_THIS_SCRIPT,
"override_filename": filename,
}


@@ -113,54 +113,63 @@ class ExtraSample(EnumWithExportName):
UnassociatedAlpha = 2
tag_fields = ['id', 'types', 'counts', 'default', 'name', 'associated_enum', 'is_required']
tag_fields = ["id", "types", "counts", "default", "name", "associated_enum", "is_required"]
Tag = namedtuple(
'Tag',
"Tag",
field_names=tag_fields,
defaults=(None,) * len(tag_fields)
defaults=(None,) * len(tag_fields),
)
# FIXME: Some tag have only a few allowed values, we should ensure that
known_tags: List[Tag] = [
Tag('256', [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "ImageWidth", is_required=True),
Tag('257', [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "ImageLength", is_required=True),
Tag('258', [TIFFType.UnsignedShort], [], None, "BitsPerSample", is_required=False),
Tag('259', [TIFFType.UnsignedShort], [1], None, "Compression", Compression, is_required=True),
Tag('262', [TIFFType.UnsignedShort], [1], None, "PhotometricInterpretation",
PhotometricInterpretation, is_required=True),
Tag('266', [TIFFType.UnsignedShort], [1], FillOrder.LeftToRight, "FillOrder", FillOrder),
Tag('271', [TIFFType.ASCII], [], None, "Make"),
Tag('272', [TIFFType.ASCII], [], None, "Model"),
Tag('273', [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [], None, "StripOffsets", is_required=False),
Tag('274', [TIFFType.UnsignedShort], [1], Orientation.Default, "Orientation", Orientation),
Tag('277', [TIFFType.UnsignedShort], [1], None, "SamplesPerPixel", is_required=False),
Tag('278', [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "RowsPerStrip", is_required=False),
Tag('279', [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [], None, "StripByteCounts", is_required=False),
Tag('282', [TIFFType.UnsignedRational], [1], None, "XResolution"),
Tag('283', [TIFFType.UnsignedRational], [1], None, "YResolution"),
Tag('284', [TIFFType.UnsignedShort], [1], PlanarConfiguration.Chunky, "PlanarConfiguration", PlanarConfiguration),
Tag('285', [TIFFType.ASCII], [], None, "PageName"),
Tag('292', [TIFFType.UnsignedLong], [1], 0, "T4Options"),
Tag('296', [TIFFType.UnsignedShort], [1], ResolutionUnit.Inch, "ResolutionUnit", ResolutionUnit),
Tag('305', [TIFFType.ASCII], [], None, "Software"),
Tag('306', [TIFFType.ASCII], [20], None, "DateTime"),
Tag('315', [TIFFType.ASCII], [], None, "Artist"),
Tag('317', [TIFFType.UnsignedShort], [1], Predictor.NoPrediction, "Predictor", Predictor),
Tag('320', [TIFFType.UnsignedShort], [], None, "ColorMap"),
Tag('322', [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "TileWidth"),
Tag('323', [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "TileLength"),
Tag('324', [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [], None, "TileOffsets"),
Tag('325', [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [], None, "TileByteCounts"),
Tag('338', [TIFFType.UnsignedShort], [], None, "ExtraSamples", ExtraSample),
Tag('339', [TIFFType.UnsignedShort], [], SampleFormat.Unsigned, "SampleFormat", SampleFormat),
Tag('34665', [TIFFType.UnsignedLong, TIFFType.IFD], [1], None, "ExifIFD"),
Tag('34675', [TIFFType.Undefined], [], None, "ICCProfile"),
Tag("256", [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "ImageWidth", is_required=True),
Tag("257", [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "ImageLength", is_required=True),
Tag("258", [TIFFType.UnsignedShort], [], None, "BitsPerSample", is_required=False),
Tag("259", [TIFFType.UnsignedShort], [1], None, "Compression", Compression, is_required=True),
Tag(
"262",
[TIFFType.UnsignedShort],
[1],
None,
"PhotometricInterpretation",
PhotometricInterpretation,
is_required=True,
),
Tag("266", [TIFFType.UnsignedShort], [1], FillOrder.LeftToRight, "FillOrder", FillOrder),
Tag("271", [TIFFType.ASCII], [], None, "Make"),
Tag("272", [TIFFType.ASCII], [], None, "Model"),
Tag("273", [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [], None, "StripOffsets", is_required=False),
Tag("274", [TIFFType.UnsignedShort], [1], Orientation.Default, "Orientation", Orientation),
Tag("277", [TIFFType.UnsignedShort], [1], None, "SamplesPerPixel", is_required=False),
Tag("278", [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "RowsPerStrip", is_required=False),
Tag("279", [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [], None, "StripByteCounts", is_required=False),
Tag("282", [TIFFType.UnsignedRational], [1], None, "XResolution"),
Tag("283", [TIFFType.UnsignedRational], [1], None, "YResolution"),
Tag("284", [TIFFType.UnsignedShort], [1], PlanarConfiguration.Chunky, "PlanarConfiguration", PlanarConfiguration),
Tag("285", [TIFFType.ASCII], [], None, "PageName"),
Tag("292", [TIFFType.UnsignedLong], [1], 0, "T4Options"),
Tag("296", [TIFFType.UnsignedShort], [1], ResolutionUnit.Inch, "ResolutionUnit", ResolutionUnit),
Tag("305", [TIFFType.ASCII], [], None, "Software"),
Tag("306", [TIFFType.ASCII], [20], None, "DateTime"),
Tag("315", [TIFFType.ASCII], [], None, "Artist"),
Tag("317", [TIFFType.UnsignedShort], [1], Predictor.NoPrediction, "Predictor", Predictor),
Tag("320", [TIFFType.UnsignedShort], [], None, "ColorMap"),
Tag("322", [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "TileWidth"),
Tag("323", [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [1], None, "TileLength"),
Tag("324", [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [], None, "TileOffsets"),
Tag("325", [TIFFType.UnsignedShort, TIFFType.UnsignedLong], [], None, "TileByteCounts"),
Tag("338", [TIFFType.UnsignedShort], [], None, "ExtraSamples", ExtraSample),
Tag("339", [TIFFType.UnsignedShort], [], SampleFormat.Unsigned, "SampleFormat", SampleFormat),
Tag("34665", [TIFFType.UnsignedLong, TIFFType.IFD], [1], None, "ExifIFD"),
Tag("34675", [TIFFType.Undefined], [], None, "ICCProfile"),
]
HANDLE_TAG_SIGNATURE_TEMPLATE = ("ErrorOr<void> {namespace}handle_tag(Function<ErrorOr<void>(u32)>&& subifd_handler, "
HANDLE_TAG_SIGNATURE_TEMPLATE = (
"ErrorOr<void> {namespace}handle_tag(Function<ErrorOr<void>(u32)>&& subifd_handler, "
"ExifMetadata& metadata, u16 tag, {namespace}Type type, u32 count, "
"Vector<{namespace}Value>&& value)")
"Vector<{namespace}Value>&& value)"
)
HANDLE_TAG_SIGNATURE = HANDLE_TAG_SIGNATURE_TEMPLATE.format(namespace="")
HANDLE_TAG_SIGNATURE_TIFF_NAMESPACE = HANDLE_TAG_SIGNATURE_TEMPLATE.format(namespace="TIFF::")
@@ -176,10 +185,10 @@ LICENSE = R"""/*
def export_enum_to_cpp(e: Type[EnumWithExportName]) -> str:
output = f'enum class {e.export_name()} {{\n'
output = f"enum class {e.export_name()} {{\n"
for entry in e:
output += f' {entry.name} = {entry.value},\n'
output += f" {entry.name} = {entry.value},\n"
output += "};\n"
return output
@@ -188,12 +197,12 @@ def export_enum_to_cpp(e: Type[EnumWithExportName]) -> str:
def export_enum_to_string_converter(enums: List[Type[EnumWithExportName]]) -> str:
stringifier_internals = []
for e in enums:
single_stringifier = fR""" if constexpr (IsSame<E, {e.export_name()}>) {{
single_stringifier = Rf""" if constexpr (IsSame<E, {e.export_name()}>) {{
switch (value) {{
default:
return "Invalid value for {e.export_name()}"sv;"""
for entry in e:
single_stringifier += fR"""
single_stringifier += Rf"""
case {e.export_name()}::{entry.name}:
return "{entry.name}"sv;"""
@@ -202,9 +211,9 @@ def export_enum_to_string_converter(enums: List[Type[EnumWithExportName]]) -> st
}"""
stringifier_internals.append(single_stringifier)
stringifier_internals_str = '\n'.join(stringifier_internals)
stringifier_internals_str = "\n".join(stringifier_internals)
out = fR"""template<Enum E>
out = Rf"""template<Enum E>
StringView name_for_enum_tag_value(E value) {{
{stringifier_internals_str}
VERIFY_NOT_REACHED();
@@ -219,7 +228,7 @@ def export_tag_related_enums(tags: List[Tag]) -> str:
if tag.associated_enum:
exported_enums.append(export_enum_to_cpp(tag.associated_enum))
return '\n'.join(exported_enums)
return "\n".join(exported_enums)
def promote_type(t: TIFFType) -> TIFFType:
@@ -236,19 +245,19 @@ def tiff_type_to_cpp(t: TIFFType, with_promotion: bool = True) -> str:
if with_promotion:
t = promote_type(t)
if t in [TIFFType.ASCII, TIFFType.UTF8]:
return 'String'
return "String"
if t == TIFFType.Undefined:
return 'ByteBuffer'
return "ByteBuffer"
if t == TIFFType.UnsignedShort:
return 'u16'
return "u16"
if t == TIFFType.UnsignedLong or t == TIFFType.IFD:
return 'u32'
return "u32"
if t == TIFFType.UnsignedRational:
return 'TIFF::Rational<u32>'
return "TIFF::Rational<u32>"
if t == TIFFType.Float:
return 'float'
return "float"
if t == TIFFType.Double:
return 'double'
return "double"
raise RuntimeError(f'Type "{t}" not recognized, please update tiff_type_to_read_only_cpp()')
@@ -284,13 +293,13 @@ def retrieve_biggest_type(types: List[TIFFType]) -> TIFFType:
def pascal_case_to_snake_case(name: str) -> str:
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
name = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", name).lower()
def default_value_to_cpp(value: Any) -> str:
if isinstance(value, EnumWithExportName):
return f'TIFF::{value.export_name()}::{value.name}'
return f"TIFF::{value.export_name()}::{value.name}"
return str(value)
@@ -309,36 +318,36 @@ def generate_getter(tag: Tag) -> str:
if single_count:
return_type = tag_final_type
if is_container(biggest_type):
return_type += ' const&'
return_type += " const&"
unpacked_if_needed = f"return {extracted_value_template.format(0)};"
else:
if len(tag.counts) == 1:
container_type = f'Array<{tag_final_type}, {tag.counts[0]}>'
container_initialization = f'{container_type} tmp{{}};'
container_type = f"Array<{tag_final_type}, {tag.counts[0]}>"
container_initialization = f"{container_type} tmp{{}};"
else:
container_type = f'Vector<{tag_final_type}>'
container_initialization = fR"""{container_type} tmp{{}};
container_type = f"Vector<{tag_final_type}>"
container_initialization = Rf"""{container_type} tmp{{}};
auto maybe_failure = tmp.try_resize(possible_value->size());
if (maybe_failure.is_error())
return OptionalNone {{}};
"""
return_type = container_type
unpacked_if_needed = fR"""
unpacked_if_needed = Rf"""
{container_initialization}
for (u32 i = 0; i < possible_value->size(); ++i)
tmp[i] = {extracted_value_template.format('i')};
return tmp;"""
signature = fR" Optional<{return_type}> {pascal_case_to_snake_case(tag.name)}() const"
signature = Rf" Optional<{return_type}> {pascal_case_to_snake_case(tag.name)}() const"
if tag.default is not None and single_count:
return_if_empty = f'{default_value_to_cpp(tag.default)}'
return_if_empty = f"{default_value_to_cpp(tag.default)}"
else:
return_if_empty = 'OptionalNone {}'
return_if_empty = "OptionalNone {}"
body = fR"""
body = Rf"""
{{
auto const& possible_value = m_data.get("{tag.name}"sv);
if (!possible_value.has_value())
@@ -351,9 +360,9 @@ def generate_getter(tag: Tag) -> str:
def generate_metadata_class(tags: List[Tag]) -> str:
getters = '\n'.join([generate_getter(tag) for tag in tags])
getters = "\n".join([generate_getter(tag) for tag in tags])
output = fR"""class ExifMetadata : public Metadata {{
output = Rf"""class ExifMetadata : public Metadata {{
public:
virtual ~ExifMetadata() = default;
@@ -386,7 +395,7 @@ private:
def generate_metadata_file(tags: List[Tag]) -> str:
output = fR"""{LICENSE}
output = Rf"""{LICENSE}
#pragma once
@@ -468,17 +477,17 @@ struct AK::Formatter<Gfx::TIFF::Value> : Formatter<FormatString> {{
def generate_tag_handler(tag: Tag) -> str:
not_in_type_list = f"({' && '.join([f'type != Type::{t.name}' for t in tag.types])})"
not_in_count_list = ''
not_in_count_list = ""
if len(tag.counts) != 0:
not_in_count_list = f"|| ({' && '.join([f'count != {c}' for c in tag.counts])})"
pre_condition = fR"""if ({not_in_type_list}
pre_condition = Rf"""if ({not_in_type_list}
{not_in_count_list})
return Error::from_string_literal("TIFFImageDecoderPlugin: Tag {tag.name} invalid");"""
check_value = ''
check_value = ""
if tag.associated_enum is not None:
not_in_value_list = f"({' && '.join([f'v != {v.value}' for v in tag.associated_enum])})"
check_value = fR"""
check_value = Rf"""
for (u32 i = 0; i < value.size(); ++i) {{
TRY(value[i].visit(
[]({tiff_type_to_cpp(tag.types[0])} const& v) -> ErrorOr<void> {{
@@ -493,13 +502,13 @@ def generate_tag_handler(tag: Tag) -> str:
}}
"""
handle_subifd = ''
handle_subifd = ""
if TIFFType.IFD in tag.types:
if tag.counts != [1]:
raise RuntimeError("Accessing `value[0]` in the C++ code might fail!")
handle_subifd = f'TRY(subifd_handler(value[0].get<{tiff_type_to_cpp(TIFFType.IFD)}>()));'
handle_subifd = f"TRY(subifd_handler(value[0].get<{tiff_type_to_cpp(TIFFType.IFD)}>()));"
output = fR""" case {tag.id}:
output = Rf""" case {tag.id}:
// {tag.name}
dbgln_if(TIFF_DEBUG, "{tag.name}({{}}): {{}}", name_for_enum_tag_value(type), format_tiff_value(tag, value));
@@ -515,22 +524,42 @@ def generate_tag_handler(tag: Tag) -> str:
def generate_tag_handler_file(tags: List[Tag]) -> str:
formatter_for_tag_with_enum = '\n'.join([fR""" case {tag.id}:
formatter_for_tag_with_enum = "\n".join(
[
Rf""" case {tag.id}:
return MUST(String::from_utf8(
name_for_enum_tag_value(static_cast<{tag.associated_enum.export_name()}>(v.get<u32>()))));"""
for tag in tags if tag.associated_enum])
for tag in tags
if tag.associated_enum
]
)
ensure_tags_are_present = '\n'.join([fR""" if (!metadata.{pascal_case_to_snake_case(tag.name)}().has_value())
ensure_tags_are_present = "\n".join(
[
Rf""" if (!metadata.{pascal_case_to_snake_case(tag.name)}().has_value())
return Error::from_string_literal("Unable to decode image, missing required tag {tag.name}.");
""" for tag in filter(lambda tag: tag.is_required, known_tags)])
"""
for tag in filter(lambda tag: tag.is_required, known_tags)
]
)
tiff_type_from_u16_cases = '\n'.join([fR""" case to_underlying(Type::{t.name}):
return Type::{t.name};""" for t in TIFFType])
tiff_type_from_u16_cases = "\n".join(
[
Rf""" case to_underlying(Type::{t.name}):
return Type::{t.name};"""
for t in TIFFType
]
)
size_of_tiff_type_cases = '\n'.join([fR""" case Type::{t.name}:
return {t.size};""" for t in TIFFType])
size_of_tiff_type_cases = "\n".join(
[
Rf""" case Type::{t.name}:
return {t.size};"""
for t in TIFFType
]
)
output = fR"""{LICENSE}
output = Rf"""{LICENSE}
#include <AK/Debug.h>
#include <AK/String.h>
@@ -592,7 +621,7 @@ static String value_formatter(u32 tag_id, Value const& v) {{
switch (tag) {{
"""
output += '\n'.join([generate_tag_handler(t) for t in tags])
output += "\n".join([generate_tag_handler(t) for t in tags])
output += R"""
default:
@@ -612,26 +641,26 @@ def update_file(target: Path, new_content: str):
should_update = True
if target.exists():
with target.open('r') as file:
with target.open("r") as file:
content = file.read()
if content == new_content:
should_update = False
if should_update:
with target.open('w') as file:
with target.open("w") as file:
file.write(new_content)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output')
parser.add_argument("-o", "--output")
args = parser.parse_args()
output_path = Path(args.output)
update_file(output_path / 'TIFFMetadata.h', generate_metadata_file(known_tags))
update_file(output_path / 'TIFFTagHandler.cpp', generate_tag_handler_file(known_tags))
update_file(output_path / "TIFFMetadata.h", generate_metadata_file(known_tags))
update_file(output_path / "TIFFTagHandler.cpp", generate_tag_handler_file(known_tags))
if __name__ == '__main__':
if __name__ == "__main__":
main()
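
A subtler rewrite in this file is the string prefix change from fR""" to Rf""": black lowercases string prefix letters but leaves a capital R untouched (it carries meaning for some syntax highlighters), and, as these hunks show, it also moves the R to the front. Both spellings are identical to the Python parser; a quick sketch:

# Both prefixes produce the same raw f-string; black just picks one spelling.
tag_name = "ImageWidth"  # hypothetical value for illustration
before = fR"""case {tag_name}:"""  # spelling before this commit
after = Rf"""case {tag_name}:"""   # spelling black settles on
assert before == after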


@@ -7,8 +7,8 @@ import sys
# FIXME: Include Layout tests in general.
# They are currently deferred to a later PR to make review easier for now.
RE_RELEVANT_FILE = re.compile('^Tests/LibWeb/(Ref|Screenshot|Text)/(.(?!wpt-import/))*\\.html$')
RE_DOCTYPE = re.compile('^<!doctype .*>', re.IGNORECASE)
RE_RELEVANT_FILE = re.compile("^Tests/LibWeb/(Ref|Screenshot|Text)/(.(?!wpt-import/))*\\.html$")
RE_DOCTYPE = re.compile("^<!doctype .*>", re.IGNORECASE)
def should_check_file(filename):
@@ -20,7 +20,7 @@ def find_files_here_or_argv():
raw_list = sys.argv[1:]
else:
process = subprocess.run(["git", "ls-files"], check=True, capture_output=True)
raw_list = process.stdout.decode().strip('\n').split('\n')
raw_list = process.stdout.decode().strip("\n").split("\n")
return filter(should_check_file, raw_list)
@@ -29,19 +29,21 @@ def run():
files_with_missing_doctypes = []
for filename in find_files_here_or_argv():
with open(filename, 'r') as file:
with open(filename, "r") as file:
if not RE_DOCTYPE.search(file.readline()):
files_with_missing_doctypes.append(filename)
if files_with_missing_doctypes:
print('The following HTML files should include a doctype declaration at the start of the file but don\'t:\n' +
'You should add <!DOCTYPE html> to the very beginning of these files, except if they absolutely need ' +
'to run in quirks mode. In that case, you can clearly indicate so with a bogus doctype that says ' +
'"quirks" instead of "html".\n',
' '.join(files_with_missing_doctypes))
print(
"The following HTML files should include a doctype declaration at the start of the file but don't:\n"
+ "You should add <!DOCTYPE html> to the very beginning of these files, except if they absolutely need "
+ "to run in quirks mode. In that case, you can clearly indicate so with a bogus doctype that says "
+ '"quirks" instead of "html".\n',
" ".join(files_with_missing_doctypes),
)
sys.exit(1)
if __name__ == '__main__':
if __name__ == "__main__":
os.chdir(os.path.dirname(__file__) + "/..")
run()
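
The reflowed print() call above also shows where black puts operators when an expression must wrap: the binary operator begins the continuation line instead of trailing the previous one (note the leading + signs replacing the old trailing ones). A small sketch with hypothetical strings:

message = (
    "a diagnostic message long enough to overflow the line limit "
    + "is broken before the operator, "
    + "so each continuation line starts with '+'"
)
print(message)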


@@ -16,16 +16,16 @@ lines_to_skip = re.compile(
parser = argparse.ArgumentParser()
parser.add_argument("--overwrite-inplace", action=argparse.BooleanOptionalAction)
parser.add_argument('filenames', nargs='*')
parser.add_argument("filenames", nargs="*")
args = parser.parse_args()
SINGLE_PAGE_HTML_SPEC_LINK = re.compile('//.*https://html\\.spec\\.whatwg\\.org/#')
SINGLE_PAGE_HTML_SPEC_LINK = re.compile("//.*https://html\\.spec\\.whatwg\\.org/#")
def should_check_file(filename):
if not filename.endswith(".idl"):
return False
if filename.startswith('Tests/LibWeb/'):
if filename.startswith("Tests/LibWeb/"):
return False
return True
@@ -62,23 +62,24 @@ def run():
continue
did_fail = True
files_without_four_leading_spaces.add(filename)
print(
f"{filename}:{line_number} error: Line does not start with four spaces:{line.rstrip()}")
print(f"{filename}:{line_number} error: Line does not start with four spaces:{line.rstrip()}")
lines.append(line)
if args.overwrite_inplace:
with open(filename, "w") as f:
f.writelines(lines)
if files_without_four_leading_spaces:
print("\nWebIDL files that have lines without four leading spaces:",
" ".join(files_without_four_leading_spaces))
if not args.overwrite_inplace:
print(
f"\nTo fix the WebIDL files in place, run: ./Meta/{script_name} --overwrite-inplace")
"\nWebIDL files that have lines without four leading spaces:", " ".join(files_without_four_leading_spaces)
)
if not args.overwrite_inplace:
print(f"\nTo fix the WebIDL files in place, run: ./Meta/{script_name} --overwrite-inplace")
if files_with_single_page_html_spec_link:
print("\nWebIDL files that have links to the single-page HTML spec:",
" ".join(files_with_single_page_html_spec_link))
print(
"\nWebIDL files that have links to the single-page HTML spec:",
" ".join(files_with_single_page_html_spec_link),
)
if did_fail:
sys.exit(1)


@@ -6,22 +6,22 @@ import subprocess
import sys
RE_RELEVANT_FILE_EXTENSION = re.compile('\\.(cpp|h|mm|swift|gml|html|js|css|sh|py|json|txt|cmake|gn|gni)$')
RE_RELEVANT_FILE_EXTENSION = re.compile("\\.(cpp|h|mm|swift|gml|html|js|css|sh|py|json|txt|cmake|gn|gni)$")
def should_check_file(filename):
if not RE_RELEVANT_FILE_EXTENSION.search(filename):
return False
if filename.startswith('Tests/LibWeb/Layout/'):
if filename.startswith("Tests/LibWeb/Layout/"):
return False
if filename.startswith('Tests/LibWeb/Ref/'):
if filename.startswith("Tests/LibWeb/Ref/"):
return False
if filename.startswith('Tests/LibWeb/Text/'):
if filename.startswith("Tests/LibWeb/Text/"):
return False
if filename.startswith('Meta/CMake/vcpkg/overlay-ports/'):
if filename.startswith("Meta/CMake/vcpkg/overlay-ports/"):
return False
if filename.endswith('.txt'):
return 'CMake' in filename
if filename.endswith(".txt"):
return "CMake" in filename
return True
@@ -30,7 +30,7 @@ def find_files_here_or_argv():
raw_list = sys.argv[1:]
else:
process = subprocess.run(["git", "ls-files"], check=True, capture_output=True)
raw_list = process.stdout.decode().strip('\n').split('\n')
raw_list = process.stdout.decode().strip("\n").split("\n")
return filter(should_check_file, raw_list)
@@ -46,7 +46,7 @@ def run():
f.seek(0, os.SEEK_END)
f.seek(f.tell() - 1, os.SEEK_SET)
if f.read(1) != '\n':
if f.read(1) != "\n":
did_fail = True
no_newline_at_eof_errors.append(filename)
continue
@@ -56,7 +56,7 @@ def run():
char = f.read(1)
if not char.isspace():
break
if char == '\n':
if char == "\n":
did_fail = True
blank_lines_at_eof_errors.append(filename)
break
@@ -70,6 +70,6 @@ def run():
sys.exit(1)
if __name__ == '__main__':
if __name__ == "__main__":
os.chdir(os.path.dirname(__file__) + "/..")
run()


@@ -14,23 +14,24 @@ import sys
# * SPDX-License-Identifier: BSD-2-Clause
# */
GOOD_LICENSE_HEADER_PATTERN = re.compile(
'^/\\*\n' +
'( \\* Copyright \\(c\\) [0-9]{4}(-[0-9]{4})?, .*\n)+' +
' \\*\n' +
' \\* SPDX-License-Identifier: BSD-2-Clause\n' +
' \\*/\n' +
'\n')
"^/\\*\n"
+ "( \\* Copyright \\(c\\) [0-9]{4}(-[0-9]{4})?, .*\n)+"
+ " \\*\n"
+ " \\* SPDX-License-Identifier: BSD-2-Clause\n"
+ " \\*/\n"
+ "\n"
)
LICENSE_HEADER_CHECK_EXCLUDES = {
'AK/Checked.h',
'AK/Function.h',
'Libraries/LibCore/SocketpairWindows.cpp',
"AK/Checked.h",
"AK/Function.h",
"Libraries/LibCore/SocketpairWindows.cpp",
}
# We check that "#pragma once" is present
PRAGMA_ONCE_STRING = '#pragma once'
PRAGMA_ONCE_STRING = "#pragma once"
# We make sure that there's a blank line before and after pragma once
GOOD_PRAGMA_ONCE_PATTERN = re.compile('(^|\\S\n\n)#pragma once(\n\n\\S.|$)')
GOOD_PRAGMA_ONCE_PATTERN = re.compile("(^|\\S\n\n)#pragma once(\n\n\\S.|$)")
# LibC is supposed to be a system library; don't mention the directory.
BAD_INCLUDE_LIBC = re.compile("# *include <LibC/")
@@ -43,28 +44,26 @@ ANY_INCLUDE_PATTERN = re.compile('^ *# *include\\b.*[>"](?!\\)).*$', re.M)
SYSTEM_INCLUDE_PATTERN = re.compile("^ *# *include *<([^>]+)>(?: /[*/].*)?$")
LOCAL_INCLUDE_PATTERN = re.compile('^ *# *include *"([^>]+)"(?: /[*/].*)?$')
INCLUDE_CHECK_EXCLUDES = {
}
INCLUDE_CHECK_EXCLUDES = {}
LOCAL_INCLUDE_ROOT_OVERRIDES = {
}
LOCAL_INCLUDE_ROOT_OVERRIDES = {}
LOCAL_INCLUDE_SUFFIX_EXCLUDES = [
# Some Qt files are required to include their .moc files, which will be located in a deep
# subdirectory that we won't find from here.
'.moc',
".moc",
]
# We check for and disallow any comments linking to the single-page HTML spec because it takes a long time to load.
SINGLE_PAGE_HTML_SPEC_LINK = re.compile('//.*https://html\\.spec\\.whatwg\\.org/#')
SINGLE_PAGE_HTML_SPEC_LINK = re.compile("//.*https://html\\.spec\\.whatwg\\.org/#")
def should_check_file(filename):
if not filename.endswith('.cpp') and not filename.endswith('.h'):
if not filename.endswith(".cpp") and not filename.endswith(".h"):
return False
if filename.startswith('Base/'):
if filename.startswith("Base/"):
return False
if filename.startswith('Meta/CMake/vcpkg/overlay-ports/'):
if filename.startswith("Meta/CMake/vcpkg/overlay-ports/"):
return False
return True
@@ -74,15 +73,13 @@ def find_files_here_or_argv():
raw_list = sys.argv[1:]
else:
process = subprocess.run(["git", "ls-files"], check=True, capture_output=True)
raw_list = process.stdout.decode().strip('\n').split('\n')
raw_list = process.stdout.decode().strip("\n").split("\n")
return filter(should_check_file, raw_list)
def is_in_prefix_list(filename, prefix_list):
return any(
filename.startswith(prefix) for prefix in prefix_list
)
return any(filename.startswith(prefix) for prefix in prefix_list)
def find_matching_prefix(filename, prefix_list):
@@ -102,12 +99,12 @@ def run():
errors_single_page_html_spec = []
for filename in find_files_here_or_argv():
with open(filename, mode="r", encoding='utf-8') as f:
with open(filename, mode="r", encoding="utf-8") as f:
file_content = f.read()
if not is_in_prefix_list(filename, LICENSE_HEADER_CHECK_EXCLUDES):
if not GOOD_LICENSE_HEADER_PATTERN.search(file_content):
errors_license.append(filename)
if filename.endswith('.h'):
if filename.endswith(".h"):
if GOOD_PRAGMA_ONCE_PATTERN.search(file_content):
# Excellent, the formatting is correct.
pass
@@ -185,16 +182,13 @@ def run():
)
have_errors = True
if errors_single_page_html_spec:
print(
"Files with links to the single-page HTML spec:",
" ".join(errors_single_page_html_spec)
)
print("Files with links to the single-page HTML spec:", " ".join(errors_single_page_html_spec))
have_errors = True
if have_errors:
sys.exit(1)
if __name__ == '__main__':
if __name__ == "__main__":
os.chdir(os.path.dirname(__file__) + "/..")
run()


@@ -1,6 +1,6 @@
#!/usr/bin/env python3
r"""
Embeds a file into a String, a la #embed from C++23
Embeds a file into a String, a la #embed from C++23
"""
import argparse
@@ -8,31 +8,26 @@ import sys
def main():
parser = argparse.ArgumentParser(
epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('input', help='input file to stringify')
parser.add_argument('-o', '--output', required=True,
help='output file')
parser.add_argument('-n', '--variable-name', required=True,
help='name of the C++ variable')
parser.add_argument('-s', '--namespace', required=False,
help='C++ namespace to put the string into')
parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("input", help="input file to stringify")
parser.add_argument("-o", "--output", required=True, help="output file")
parser.add_argument("-n", "--variable-name", required=True, help="name of the C++ variable")
parser.add_argument("-s", "--namespace", required=False, help="C++ namespace to put the string into")
args = parser.parse_args()
with open(args.output, 'w') as f:
with open(args.output, "w") as f:
f.write("#include <AK/String.h>\n")
if args.namespace:
f.write(f"namespace {args.namespace} {{\n")
f.write(f"extern String {args.variable_name};\n")
f.write(f"String {args.variable_name} = R\"~~~(")
with open(args.input, 'r') as input:
f.write(f'String {args.variable_name} = R"~~~(')
with open(args.input, "r") as input:
for line in input.readlines():
f.write(f"{line}")
f.write(")~~~\"_string;\n")
f.write(')~~~"_string;\n')
if args.namespace:
f.write("}\n")
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())
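
Note the direction of the quote change in the f.write() calls above: black prefers double quotes but switches a string to single quotes whenever that avoids backslash escapes. A sketch with hypothetical values:

variable_name = "demo"  # stands in for args.variable_name
print(f'String {variable_name} = R"~~~(')  # double quotes inside, so single-quoted
print(')~~~"_string;')                     # same rule for the closing delimiter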


@@ -149,9 +149,7 @@ def parse_args(raw_args: list[dict[str, str]]) -> list[WasmValue]:
def parse_action(action: dict[str, Any]) -> Action:
match action["type"]:
case "invoke":
return Invoke(
action["field"], parse_args(action["args"]), action.get("module")
)
return Invoke(action["field"], parse_args(action["args"]), action.get("module"))
case "get":
return Get(action["field"], action.get("module"))
case _:
@@ -165,9 +163,7 @@ def parse(raw: dict[str, Any]) -> WastDescription:
cmd: Command
match raw_cmd["type"]:
case "module":
cmd = ModuleCommand(
line, Path(raw_cmd["filename"]), raw_cmd.get("name")
)
cmd = ModuleCommand(line, Path(raw_cmd["filename"]), raw_cmd.get("name"))
case "action":
cmd = ActionCommand(line, parse_action(raw_cmd["action"]))
case "register":
@@ -176,9 +172,7 @@
cmd = AssertReturn(
line,
parse_action(raw_cmd["action"]),
parse_value(raw_cmd["expected"][0])
if len(raw_cmd["expected"]) == 1
else None,
parse_value(raw_cmd["expected"][0]) if len(raw_cmd["expected"]) == 1 else None,
)
case "assert_trap" | "assert_exhaustion":
cmd = AssertTrap(line, raw_cmd["text"], parse_action(raw_cmd["action"]))
@@ -260,10 +254,7 @@ def gen_value_arg(value: WasmValue) -> str:
case "f32":
return str(int(value.value)) + f" /* {float_to_str(int(value.value))} */"
case "f64":
return (
str(int(value.value))
+ f"n /* {float_to_str(int(value.value), double=True)} */"
)
return str(int(value.value)) + f"n /* {float_to_str(int(value.value), double=True)} */"
case "externref" | "funcref" | "v128":
return value.value
case _:
@@ -336,9 +327,7 @@ expect(() => parseWebAssemblyModule(content, globalImportObject)).toThrow(Error,
def gen_pretty_expect(expr: str, got: str, expect: str):
print(
f"if (!{expr}) {{ expect().fail(`Failed with ${{{got}}}, expected {expect}`); }}"
)
print(f"if (!{expr}) {{ expect().fail(`Failed with ${{{got}}}, expected {expect}`); }}")
def gen_invoke(
@@ -354,11 +343,7 @@ def gen_invoke(
module = "module"
if invoke.module is not None:
module = f'namedModules["{invoke.module}"]'
utf8 = (
str(invoke.field.encode("utf8"))[2:-1]
.replace("\\'", "'")
.replace("`", "${'`'}")
)
utf8 = str(invoke.field.encode("utf8"))[2:-1].replace("\\'", "'").replace("`", "${'`'}")
print(
f"""_test(`execution of {ctx.current_module_name}: {utf8} (line {line})`, () => {{
let _field = {module}.getExport(decodeURIComponent(escape(`{utf8}`)));
@@ -428,9 +413,7 @@ def gen_command(command: Command, ctx: Context):
if isinstance(command.action, Invoke):
gen_invoke(command.line, command.action, None, ctx)
else:
raise GenerateException(
f"Not implemented: top-level {type(command.action)}"
)
raise GenerateException(f"Not implemented: top-level {type(command.action)}")
case AssertInvalid():
gen_invalid(command, ctx)
case Register():
@@ -444,9 +427,7 @@ def gen_command(command: Command, ctx: Context):
case AssertTrap():
if not isinstance(command.action, Invoke):
raise GenerateException(f"Not implemented: {type(command.action)}")
gen_invoke(
command.line, command.action, None, ctx, fail_msg=command.messsage
)
gen_invoke(command.line, command.action, None, ctx, fail_msg=command.messsage)
def generate(description: WastDescription):


@@ -1,6 +1,6 @@
#!/usr/bin/env python3
"""
Generates a clang module map for a given directory
Generates a clang module map for a given directory
"""
import argparse
@@ -11,26 +11,24 @@ import sys
def write_file_if_not_same(file_path, content):
try:
with open(file_path, 'r') as f:
with open(file_path, "r") as f:
if f.read() == content:
return
except FileNotFoundError:
pass
with open(file_path, 'w') as f:
with open(file_path, "w") as f:
f.write(content)
def main():
parser = argparse.ArgumentParser(
epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('directory', help='source directory to generate module map for')
parser.add_argument('--module-name', help='top-level module name')
parser.add_argument('--module-map', required=True, help='output module map file')
parser.add_argument('--vfs-map', required=True, help='output VFS map file')
parser.add_argument('--exclude-files', nargs='*', required=False, help='files to exclude in the module map')
parser.add_argument('--generated-files', nargs='*', help='extra files to include in the module map')
parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("directory", help="source directory to generate module map for")
parser.add_argument("--module-name", help="top-level module name")
parser.add_argument("--module-map", required=True, help="output module map file")
parser.add_argument("--vfs-map", required=True, help="output VFS map file")
parser.add_argument("--exclude-files", nargs="*", required=False, help="files to exclude in the module map")
parser.add_argument("--generated-files", nargs="*", help="extra files to include in the module map")
args = parser.parse_args()
root = pathlib.Path(args.directory)
@@ -41,14 +39,14 @@ def main():
pathlib.Path(args.vfs_map).parent.mkdir(parents=True, exist_ok=True)
exclude_files = set(args.exclude_files) if args.exclude_files else set()
header_files = [f for f in root.rglob('**/*.h') if f.is_file() and f.name not in exclude_files]
header_files = [f for f in root.rglob("**/*.h") if f.is_file() and f.name not in exclude_files]
module_name = args.module_name if args.module_name else root.name
module_map = f"module {module_name} {{\n"
for header_file in header_files:
module_map += f" header \"{header_file.relative_to(root)}\"\n"
module_map += f' header "{header_file.relative_to(root)}"\n'
for generated_file in args.generated_files:
module_map += f" header \"{generated_file}\"\n"
module_map += f' header "{generated_file}"\n'
module_map += " requires cplusplus\n"
module_map += " export *\n"
module_map += "}\n"
@@ -60,14 +58,14 @@ def main():
{
"name": f"{root}/module.modulemap",
"type": "file",
"external-contents": f"{args.module_map}"
"external-contents": f"{args.module_map}",
}
]
],
}
write_file_if_not_same(args.module_map, module_map)
write_file_if_not_same(args.vfs_map, yaml.dump(vfs_map))
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())


@@ -20,7 +20,7 @@ import urllib.request
def compute_sha256(path):
sha256 = hashlib.sha256()
with open(path, 'rb') as file:
with open(path, "rb") as file:
while True:
data = file.read(256 << 10)
if not data:
@@ -32,26 +32,19 @@ def compute_sha256(path):
def main():
parser = argparse.ArgumentParser(
epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('url', help='input url')
parser.add_argument('-o', '--output', required=True,
help='output file')
parser.add_argument('-v', '--version', required=True,
help='version of file to detect mismatches and redownload')
parser.add_argument('-f', '--version-file', required=True,
help='filesystem location to cache version')
parser.add_argument('-c', "--cache-path", required=False,
help='path for cached files to clear on version mismatch')
parser.add_argument('-s', "--sha256", required=False,
help='expected SHA-256 hash of the downloaded file')
parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("url", help="input url")
parser.add_argument("-o", "--output", required=True, help="output file")
parser.add_argument("-v", "--version", required=True, help="version of file to detect mismatches and redownload")
parser.add_argument("-f", "--version-file", required=True, help="filesystem location to cache version")
parser.add_argument("-c", "--cache-path", required=False, help="path for cached files to clear on version mismatch")
parser.add_argument("-s", "--sha256", required=False, help="expected SHA-256 hash of the downloaded file")
args = parser.parse_args()
version_from_file = ''
version_from_file = ""
version_file = pathlib.Path(args.version_file)
if version_file.exists():
with version_file.open('r') as f:
with version_file.open("r") as f:
version_from_file = f.readline().strip()
if version_from_file == args.version:
@@ -83,9 +76,9 @@ def main():
print(f"Actual: {actual_sha256}")
return 1
with open(version_file, 'w') as f:
with open(version_file, "w") as f:
f.write(args.version)
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())


@@ -26,11 +26,11 @@ def extract_member(file, destination, path):
destination_path.parent.mkdir(parents=True, exist_ok=True)
if isinstance(file, tarfile.TarFile):
with file.extractfile(path) as member:
destination_path.write_text(member.read().decode('utf-8'))
destination_path.write_text(member.read().decode("utf-8"))
else:
assert isinstance(file, zipfile.ZipFile)
with file.open(path) as member:
destination_path.write_text(member.read().decode('utf-8'))
destination_path.write_text(member.read().decode("utf-8"))
def extract_directory(file, destination, path):
@@ -55,15 +55,11 @@ def extract_directory(file, destination, path):
def main():
parser = argparse.ArgumentParser(
epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('archive', help='input archive')
parser.add_argument('paths', nargs='*', help='paths to extract from the archive')
parser.add_argument('-s', "--stamp", required=False,
help='stamp file name to create after operation is done')
parser.add_argument('-d', "--destination", required=True,
help='directory to write the extracted file to')
parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("archive", help="input archive")
parser.add_argument("paths", nargs="*", help="paths to extract from the archive")
parser.add_argument("-s", "--stamp", required=False, help="stamp file name to create after operation is done")
parser.add_argument("-d", "--destination", required=True, help="directory to write the extracted file to")
args = parser.parse_args()
archive = pathlib.Path(args.archive)
@@ -71,7 +67,7 @@ def main():
def extract_paths(file, paths):
for path in paths:
if path.endswith('/'):
if path.endswith("/"):
extract_directory(file, destination, path)
else:
extract_member(file, destination, path)
@@ -92,5 +88,5 @@ def main():
return 0
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())


@@ -8,41 +8,41 @@ import sys
def main():
parser = argparse.ArgumentParser(description='Install vcpkg dependencies')
parser.add_argument('--cc', type=str, required=True, help='The C compiler to use')
parser.add_argument('--cxx', type=str, required=True, help='The C++ compiler to use')
parser.add_argument('--manifest', type=str, required=True, help='The vcpkg manifest to install')
parser.add_argument('--vcpkg', type=str, required=True, help='The path to the vcpkg executable')
parser.add_argument('--vcpkg-root', type=str, required=True, help='The path to the vcpkg root directory')
parser.add_argument('--vcpkg-triplet', type=str, required=True, help='The vcpkg triplet to use')
parser.add_argument('--vcpkg-overlay-triplets', type=str, help='Path to a vcpkg overlay triplets directory')
parser.add_argument('--vcpkg-binary-cache-dir', type=str, help='Path to a vcpkg binary cache directory')
parser.add_argument('--stamp-file', type=str, help='Path to a file to touch after installation')
parser.add_argument('install_directory', type=str, help='The directory to install vcpkg deps into')
parser = argparse.ArgumentParser(description="Install vcpkg dependencies")
parser.add_argument("--cc", type=str, required=True, help="The C compiler to use")
parser.add_argument("--cxx", type=str, required=True, help="The C++ compiler to use")
parser.add_argument("--manifest", type=str, required=True, help="The vcpkg manifest to install")
parser.add_argument("--vcpkg", type=str, required=True, help="The path to the vcpkg executable")
parser.add_argument("--vcpkg-root", type=str, required=True, help="The path to the vcpkg root directory")
parser.add_argument("--vcpkg-triplet", type=str, required=True, help="The vcpkg triplet to use")
parser.add_argument("--vcpkg-overlay-triplets", type=str, help="Path to a vcpkg overlay triplets directory")
parser.add_argument("--vcpkg-binary-cache-dir", type=str, help="Path to a vcpkg binary cache directory")
parser.add_argument("--stamp-file", type=str, help="Path to a file to touch after installation")
parser.add_argument("install_directory", type=str, help="The directory to install vcpkg deps into")
args = parser.parse_args()
manifest_directory = pathlib.Path(args.manifest).parent
env = os.environ.copy()
env['CC'] = args.cc
env['CXX'] = args.cxx
env["CC"] = args.cc
env["CXX"] = args.cxx
vcpkg_arguments = [
args.vcpkg,
'install',
'--no-print-usage',
'--x-wait-for-lock',
f'--triplet={args.vcpkg_triplet}',
f'--vcpkg-root={args.vcpkg_root}',
f'--x-manifest-root={manifest_directory}',
f'--x-install-root={args.install_directory}'
"install",
"--no-print-usage",
"--x-wait-for-lock",
f"--triplet={args.vcpkg_triplet}",
f"--vcpkg-root={args.vcpkg_root}",
f"--x-manifest-root={manifest_directory}",
f"--x-install-root={args.install_directory}",
]
if args.vcpkg_overlay_triplets:
vcpkg_arguments += [f'--overlay-triplets={args.vcpkg_overlay_triplets}']
vcpkg_arguments += [f"--overlay-triplets={args.vcpkg_overlay_triplets}"]
if args.vcpkg_binary_cache_dir:
binary_cache_dir = pathlib.Path(args.vcpkg_binary_cache_dir).absolute()
vcpkg_arguments += [f'--binarysource=clear;files,{binary_cache_dir},readwrite']
vcpkg_arguments += [f"--binarysource=clear;files,{binary_cache_dir},readwrite"]
subprocess.run(vcpkg_arguments, env=env, check=True)
@@ -52,5 +52,5 @@ def main():
return 0
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())


@@ -5,4 +5,4 @@ import subprocess
import sys
# Prefix with ./ to run built binary, not arbitrary stuff from PATH.
sys.exit(subprocess.call(['./' + sys.argv[1]] + sys.argv[2:]))
sys.exit(subprocess.call(["./" + sys.argv[1]] + sys.argv[2:]))


@@ -39,72 +39,68 @@ import sys
def main():
parser = argparse.ArgumentParser(
epilog=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('input', help='input file')
parser.add_argument('values', nargs='*', help='several KEY=VALUE pairs')
parser.add_argument('-o', '--output', required=True,
help='output file')
parser = argparse.ArgumentParser(epilog=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("input", help="input file")
parser.add_argument("values", nargs="*", help="several KEY=VALUE pairs")
parser.add_argument("-o", "--output", required=True, help="output file")
args = parser.parse_args()
values = {}
for value in args.values:
key, val = value.split('=', 1)
key, val = value.split("=", 1)
if key in values:
print('duplicate key "%s" in args' % key, file=sys.stderr)
return 1
values[key] = val.replace('\\n', '\n')
values[key] = val.replace("\\n", "\n")
unused_values = set(values.keys())
# Matches e.g. '${FOO}' or '@FOO@' and captures FOO in group 1 or 2.
var_re = re.compile(r'\$\{([^}]*)\}|@([^@]*)@')
var_re = re.compile(r"\$\{([^}]*)\}|@([^@]*)@")
with open(args.input) as f:
in_lines = f.readlines()
out_lines = []
for in_line in in_lines:
def repl(m):
key = m.group(1) or m.group(2)
unused_values.discard(key)
return values[key]
in_line = var_re.sub(repl, in_line)
if in_line.startswith('#cmakedefine01 ') or in_line.startswith("# cmakedefine01"):
in_line = in_line.replace('# cmakedefine01', '#cmakedefine01')
if in_line.startswith("#cmakedefine01 ") or in_line.startswith("# cmakedefine01"):
in_line = in_line.replace("# cmakedefine01", "#cmakedefine01")
_, var = in_line.split()
if values[var] == '0':
if values[var] == "0":
print('error: "%s=0" used with #cmakedefine01 %s' % (var, var))
print(" '0' evaluates as truthy with #cmakedefine01")
print(' use "%s=" instead' % var)
return 1
in_line = '#define %s %d\n' % (var, 1 if values[var] else 0)
in_line = "#define %s %d\n" % (var, 1 if values[var] else 0)
unused_values.discard(var)
elif in_line.startswith('#cmakedefine '):
elif in_line.startswith("#cmakedefine "):
_, var = in_line.split(None, 1)
try:
var, val = var.split(None, 1)
in_line = '#define %s %s' % (var, val) # val ends in \n.
in_line = "#define %s %s" % (var, val) # val ends in \n.
except ValueError:
var = var.rstrip()
in_line = '#define %s\n' % var
in_line = "#define %s\n" % var
if not values[var]:
in_line = '/* #undef %s */\n' % var
in_line = "/* #undef %s */\n" % var
unused_values.discard(var)
out_lines.append(in_line)
if unused_values:
print('unused values args:', file=sys.stderr)
print(' ' + '\n '.join(unused_values), file=sys.stderr)
print("unused values args:", file=sys.stderr)
print(" " + "\n ".join(unused_values), file=sys.stderr)
return 1
output = ''.join(out_lines)
output = "".join(out_lines)
leftovers = var_re.findall(output)
if leftovers:
print(
'unprocessed values:\n',
'\n'.join([x[0] or x[1] for x in leftovers]),
file=sys.stderr)
print("unprocessed values:\n", "\n".join([x[0] or x[1] for x in leftovers]), file=sys.stderr)
return 1
def read(filename):
@@ -112,10 +108,10 @@ def main():
return f.read()
if not os.path.exists(args.output) or read(args.output) != output:
with open(args.output, 'w') as f:
with open(args.output, "w") as f:
f.write(output)
os.chmod(args.output, os.stat(args.input).st_mode & 0o777)
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())


@@ -11,13 +11,13 @@ import re
import os
import sys
wpt_base_url = 'https://wpt.live/'
wpt_base_url = "https://wpt.live/"
class TestType(Enum):
TEXT = 1, 'Tests/LibWeb/Text/input/wpt-import', 'Tests/LibWeb/Text/expected/wpt-import'
REF = 2, 'Tests/LibWeb/Ref/input/wpt-import', 'Tests/LibWeb/Ref/expected/wpt-import'
CRASH = 3, 'Tests/LibWeb/Crash/wpt-import', ''
TEXT = 1, "Tests/LibWeb/Text/input/wpt-import", "Tests/LibWeb/Text/expected/wpt-import"
REF = 2, "Tests/LibWeb/Ref/input/wpt-import", "Tests/LibWeb/Ref/expected/wpt-import"
CRASH = 3, "Tests/LibWeb/Crash/wpt-import", ""
def __new__(cls, *args, **kwds):
obj = object.__new__(cls)
@@ -29,7 +29,7 @@ class TestType(Enum):
self.expected_path = expected_path
PathMapping = namedtuple('PathMapping', ['source', 'destination'])
PathMapping = namedtuple("PathMapping", ["source", "destination"])
class ResourceType(Enum):
@@ -115,8 +115,8 @@ def map_to_path(sources: list[ResourceAndType], is_resource=True, resource_path=
for source in sources:
base_directory = test_type.input_path if source.type == ResourceType.INPUT else test_type.expected_path
if source.resource.startswith('/') or not is_resource:
file_path = Path(base_directory, source.resource.lstrip('/'))
if source.resource.startswith("/") or not is_resource:
file_path = Path(base_directory, source.resource.lstrip("/"))
else:
# Add it as a sibling path if it's a relative resource
sibling_location = Path(resource_path).parent
@@ -124,7 +124,7 @@ def map_to_path(sources: list[ResourceAndType], is_resource=True, resource_path=
file_path = Path(parent_directory, source.resource)
# Map to source and destination
output_path = wpt_base_url + str(file_path).replace(base_directory, '')
output_path = wpt_base_url + str(file_path).replace(base_directory, "")
filepaths.append(PathMapping(output_path, file_path.absolute()))
@@ -136,12 +136,12 @@ def is_crash_test(url_string):
# A test file is treated as a crash test if they have -crash in their name before the file extension, or they are
# located in a folder named crashtests
parsed_url = urlparse(url_string)
path_segments = parsed_url.path.strip('/').split('/')
path_segments = parsed_url.path.strip("/").split("/")
if len(path_segments) > 1 and "crashtests" in path_segments[::-1]:
return True
file_name = path_segments[-1]
file_name_parts = file_name.split('.')
if len(file_name_parts) > 1 and any([part.endswith('-crash') for part in file_name_parts[:-1]]):
file_name_parts = file_name.split(".")
if len(file_name_parts) > 1 and any([part.endswith("-crash") for part in file_name_parts[:-1]]):
return True
return False
@@ -152,28 +152,28 @@ def modify_sources(files, resources: list[ResourceAndType]) -> None:
folder_index = str(file).find(test_type.input_path)
if folder_index == -1:
folder_index = str(file).find(test_type.expected_path)
non_prefixed_path = str(file)[folder_index + len(test_type.expected_path):]
non_prefixed_path = str(file)[folder_index + len(test_type.expected_path) :]
else:
non_prefixed_path = str(file)[folder_index + len(test_type.input_path):]
non_prefixed_path = str(file)[folder_index + len(test_type.input_path) :]
parent_folder_count = len(Path(non_prefixed_path).parent.parts) - 1
parent_folder_path = '../' * parent_folder_count
parent_folder_path = "../" * parent_folder_count
with open(file, 'r') as f:
with open(file, "r") as f:
page_source = f.read()
# Iterate all scripts and overwrite the src attribute
for i, resource in enumerate(map(lambda r: r.resource, resources)):
if resource.startswith('/'):
if resource.startswith("/"):
new_src_value = parent_folder_path + resource[1::]
page_source = page_source.replace(resource, new_src_value)
# Look for mentions of the reference page, and update their href
if raw_reference_path is not None:
new_reference_path = parent_folder_path + '../../expected/wpt-import/' + reference_path[::]
new_reference_path = parent_folder_path + "../../expected/wpt-import/" + reference_path[::]
page_source = page_source.replace(raw_reference_path, new_reference_path)
with open(file, 'w') as f:
with open(file, "w") as f:
f.write(str(page_source))
@@ -181,7 +181,7 @@ def download_files(filepaths):
downloaded_files = []
for file in filepaths:
source = urljoin(file.source, "/".join(file.source.split('/')[3:]))
source = urljoin(file.source, "/".join(file.source.split("/")[3:]))
destination = Path(os.path.normpath(file.destination))
if destination.exists():
@@ -197,7 +197,7 @@ def download_files(filepaths):
os.makedirs(destination.parent, exist_ok=True)
with open(destination, 'wb') as f:
with open(destination, "wb") as f:
f.write(connection.read())
downloaded_files.append(destination)
@@ -212,7 +212,7 @@ def create_expectation_files(files):
for file in files:
new_path = str(file.destination).replace(test_type.input_path, test_type.expected_path)
new_path = new_path.rsplit(".", 1)[0] + '.txt'
new_path = new_path.rsplit(".", 1)[0] + ".txt"
expected_file = Path(new_path)
if expected_file.exists():
@@ -229,7 +229,7 @@ def main():
return
url_to_import = sys.argv[1]
resource_path = '/'.join(Path(url_to_import).parts[2::])
resource_path = "/".join(Path(url_to_import).parts[2::])
with urlopen(url_to_import) as response:
page = response.read().decode("utf-8")
@@ -249,21 +249,23 @@ def main():
main_paths = map_to_path(main_file, False)
if test_type == TestType.REF and raw_reference_path is None:
raise RuntimeError('Failed to file reference path in ref test')
raise RuntimeError("Failed to file reference path in ref test")
if raw_reference_path is not None:
if raw_reference_path.startswith('/'):
if raw_reference_path.startswith("/"):
reference_path = raw_reference_path
main_paths.append(PathMapping(
wpt_base_url + raw_reference_path,
Path(test_type.expected_path + raw_reference_path).absolute()
))
main_paths.append(
PathMapping(
wpt_base_url + raw_reference_path, Path(test_type.expected_path + raw_reference_path).absolute()
)
)
else:
reference_path = Path(resource_path).parent.joinpath(raw_reference_path).__str__()
main_paths.append(PathMapping(
wpt_base_url + '/' + reference_path,
Path(test_type.expected_path + '/' + reference_path).absolute()
))
main_paths.append(
PathMapping(
wpt_base_url + "/" + reference_path, Path(test_type.expected_path + "/" + reference_path).absolute()
)
)
files_to_modify = download_files(main_paths)
create_expectation_files(main_paths)
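
The slice rewrites above, where [folder_index + len(...):] becomes [folder_index + len(...) :], follow the PEP 8 rule that black enforces: when a slice bound is a complex expression, the colon is treated like a binary operator and gets symmetric spacing. A sketch with hypothetical paths:

path = "Tests/LibWeb/Text/input/wpt-import/foo.html"
prefix = "Tests/LibWeb/Text/input/wpt-import"
# A simple bound keeps the colon tight; an expression gets a space on each side.
tail = path[len(prefix) :]
assert tail == "/foo.html"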


@@ -54,7 +54,7 @@ def create_test(test_name: str, test_type: str, is_async: bool = False) -> None:
"""
if test_type == "Text":
input_boilerplate = fR"""<!DOCTYPE html>
input_boilerplate = Rf"""<!DOCTYPE html>
<script src="{path_to_include_js}"></script>
<script>
{f"asyncTest(async (done)" if is_async else "test(()"} => {{
@@ -66,7 +66,7 @@ def create_test(test_name: str, test_type: str, is_async: bool = False) -> None:
expected_boilerplate = "Expected println() output\n"
elif test_type == "Ref":
input_boilerplate = fR"""<!DOCTYPE html>
input_boilerplate = Rf"""<!DOCTYPE html>
<head>
<link rel="match" href="{"../" * num_sub_levels}../expected/{Path(test_name).with_suffix("")}-ref.html" />
<style>


@@ -23,8 +23,9 @@ def main() -> int:
if not vcpkg_checkout.is_dir():
subprocess.check_call(args=["git", "clone", git_repo], cwd=build_dir)
else:
bootstrapped_vcpkg_version = subprocess.check_output(
["git", "-C", vcpkg_checkout, "rev-parse", "HEAD"]).strip().decode()
bootstrapped_vcpkg_version = (
subprocess.check_output(["git", "-C", vcpkg_checkout, "rev-parse", "HEAD"]).strip().decode()
)
if bootstrapped_vcpkg_version == git_rev:
return 0
@@ -34,7 +35,7 @@ def main() -> int:
subprocess.check_call(args=["git", "fetch", "origin"], cwd=vcpkg_checkout)
subprocess.check_call(args=["git", "checkout", git_rev], cwd=vcpkg_checkout)
bootstrap_script = "bootstrap-vcpkg.bat" if os.name == 'nt' else "bootstrap-vcpkg.sh"
bootstrap_script = "bootstrap-vcpkg.bat" if os.name == "nt" else "bootstrap-vcpkg.sh"
subprocess.check_call(args=[vcpkg_checkout / bootstrap_script, "-disableMetrics"], cwd=vcpkg_checkout)
return 0
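
Finally, the bootstrapped_vcpkg_version hunk above shows how black wraps an over-long assignment: the entire right-hand side is enclosed in parentheses and indented, never continued with a backslash. A sketch under the same assumed ~120-column limit (shown pre-wrapped here; black itself only splits once the limit is actually exceeded):

import subprocess

# An over-long right-hand side is parenthesized, never backslash-continued.
head_revision = (
    subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode()
)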