diff --git a/local-test-libxml2-delta-01/afc-libxml2/.editorconfig b/local-test-libxml2-delta-01/afc-libxml2/.editorconfig new file mode 100644 index 0000000000000000000000000000000000000000..f752fe1165274146d80eff1ece8f567b60a2ecea --- /dev/null +++ b/local-test-libxml2-delta-01/afc-libxml2/.editorconfig @@ -0,0 +1,11 @@ +# EditorConfig : http://EditorConfig.org + +root = true + +[*] +indent_style = space +indent_size = 4 +tab_width = 8 + +[Makefile*] +indent_style = tab diff --git a/local-test-libxml2-delta-01/afc-libxml2/.gitattributes b/local-test-libxml2-delta-01/afc-libxml2/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..a688634f96bf2d5717215ea6d05cee113b9f737e --- /dev/null +++ b/local-test-libxml2-delta-01/afc-libxml2/.gitattributes @@ -0,0 +1,2 @@ +/result/** -text +/test/** -text diff --git a/local-test-libxml2-delta-01/afc-libxml2/CMakeLists.txt b/local-test-libxml2-delta-01/afc-libxml2/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..7c2b0e08cfffbfbd1fa52716c03e29e552163023 --- /dev/null +++ b/local-test-libxml2-delta-01/afc-libxml2/CMakeLists.txt @@ -0,0 +1,723 @@ +cmake_minimum_required(VERSION 3.18) + +file(READ "VERSION" VERSION) +string(STRIP ${VERSION} VERSION) +if(${VERSION} MATCHES [[([0-9]+)\.([0-9]+)\.([0-9]+)]]) + set(LIBXML_MAJOR_VERSION ${CMAKE_MATCH_1}) + set(LIBXML_MINOR_VERSION ${CMAKE_MATCH_2}) + set(LIBXML_MICRO_VERSION ${CMAKE_MATCH_3}) +endif() + +project(libxml2 VERSION ${VERSION} LANGUAGES C) + +include(CheckCSourceCompiles) +include(CheckFunctionExists) +include(CheckIncludeFiles) +include(CheckLibraryExists) +include(CheckLinkerFlag) +include(CheckStructHasMember) +include(CheckSymbolExists) +include(CMakeDependentOption) +include(CMakePackageConfigHelpers) +include(FindPkgConfig) +include(GNUInstallDirs) + +option(BUILD_SHARED_LIBS "Build shared libraries" ON) +set(LIBXML2_WITH_AUTOMATA ON) +option(LIBXML2_WITH_CATALOG "Add the Catalog support" ON) +option(LIBXML2_WITH_DEBUG "Add the debugging module" ON) +set(LIBXML2_WITH_EXPR ON) +option(LIBXML2_WITH_HTML "Add the HTML support" ON) +option(LIBXML2_WITH_HTTP "Add the HTTP support" OFF) +option(LIBXML2_WITH_ICONV "Add ICONV support" ON) +option(LIBXML2_WITH_ICU "Add ICU support" OFF) +option(LIBXML2_WITH_ISO8859X "Add ISO8859X support if no iconv" ON) +option(LIBXML2_WITH_LEGACY "Add deprecated APIs for compatibility" OFF) +option(LIBXML2_WITH_LZMA "Use liblzma" OFF) +option(LIBXML2_WITH_MODULES "Add the dynamic modules support" ON) +option(LIBXML2_WITH_OUTPUT "Add the serialization support" ON) +option(LIBXML2_WITH_PATTERN "Add the xmlPattern selection interface" ON) +option(LIBXML2_WITH_PROGRAMS "Build programs" ON) +option(LIBXML2_WITH_PUSH "Add the PUSH parser interfaces" ON) +option(LIBXML2_WITH_PYTHON "Build Python bindings" ON) +option(LIBXML2_WITH_READLINE "readline support for xmllint shell" OFF) +option(LIBXML2_WITH_REGEXPS "Add Regular Expressions support" ON) +option(LIBXML2_WITH_SAX1 "Add the older SAX1 interface" ON) +option(LIBXML2_WITH_TESTS "Build tests" ON) +option(LIBXML2_WITH_THREADS "Add multithread support" ON) +option(LIBXML2_WITH_TLS "Enable thread-local storage" OFF) +set(LIBXML2_WITH_UNICODE ON) +option(LIBXML2_WITH_VALID "Add the DTD validation support" ON) +option(LIBXML2_WITH_XPATH "Add the XPATH support" ON) +option(LIBXML2_WITH_ZLIB "Use libz" OFF) + +cmake_dependent_option( + LIBXML2_WITH_C14N "Add the Canonicalization support" ON + "LIBXML2_WITH_OUTPUT;LIBXML2_WITH_XPATH" OFF) +cmake_dependent_option( 
+ LIBXML2_WITH_HISTORY "history support for xmllint shell" OFF + "LIBXML2_WITH_READLINE" OFF) +cmake_dependent_option( + LIBXML2_WITH_READER "Add the xmlReader parsing interface" ON + "LIBXML2_WITH_PUSH" OFF) +cmake_dependent_option( + LIBXML2_WITH_SCHEMAS "Add Relax-NG and Schemas support" ON + "LIBXML2_WITH_PATTERN;LIBXML2_WITH_REGEXPS" OFF) +cmake_dependent_option( + LIBXML2_WITH_SCHEMATRON "Add Schematron support" ON + "LIBXML2_WITH_PATTERN;LIBXML2_WITH_XPATH" OFF) +cmake_dependent_option( + LIBXML2_WITH_THREAD_ALLOC "Add per-thread malloc hooks" OFF + "LIBXML2_WITH_THREADS" OFF) +cmake_dependent_option( + LIBXML2_WITH_WRITER "Add the xmlWriter saving interface" ON + "LIBXML2_WITH_OUTPUT;LIBXML2_WITH_PUSH" OFF) +cmake_dependent_option( + LIBXML2_WITH_XINCLUDE "Add the XInclude support" ON + "LIBXML2_WITH_XPATH" OFF) +cmake_dependent_option( + LIBXML2_WITH_XPTR "Add the XPointer support" ON + "LIBXML2_WITH_XPATH" OFF) + +set(LIBXML2_XMLCONF_WORKING_DIR ${CMAKE_CURRENT_BINARY_DIR} CACHE PATH "Working directory for XML Conformance Test Suite") + +if(LIBXML2_WITH_PYTHON) + find_package(Python COMPONENTS Interpreter Development REQUIRED) + #set(LIBXML2_PYTHON_INSTALL_DIR ${Python_SITEARCH} CACHE PATH "Python bindings install directory") + set(LIBXML2_PYTHON_INSTALL_DIR "${CMAKE_INSTALL_PREFIX}/python" + CACHE PATH "Python bindings install directory") +endif() + +foreach(VARIABLE IN ITEMS WITH_AUTOMATA WITH_C14N WITH_CATALOG WITH_DEBUG WITH_EXPR WITH_HTML WITH_HTTP WITH_ICONV WITH_ICU WITH_ISO8859X WITH_LEGACY WITH_LZMA WITH_MODULES WITH_OUTPUT WITH_PATTERN WITH_PUSH WITH_READER WITH_REGEXPS WITH_SAX1 WITH_SCHEMAS WITH_SCHEMATRON WITH_THREADS WITH_THREAD_ALLOC WITH_UNICODE WITH_VALID WITH_WRITER WITH_XINCLUDE WITH_XPATH WITH_XPTR WITH_ZLIB) + if(LIBXML2_${VARIABLE}) + set(${VARIABLE} 1) + else() + set(${VARIABLE} 0) + endif() +endforeach() + +set(LIBXML_VERSION ${VERSION}) +set(LIBXML_VERSION_EXTRA "") +math(EXPR LIBXML_VERSION_NUMBER " + ${LIBXML_MAJOR_VERSION} * 10000 + + ${LIBXML_MINOR_VERSION} * 100 + + ${LIBXML_MICRO_VERSION} +") + +set(MODULE_EXTENSION "${CMAKE_SHARED_LIBRARY_SUFFIX}") + +if(LIBXML2_WITH_ICONV) + find_package(Iconv REQUIRED) +endif() + +if(LIBXML2_WITH_ICU) + find_package(ICU REQUIRED COMPONENTS uc) +endif() + +if(LIBXML2_WITH_LZMA) + find_package(LibLZMA REQUIRED) +endif() + +if(LIBXML2_WITH_THREADS) + find_package(Threads REQUIRED) + set(THREAD_LIBS ${CMAKE_THREAD_LIBS_INIT}) + list(APPEND CMAKE_REQUIRED_LIBRARIES Threads::Threads) +endif() + +if(LIBXML2_WITH_ZLIB) + find_package(ZLIB REQUIRED) +endif() + +check_c_source_compiles(" + void __attribute__((destructor)) + f(void) {} + int main(void) { return 0; } +" HAVE_FUNC_ATTRIBUTE_DESTRUCTOR) +check_symbol_exists(getentropy "sys/random.h" HAVE_DECL_GETENTROPY) +check_symbol_exists(glob "glob.h" HAVE_DECL_GLOB) +check_symbol_exists(mmap "sys/mman.h" HAVE_DECL_MMAP) +check_include_files(stdint.h HAVE_STDINT_H) + +if(LIBXML2_WITH_READLINE) + check_library_exists(readline readline "" HAVE_LIBREADLINE) + if (LIBXML2_WITH_HISTORY) + check_library_exists(history append_history "" HAVE_LIBHISTORY) + endif() +endif() + +if(LIBXML2_WITH_HTTP) + check_include_files(poll.h HAVE_POLL_H) +endif() + +if(LIBXML2_WITH_TLS) + check_c_source_compiles( + "_Thread_local int v; int main(){return 0;}" + XML_THREAD_LOCAL_C11 + ) + if (XML_THREAD_LOCAL_C11) + set(XML_THREAD_LOCAL "_Thread_local") + else() + check_c_source_compiles( + "__thread int v; int main(){return 0;}" + XML_THREAD_LOCAL_THREAD + ) + if (XML_THREAD_LOCAL_THREAD) + 
set(XML_THREAD_LOCAL "__thread") + else() + check_c_source_compiles( + "__declspec(thread) int v; int main(){return 0;}" + XML_THREAD_LOCAL_DECLSPEC + ) + if (XML_THREAD_LOCAL_DECLSPEC) + set(XML_THREAD_LOCAL "__declspec(thread)") + endif() + endif() + endif() +endif() + +set( + LIBXML2_HDRS + include/libxml/c14n.h + include/libxml/catalog.h + include/libxml/chvalid.h + include/libxml/debugXML.h + include/libxml/dict.h + include/libxml/encoding.h + include/libxml/entities.h + include/libxml/globals.h + include/libxml/hash.h + include/libxml/HTMLparser.h + include/libxml/HTMLtree.h + include/libxml/list.h + include/libxml/nanoftp.h + include/libxml/nanohttp.h + include/libxml/parser.h + include/libxml/parserInternals.h + include/libxml/pattern.h + include/libxml/relaxng.h + include/libxml/SAX.h + include/libxml/SAX2.h + include/libxml/schemasInternals.h + include/libxml/schematron.h + include/libxml/threads.h + include/libxml/tree.h + include/libxml/uri.h + include/libxml/valid.h + include/libxml/xinclude.h + include/libxml/xlink.h + include/libxml/xmlIO.h + include/libxml/xmlautomata.h + include/libxml/xmlerror.h + include/libxml/xmlexports.h + include/libxml/xmlmemory.h + include/libxml/xmlmodule.h + include/libxml/xmlreader.h + include/libxml/xmlregexp.h + include/libxml/xmlsave.h + include/libxml/xmlschemas.h + include/libxml/xmlschemastypes.h + include/libxml/xmlstring.h + include/libxml/xmlunicode.h + include/libxml/xmlwriter.h + include/libxml/xpath.h + include/libxml/xpathInternals.h + include/libxml/xpointer.h +) + +set( + LIBXML2_SRCS + buf.c + chvalid.c + dict.c + encoding.c + entities.c + error.c + globals.c + hash.c + list.c + parser.c + parserInternals.c + SAX2.c + threads.c + tree.c + uri.c + valid.c + xmlIO.c + xmlmemory.c + xmlstring.c +) +if(LIBXML2_WITH_C14N) + list(APPEND LIBXML2_SRCS c14n.c) +endif() +if(LIBXML2_WITH_CATALOG) + list(APPEND LIBXML2_SRCS catalog.c) +endif() +if(LIBXML2_WITH_DEBUG) + list(APPEND LIBXML2_SRCS debugXML.c) +endif() +if(LIBXML2_WITH_HTML) + list(APPEND LIBXML2_SRCS HTMLparser.c HTMLtree.c) +endif() +if(LIBXML2_WITH_HTTP) + list(APPEND LIBXML2_SRCS nanohttp.c) +endif() +if(LIBXML2_WITH_LEGACY) + list(APPEND LIBXML2_SRCS legacy.c) +endif() +if(LIBXML2_WITH_LZMA) + list(APPEND LIBXML2_SRCS xzlib.c) +endif() +if(LIBXML2_WITH_MODULES) + list(APPEND LIBXML2_SRCS xmlmodule.c) +endif() +if(LIBXML2_WITH_OUTPUT) + list(APPEND LIBXML2_SRCS xmlsave.c) +endif() +if(LIBXML2_WITH_PATTERN) + list(APPEND LIBXML2_SRCS pattern.c) +endif() +if(LIBXML2_WITH_READER) + list(APPEND LIBXML2_SRCS xmlreader.c) +endif() +if(LIBXML2_WITH_REGEXPS) + list(APPEND LIBXML2_SRCS xmlregexp.c xmlunicode.c) +endif() +if(LIBXML2_WITH_SCHEMAS) + list(APPEND LIBXML2_SRCS relaxng.c xmlschemas.c xmlschemastypes.c) +endif() +if(LIBXML2_WITH_SCHEMATRON) + list(APPEND LIBXML2_SRCS schematron.c) +endif() +if(LIBXML2_WITH_WRITER) + list(APPEND LIBXML2_SRCS xmlwriter.c) +endif() +if(LIBXML2_WITH_XINCLUDE) + list(APPEND LIBXML2_SRCS xinclude.c) +endif() +if(LIBXML2_WITH_XPATH) + list(APPEND LIBXML2_SRCS xpath.c) +endif() +if(LIBXML2_WITH_XPTR) + list(APPEND LIBXML2_SRCS xlink.c xpointer.c) +endif() + +if(WIN32) + list(APPEND LIBXML2_SRCS win32/libxml2.rc) + file( + WRITE + ${CMAKE_CURRENT_BINARY_DIR}/rcVersion.h + "#define LIBXML_MAJOR_VERSION ${LIBXML_MAJOR_VERSION}\n" + "#define LIBXML_MINOR_VERSION ${LIBXML_MINOR_VERSION}\n" + "#define LIBXML_MICRO_VERSION ${LIBXML_MICRO_VERSION}\n" + "#define LIBXML_DOTTED_VERSION \"${VERSION}\"\n" + ) +endif() + +add_library(LibXml2 ${LIBXML2_HDRS} 
${LIBXML2_SRCS})
+add_library(LibXml2::LibXml2 ALIAS LibXml2)
+
+# (the three generator expressions below were garbled in this patch; they are
+# restored from context and should be verified against upstream)
+target_include_directories(
+    LibXml2
+    PUBLIC
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
+    $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
+    $<INSTALL_INTERFACE:$<INSTALL_PREFIX>/${CMAKE_INSTALL_INCLUDEDIR}/libxml2>
+)
+
+if(LIBXML2_WITH_MODULES)
+    check_library_exists(dl dlopen "" HAVE_DLOPEN)
+    if(HAVE_DLOPEN)
+        target_link_libraries(LibXml2 PRIVATE dl)
+        set(MODULE_LIBS "-ldl")
+    else()
+        check_library_exists(dld shl_load "" HAVE_SHLLOAD)
+        if(HAVE_SHLLOAD)
+            target_link_libraries(LibXml2 PRIVATE dld)
+            set(MODULE_LIBS "-ldld")
+        endif()
+    endif()
+endif()
+
+if(UNIX)
+    target_link_libraries(LibXml2 PRIVATE m)
+    set(LIBM "-lm")
+endif()
+
+if(WIN32)
+    target_link_libraries(LibXml2 PRIVATE bcrypt)
+    set(CRYPTO_LIBS "-lbcrypt")
+    if(LIBXML2_WITH_HTTP)
+        target_link_libraries(LibXml2 PRIVATE ws2_32)
+        set(WINSOCK_LIBS "-lws2_32")
+    endif()
+endif()
+
+if(LIBXML2_WITH_ICONV)
+    target_link_libraries(LibXml2 PUBLIC Iconv::Iconv)
+    if(NOT Iconv_IS_BUILT_IN)
+        set(ICONV_LIBS "-liconv")
+    endif()
+endif()
+
+if(LIBXML2_WITH_ICU)
+    target_link_libraries(LibXml2 PRIVATE ICU::uc)
+    set(ICU_LDFLAGS "-licuuc")
+    list(APPEND XML_PRIVATE_LIBS "${ICU_LDFLAGS}")
+    pkg_check_modules(ICU_PC IMPORTED_TARGET icu-uc)
+    if(ICU_PC_FOUND)
+        list(APPEND XML_PC_REQUIRES icu-uc)
+    else()
+        list(APPEND XML_PC_LIBS "${ICU_LDFLAGS}")
+    endif()
+endif()
+
+if(LIBXML2_WITH_LZMA)
+    target_link_libraries(LibXml2 PRIVATE LibLZMA::LibLZMA)
+    set(LibLZMA_LDFLAGS "-llzma")
+    list(APPEND XML_PRIVATE_LIBS "${LibLZMA_LDFLAGS}")
+    pkg_check_modules(LibLZMA_PC IMPORTED_TARGET liblzma)
+    if(LibLZMA_PC_FOUND)
+        list(APPEND XML_PC_REQUIRES liblzma)
+    else()
+        list(APPEND XML_PC_LIBS "${LibLZMA_LDFLAGS}")
+    endif()
+endif()
+
+if(LIBXML2_WITH_THREADS)
+    target_link_libraries(LibXml2 PRIVATE Threads::Threads)
+endif()
+
+if(LIBXML2_WITH_ZLIB)
+    target_link_libraries(LibXml2 PRIVATE ZLIB::ZLIB)
+    set(ZLIB_LDFLAGS "-lz")
+    list(APPEND XML_PRIVATE_LIBS "${ZLIB_LDFLAGS}")
+    pkg_check_modules(ZLIB_PC IMPORTED_TARGET zlib)
+    if(ZLIB_PC_FOUND)
+        list(APPEND XML_PC_REQUIRES zlib)
+    else()
+        list(APPEND XML_PC_LIBS "${ZLIB_LDFLAGS}")
+    endif()
+endif()
+
+if(CMAKE_C_COMPILER_ID MATCHES "Clang" OR CMAKE_C_COMPILER_ID STREQUAL "GNU")
+    # These compiler flags can break the checks above so keep them here.
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -pedantic -Wall -Wextra -Wshadow \ +-Wpointer-arith -Wcast-align -Wwrite-strings \ +-Wstrict-prototypes -Wmissing-prototypes \ +-Wno-long-long -Wno-format-extra-args -Wno-array-bounds") + + if(LIBXML2_WITH_LEGACY AND BUILD_SHARED_LIBS AND UNIX AND NOT APPLE) + check_linker_flag(C "LINKER:--undefined-version" FLAG_UNDEFINED_VERSION) + if (FLAG_UNDEFINED_VERSION) + target_link_options(LibXml2 PRIVATE "LINKER:--undefined-version") + endif() + target_link_options(LibXml2 PRIVATE "LINKER:--version-script=${CMAKE_CURRENT_SOURCE_DIR}/libxml2.syms") + endif() +endif() + +set(LIBXML_MINOR_COMPAT 0) +math(EXPR LIBXML_SOVERSION "${LIBXML_MAJOR_VERSION} + ${LIBXML_MINOR_COMPAT}") +set_target_properties( + LibXml2 + PROPERTIES + IMPORT_PREFIX lib + OUTPUT_NAME xml2 + POSITION_INDEPENDENT_CODE ON + PREFIX lib + VERSION ${PROJECT_VERSION} + SOVERSION ${LIBXML_SOVERSION} +) + +if(MSVC) + if(BUILD_SHARED_LIBS) + set_target_properties( + LibXml2 + PROPERTIES + DEBUG_POSTFIX d + ) + else() + set_target_properties( + LibXml2 + PROPERTIES + DEBUG_POSTFIX sd + MINSIZEREL_POSTFIX s + RELEASE_POSTFIX s + RELWITHDEBINFO_POSTFIX s + ) + endif() +endif() + +set(XML_SYSCONFDIR "${CMAKE_INSTALL_FULL_SYSCONFDIR}") + +install(FILES ${LIBXML2_HDRS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/libxml2/libxml COMPONENT development) + +install( + TARGETS LibXml2 + EXPORT LibXml2 + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT development + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} COMPONENT runtime NAMELINK_COMPONENT development + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT runtime +) + +if(MSVC AND BUILD_SHARED_LIBS) + install(FILES $ DESTINATION ${CMAKE_INSTALL_BINDIR} CONFIGURATIONS Debug RelWithDebInfo COMPONENT debug) +endif() + +if(LIBXML2_WITH_PROGRAMS) + add_executable(xmllint xmllint.c shell.c) + set(PROGRAMS xmllint) + if(LIBXML2_WITH_CATALOG AND LIBXML2_WITH_OUTPUT) + add_executable(xmlcatalog xmlcatalog.c) + list(APPEND PROGRAMS xmlcatalog) + endif() + foreach(PROGRAM ${PROGRAMS}) + add_executable(LibXml2::${PROGRAM} ALIAS ${PROGRAM}) + target_link_libraries(${PROGRAM} LibXml2) + if(HAVE_LIBHISTORY) + target_link_libraries(${PROGRAM} history) + endif() + if(HAVE_LIBREADLINE) + target_link_libraries(${PROGRAM} readline) + endif() + install(TARGETS ${PROGRAM} EXPORT LibXml2 RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT programs) + endforeach() +endif() + +if(LIBXML2_WITH_TESTS) + enable_testing() + set( + TESTS + runtest + runxmlconf + runsuite + testapi + testchar + testdict + testModule + testlimits + testparser + testrecurse + ) + foreach(TEST ${TESTS}) + add_executable(${TEST} ${TEST}.c) + target_link_libraries(${TEST} LibXml2) + endforeach() + if(Threads_FOUND) + foreach(TEST runtest) + target_link_libraries(${TEST} Threads::Threads) + endforeach() + endif() + add_test(NAME runtest COMMAND runtest --out ${CMAKE_CURRENT_BINARY_DIR} WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + add_test(NAME runsuite COMMAND runsuite WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + if(EXISTS ${LIBXML2_XMLCONF_WORKING_DIR}/xmlconf/xmlconf.xml) + add_test(NAME runxmlconf COMMAND runxmlconf WORKING_DIRECTORY ${LIBXML2_XMLCONF_WORKING_DIR}) + endif() + if(NOT WIN32) + add_test(NAME testapi COMMAND testapi) + endif() + add_test(NAME testchar COMMAND testchar) + add_test(NAME testdict COMMAND testdict) + add_test(NAME testparser COMMAND testparser WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) + add_test(NAME testrecurse COMMAND testrecurse WORKING_DIRECTORY 
${CMAKE_CURRENT_SOURCE_DIR}) +endif() + +if(LIBXML2_WITH_PYTHON) + execute_process( + COMMAND + ${Python_EXECUTABLE} + ${CMAKE_CURRENT_SOURCE_DIR}/python/generator.py + ${CMAKE_CURRENT_SOURCE_DIR}/doc/libxml2-api.xml + ${CMAKE_CURRENT_SOURCE_DIR}/python/libxml2-python-api.xml + WORKING_DIRECTORY + ${CMAKE_CURRENT_BINARY_DIR} + ) + file(READ python/libxml.py LIBXML_PY) + file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/libxml2.py.in "${LIBXML_PY}") + file(READ ${CMAKE_CURRENT_BINARY_DIR}/libxml2class.py LIBXML2CLASS_PY) + file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/libxml2.py.in "${LIBXML2CLASS_PY}") + configure_file(${CMAKE_CURRENT_BINARY_DIR}/libxml2.py.in libxml2.py COPYONLY) + add_library( + LibXml2Mod + libxml2-py.c + libxml2-py.h + python/libxml.c + python/libxml_wrap.h + python/types.c + ) + target_include_directories( + LibXml2Mod + PUBLIC + $ + ) + target_link_libraries(LibXml2Mod LibXml2 Python::Python) + set_target_properties( + LibXml2Mod + PROPERTIES + IMPORT_PREFIX lib + OUTPUT_NAME xml2mod + PREFIX lib + VERSION ${PROJECT_VERSION} + ) + if (WIN32) + set_target_properties(LibXml2Mod PROPERTIES SUFFIX ".pyd") + endif() + install( + TARGETS LibXml2Mod + ARCHIVE DESTINATION ${LIBXML2_PYTHON_INSTALL_DIR} COMPONENT development + LIBRARY DESTINATION ${LIBXML2_PYTHON_INSTALL_DIR} COMPONENT runtime NAMELINK_COMPONENT development + RUNTIME DESTINATION ${LIBXML2_PYTHON_INSTALL_DIR} COMPONENT runtime + ) + if(MSVC AND BUILD_SHARED_LIBS) + install(FILES $ DESTINATION ${LIBXML2_PYTHON_INSTALL_DIR} CONFIGURATIONS Debug RelWithDebInfo COMPONENT debug) + endif() + install(FILES python/drv_libxml2.py DESTINATION ${LIBXML2_PYTHON_INSTALL_DIR} COMPONENT runtime) + install(FILES ${CMAKE_CURRENT_BINARY_DIR}/libxml2.py DESTINATION ${LIBXML2_PYTHON_INSTALL_DIR} COMPONENT runtime) +endif() + +install(FILES doc/xml2-config.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1 COMPONENT documentation) +if(LIBXML2_WITH_PROGRAMS) + install(FILES doc/xmlcatalog.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1 COMPONENT documentation) + install(FILES doc/xmllint.1 DESTINATION ${CMAKE_INSTALL_MANDIR}/man1 COMPONENT documentation) +endif() +install(DIRECTORY doc/ DESTINATION ${CMAKE_INSTALL_DOCDIR} COMPONENT documentation + PATTERN "Makefile.*" EXCLUDE + PATTERN "meson.build" EXCLUDE + PATTERN "*.1" EXCLUDE + PATTERN "*.py" EXCLUDE + PATTERN "*.res" EXCLUDE + PATTERN "*.xml" EXCLUDE + PATTERN "*.xsl" EXCLUDE) + +configure_package_config_file( + libxml2-config.cmake.cmake.in libxml2-config.cmake + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libxml2-${PROJECT_VERSION} +) + +install( + FILES ${CMAKE_CURRENT_BINARY_DIR}/libxml2-config.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libxml2-${PROJECT_VERSION} + COMPONENT development +) + +write_basic_package_version_file( + ${CMAKE_CURRENT_BINARY_DIR}/libxml2-config-version.cmake + VERSION ${PROJECT_VERSION} + COMPATIBILITY SameMajorVersion +) + +install( + FILES ${CMAKE_CURRENT_BINARY_DIR}/libxml2-config-version.cmake + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libxml2-${PROJECT_VERSION} + COMPONENT development +) + +install( + EXPORT LibXml2 + DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/libxml2-${PROJECT_VERSION} + NAMESPACE LibXml2:: + FILE libxml2-export.cmake + COMPONENT development +) + +configure_file(config.h.cmake.in config.h) +configure_file(include/libxml/xmlversion.h.in libxml/xmlversion.h) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/libxml/xmlversion.h DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/libxml2/libxml COMPONENT development) + +if(LIBXML2_WITH_PYTHON) + set(prefix 
"${CMAKE_INSTALL_PREFIX}") + configure_file(python/setup.py.in setup.py @ONLY) +endif() + +set(NON_PC_LIBS "${THREAD_LIBS} ${ICONV_LIBS} ${LIBM} ${WINSOCK_LIBS} ${CRYPTO_LIBS} ${MODULE_LIBS}") +list(APPEND XML_PC_LIBS "${NON_PC_LIBS}") +list(APPEND XML_PRIVATE_LIBS "${NON_PC_LIBS}") +list(REMOVE_DUPLICATES XML_PC_LIBS) +list(REMOVE_DUPLICATES XML_PRIVATE_LIBS) + +list(JOIN XML_PC_REQUIRES " " XML_PC_REQUIRES) +list(JOIN XML_PC_LIBS " " XML_PC_LIBS) +list(JOIN XML_PRIVATE_LIBS " " XML_PRIVATE_LIBS) + +set(XML_INCLUDEDIR "-I\${includedir}/libxml2") +set(XML_LIBDIR "-L\${libdir}") +set(XML_LIBS "-lxml2") + +if(BUILD_SHARED_LIBS) + set(XML_PC_PRIVATE ".private") + set(XML_PC_LIBS_PRIVATE " +Libs.private:") +else() + set(XML_PRIVATE_LIBS_NO_SHARED "${XML_PRIVATE_LIBS}") +endif() + +if(WIN32) + set(XML_STATIC_CFLAGS "-DLIBXML_STATIC") + if (BUILD_SHARED_LIBS) + set(XML_PC_CFLAGS_PRIVATE " +Cflags.private:") + else() + target_compile_definitions(LibXml2 PUBLIC LIBXML_STATIC) + set(XML_CFLAGS "${XML_STATIC_CFLAGS}") + endif() +endif() + +file(RELATIVE_PATH PACKAGE_RELATIVE_PATH "${CMAKE_INSTALL_FULL_LIBDIR}/pkgconfig" "${CMAKE_INSTALL_PREFIX}") +string(REGEX REPLACE "/$" "" PACKAGE_RELATIVE_PATH "${PACKAGE_RELATIVE_PATH}") + +if(WIN32) + set(prefix "\${pcfiledir}/${PACKAGE_RELATIVE_PATH}") +else() + set(prefix "${CMAKE_INSTALL_PREFIX}") +endif() +set(exec_prefix "\${prefix}") +set(libdir "\${prefix}/${CMAKE_INSTALL_LIBDIR}") +set(includedir "\${prefix}/${CMAKE_INSTALL_INCLUDEDIR}") +configure_file(libxml-2.0.pc.in libxml-2.0.pc @ONLY) +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/libxml-2.0.pc DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig COMPONENT development) + +if(WIN32) + set(prefix "\$(cd \"\$(dirname \"\$0\")\"; pwd -P)/..") +endif() +configure_file(xml2-config.in xml2-config @ONLY) +install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/xml2-config DESTINATION ${CMAKE_INSTALL_BINDIR} COMPONENT development) + +set(XML_INCLUDEDIR "-I${CMAKE_INSTALL_FULL_INCLUDEDIR}/libxml2") +set(XML_LIBDIR "-L${CMAKE_INSTALL_FULL_LIBDIR}") + +set(CPACK_COMPONENT_DEVELOPMENT_DEPENDS runtime) +set(CPACK_COMPONENT_PROGRAMS_DEPENDS runtime) +set(CPACK_DEB_COMPONENT_INSTALL ON) +set(CPACK_DEBIAN_DEVELOPMENT_PACKAGE_DEPENDS "${PACKAGE_TARNAME}") +set(CPACK_DEBIAN_DEVELOPMENT_PACKAGE_NAME "${PACKAGE_TARNAME}-dev") +set(CPACK_DEBIAN_DEVELOPMENT_PACKAGE_SECTION "libdevel") +set(CPACK_DEBIAN_PACKAGE_HOMEPAGE ${PACKAGE_URL}) +set(CPACK_DEBIAN_PACKAGE_NAME ${PACKAGE_TARNAME}) +set(CPACK_DEBIAN_PACKAGE_SECTION "devel") +set(CPACK_DEBIAN_PROGRAMS_PACKAGE_DEPENDS "${PACKAGE_TARNAME}") +set(CPACK_DEBIAN_PROGRAMS_PACKAGE_NAME "${PACKAGE_TARNAME}-utils") +set(CPACK_DEBIAN_PROGRAMS_PACKAGE_SECTION "utils") +set(CPACK_DEBIAN_RUNTIME_PACKAGE_NAME ${PACKAGE_TARNAME}) +set(CPACK_DEBIAN_RUNTIME_PACKAGE_RECOMMENDS "${PACKAGE_TARNAME}-utils") +set(CPACK_DEBIAN_RUNTIME_PACKAGE_SECTION "libs") +set(CPACK_NSIS_PACKAGE_NAME ${PACKAGE_STRING}) +set(CPACK_NSIS_URL_INFO_ABOUT ${PACKAGE_URL}) +set(CPACK_PACKAGE_DISPLAY_NAME ${PACKAGE_STRING}) +set(CPACK_PACKAGE_INSTALL_DIRECTORY "${PACKAGE_TARNAME}-${PACKAGE_VERSION}") +set(CPACK_PACKAGE_NAME ${PACKAGE_TARNAME}) +set(CPACK_PACKAGE_VERSION ${PACKAGE_VERSION}) +set(CPACK_PACKAGE_VERSION_MAJOR ${LIBXML_MAJOR_VERSION}) +set(CPACK_PACKAGE_VERSION_MINOR ${LIBXML_MINOR_VERSION}) +set(CPACK_PACKAGE_VERSION_PATCH ${LIBXML_MICRO_VERSION}) +set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_CURRENT_SOURCE_DIR}/Copyright) +set(CPACK_RPM_COMPONENT_INSTALL ON) +set(CPACK_RPM_development_PACKAGE_NAME "${PACKAGE_NAME}-devel") 
+set(CPACK_RPM_development_PACKAGE_REQUIRES "${PACKAGE_NAME}")
+set(CPACK_RPM_PACKAGE_GROUP "Development/Libraries")
+set(CPACK_RPM_PACKAGE_NAME ${PACKAGE_TARNAME})
+set(CPACK_RPM_PACKAGE_URL ${PACKAGE_URL})
+set(CPACK_RPM_programs_PACKAGE_NAME "${PACKAGE_NAME}-utils")
+set(CPACK_RPM_programs_PACKAGE_REQUIRES "${PACKAGE_NAME}")
+set(CPACK_RPM_runtime_PACKAGE_NAME "${PACKAGE_NAME}")
+set(CPACK_RPM_runtime_PACKAGE_SUGGESTS "${PACKAGE_NAME}-utils")
+
+include(CPack)
diff --git a/local-test-libxml2-delta-01/afc-libxml2/HTMLparser.c b/local-test-libxml2-delta-01/afc-libxml2/HTMLparser.c
new file mode 100644
index 0000000000000000000000000000000000000000..9c34e9e848271a80e42c8ab6c18dd4fd11f32e1f
--- /dev/null
+++ b/local-test-libxml2-delta-01/afc-libxml2/HTMLparser.c
@@ -0,0 +1,6645 @@
+/*
+ * HTMLparser.c : an HTML parser
+ *
+ * References:
+ *   HTML Living Standard
+ *   https://html.spec.whatwg.org/multipage/parsing.html
+ *
+ * Tokenization now conforms to HTML5. Tree construction still follows
+ * a custom, non-standard implementation. See:
+ *
+ *   https://gitlab.gnome.org/GNOME/libxml2/-/issues/211
+ *
+ * See Copyright for the status of this software.
+ *
+ * daniel@veillard.com
+ */
+
+#define IN_LIBXML
+#include "libxml.h"
+#ifdef LIBXML_HTML_ENABLED
+
+/* (the bracketed include names were garbled in this patch; restored from
+ * the standard set for this file, verify against upstream) */
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <libxml/HTMLparser.h>
+#include <libxml/xmlmemory.h>
+#include <libxml/tree.h>
+#include <libxml/parser.h>
+#include <libxml/parserInternals.h>
+#include <libxml/xmlerror.h>
+#include <libxml/HTMLtree.h>
+#include <libxml/entities.h>
+#include <libxml/encoding.h>
+#include <libxml/xmlIO.h>
+#include <libxml/uri.h>
+
+#include "private/buf.h"
+#include "private/dict.h"
+#include "private/enc.h"
+#include "private/error.h"
+#include "private/html.h"
+#include "private/io.h"
+#include "private/parser.h"
+#include "private/tree.h"
+
+#define HTML_MAX_NAMELEN 1000
+#define HTML_PARSER_BIG_BUFFER_SIZE 1000
+#define HTML_PARSER_BUFFER_SIZE 100
+
+#define IS_WS_HTML(c) \
+    (((c) == 0x20) || \
+     (((c) >= 0x09) && ((c) <= 0x0D) && ((c) != 0x0B)))
+
+#define IS_HEX_DIGIT(c) \
+    ((IS_ASCII_DIGIT(c)) || \
+     ((((c) | 0x20) >= 'a') && (((c) | 0x20) <= 'f')))
+
+#define IS_UPPER(c) \
+    (((c) >= 'A') && ((c) <= 'Z'))
+
+#define IS_ALNUM(c) \
+    (IS_ASCII_LETTER(c) || IS_ASCII_DIGIT(c))
+
+typedef const unsigned htmlAsciiMask[2];
+
+static htmlAsciiMask MASK_DQ = {
+    0,
+    1u << ('"' - 32),
+};
+static htmlAsciiMask MASK_SQ = {
+    0,
+    1u << ('\'' - 32),
+};
+static htmlAsciiMask MASK_GT = {
+    0,
+    1u << ('>' - 32),
+};
+static htmlAsciiMask MASK_DASH = {
+    0,
+    1u << ('-' - 32),
+};
+static htmlAsciiMask MASK_WS_GT = {
+    1u << 0x09 | 1u << 0x0A | 1u << 0x0C | 1u << 0x0D,
+    1u << (' ' - 32) | 1u << ('>' - 32),
+};
+static htmlAsciiMask MASK_DQ_GT = {
+    0,
+    1u << ('"' - 32) | 1u << ('>' - 32),
+};
+static htmlAsciiMask MASK_SQ_GT = {
+    0,
+    1u << ('\'' - 32) | 1u << ('>' - 32),
+};
+
+static int htmlOmittedDefaultValue = 1;
+
+static int
+htmlParseElementInternal(htmlParserCtxtPtr ctxt);
+
+/************************************************************************
+ *                                                                      *
+ *              Some factorized error routines                          *
+ *                                                                      *
+ ************************************************************************/
+
+/**
+ * htmlErrMemory:
+ * @ctxt:  an HTML parser context
+ *
+ * Handle an out-of-memory error.
+ */
+static void
+htmlErrMemory(xmlParserCtxtPtr ctxt)
+{
+    xmlCtxtErrMemory(ctxt);
+}
+
+/**
+ * htmlParseErr:
+ * @ctxt:  an HTML parser context
+ * @error:  the error number
+ * @msg:  the error message
+ * @str1:  string info
+ * @str2:  string info
+ *
+ * Handle a fatal parser error, i.e. one violating Well-Formedness
+ * constraints.
+ */
+static void LIBXML_ATTR_FORMAT(3,0)
+htmlParseErr(xmlParserCtxtPtr ctxt, xmlParserErrors error,
+             const char *msg, const xmlChar *str1, const xmlChar *str2)
+{
+    xmlCtxtErr(ctxt, NULL, XML_FROM_HTML, error, XML_ERR_ERROR,
+               str1, str2, NULL, 0, msg, str1, str2);
+}
+
+/************************************************************************
+ *                                                                      *
+ *              Parser stacks related functions and macros              *
+ *                                                                      *
+ ************************************************************************/
+
+/**
+ * htmlnamePush:
+ * @ctxt:  an HTML parser context
+ * @value:  the element name
+ *
+ * Pushes a new element name on top of the name stack
+ *
+ * Returns -1 in case of error, the index in the stack otherwise
+ */
+static int
+htmlnamePush(htmlParserCtxtPtr ctxt, const xmlChar * value)
+{
+    if ((ctxt->html < 3) && (xmlStrEqual(value, BAD_CAST "head")))
+        ctxt->html = 3;
+    if ((ctxt->html < 10) && (xmlStrEqual(value, BAD_CAST "body")))
+        ctxt->html = 10;
+    if (ctxt->nameNr >= ctxt->nameMax) {
+        size_t newSize = ctxt->nameMax * 2;
+        const xmlChar **tmp;
+
+        tmp = xmlRealloc((xmlChar **) ctxt->nameTab,
+                         newSize * sizeof(ctxt->nameTab[0]));
+        if (tmp == NULL) {
+            htmlErrMemory(ctxt);
+            return (-1);
+        }
+        ctxt->nameTab = tmp;
+        ctxt->nameMax = newSize;
+    }
+    ctxt->nameTab[ctxt->nameNr] = value;
+    ctxt->name = value;
+    return (ctxt->nameNr++);
+}
+/**
+ * htmlnamePop:
+ * @ctxt:  an HTML parser context
+ *
+ * Pops the top element name from the name stack
+ *
+ * Returns the name just removed
+ */
+static const xmlChar *
+htmlnamePop(htmlParserCtxtPtr ctxt)
+{
+    const xmlChar *ret;
+
+    if (ctxt->nameNr <= 0)
+        return (NULL);
+    ctxt->nameNr--;
+    if (ctxt->nameNr < 0)
+        return (NULL);
+    if (ctxt->nameNr > 0)
+        ctxt->name = ctxt->nameTab[ctxt->nameNr - 1];
+    else
+        ctxt->name = NULL;
+    ret = ctxt->nameTab[ctxt->nameNr];
+    ctxt->nameTab[ctxt->nameNr] = NULL;
+    return (ret);
+}
+
+/**
+ * htmlNodeInfoPush:
+ * @ctxt:  an HTML parser context
+ * @value:  the node info
+ *
+ * Pushes a new node info on top of the node info stack
+ *
+ * Returns 0 in case of error, the index in the stack otherwise
+ */
+static int
+htmlNodeInfoPush(htmlParserCtxtPtr ctxt, htmlParserNodeInfo *value)
+{
+    if (ctxt->nodeInfoNr >= ctxt->nodeInfoMax) {
+        if (ctxt->nodeInfoMax == 0)
+            ctxt->nodeInfoMax = 5;
+        ctxt->nodeInfoMax *= 2;
+        ctxt->nodeInfoTab = (htmlParserNodeInfo *)
+            xmlRealloc((htmlParserNodeInfo *)ctxt->nodeInfoTab,
+                       ctxt->nodeInfoMax *
+                       sizeof(ctxt->nodeInfoTab[0]));
+        if (ctxt->nodeInfoTab == NULL) {
+            htmlErrMemory(ctxt);
+            return (0);
+        }
+    }
+    ctxt->nodeInfoTab[ctxt->nodeInfoNr] = *value;
+    ctxt->nodeInfo = &ctxt->nodeInfoTab[ctxt->nodeInfoNr];
+    return (ctxt->nodeInfoNr++);
+}
+
+/**
+ * htmlNodeInfoPop:
+ * @ctxt:  an HTML parser context
+ *
+ * Pops the top node info from the node info stack
+ *
+ * Returns NULL in case of error, the pointer to the NodeInfo otherwise
+ */
+static htmlParserNodeInfo *
+htmlNodeInfoPop(htmlParserCtxtPtr ctxt)
+{
+    if (ctxt->nodeInfoNr <= 0)
+        return (NULL);
+    ctxt->nodeInfoNr--;
+    if (ctxt->nodeInfoNr < 0)
+        return (NULL);
+    if (ctxt->nodeInfoNr > 0)
+        ctxt->nodeInfo = &ctxt->nodeInfoTab[ctxt->nodeInfoNr - 1];
+    else
+        ctxt->nodeInfo = NULL;
+    return &ctxt->nodeInfoTab[ctxt->nodeInfoNr];
+}
+
+/*
+ * Macros for accessing the content. Those should be used only by the parser,
+ * and not exported.
+ *
+ * Dirty macros, i.e. one needs to make assumptions on the context to use them
+ *
+ *   CUR_PTR returns the current pointer to the xmlChar to be parsed.
+ *   CUR     returns the current xmlChar value, i.e. an 8 bit value if compiled
+ *           in ISO-Latin or UTF-8, and the current 16 bit value if compiled
+ *           in UNICODE mode. This should be used internally by the parser
+ *           only to compare to ASCII values, otherwise it would break when
+ *           running with UTF-8 encoding.
+ *   NXT(n)  returns the n'th next xmlChar. Same as CUR, it should be used only
+ *           to compare on ASCII based substrings.
+ *   UPP(n)  returns the n'th next xmlChar converted to uppercase. Same as CUR,
+ *           it should be used only to compare on ASCII based substrings.
+ *   SKIP(n) skips n xmlChars, and must also be used only to skip ASCII defined
+ *           strings without newlines within the parser.
+ *
+ * Clean macros, not dependent on an ASCII context, expect UTF-8 encoding
+ *
+ *   COPY(to) copy one char to *to, increment CUR_PTR and to accordingly
+ */
+
+#define UPPER (toupper(*ctxt->input->cur))
+
+#define SKIP(val) ctxt->input->cur += (val),ctxt->input->col+=(val)
+
+#define NXT(val) ctxt->input->cur[(val)]
+
+#define UPP(val) (toupper(ctxt->input->cur[(val)]))
+
+#define CUR_PTR ctxt->input->cur
+#define BASE_PTR ctxt->input->base
+
+#define SHRINK \
+    if ((!PARSER_PROGRESSIVE(ctxt)) && \
+        (ctxt->input->cur - ctxt->input->base > 2 * INPUT_CHUNK) && \
+        (ctxt->input->end - ctxt->input->cur < 2 * INPUT_CHUNK)) \
+        xmlParserShrink(ctxt);
+
+#define GROW \
+    if ((!PARSER_PROGRESSIVE(ctxt)) && \
+        (ctxt->input->end - ctxt->input->cur < INPUT_CHUNK)) \
+        xmlParserGrow(ctxt);
+
+#define SKIP_BLANKS htmlSkipBlankChars(ctxt)
+
+/* Imported from XML */
+
+#define CUR (*ctxt->input->cur)
+
+/**
+ * htmlFindEncoding:
+ * @ctxt:  the HTML parser context
+ *
+ * Try to find an encoding in the data currently available in the input
+ * buffer. This is needed to switch to the proper encoding when one
+ * faces a character encoding error.
+ * It's a heuristic: since it operates outside of parsing, it could pick
+ * up a meta element that had been commented out. That's the reason it
+ * should only be used in case of error, not as a default.
+ *
+ * Returns an encoding string or NULL if not found; the string needs to
+ * be freed.
+ */
+static xmlChar *
+htmlFindEncoding(xmlParserCtxtPtr ctxt) {
+    const xmlChar *start, *cur, *end;
+    xmlChar *ret;
+
+    if ((ctxt == NULL) || (ctxt->input == NULL) ||
+        (ctxt->input->flags & XML_INPUT_HAS_ENCODING))
+        return(NULL);
+    if ((ctxt->input->cur == NULL) || (ctxt->input->end == NULL))
+        return(NULL);
+
+    start = ctxt->input->cur;
+    end = ctxt->input->end;
+    /* we also expect the input buffer to be zero terminated */
+    if (*end != 0)
+        return(NULL);
+
+    cur = xmlStrcasestr(start, BAD_CAST "HTTP-EQUIV");
+    if (cur == NULL)
+        return(NULL);
+    cur = xmlStrcasestr(cur, BAD_CAST "CONTENT");
+    if (cur == NULL)
+        return(NULL);
+    cur = xmlStrcasestr(cur, BAD_CAST "CHARSET=");
+    if (cur == NULL)
+        return(NULL);
+    cur += 8;
+    start = cur;
+    while ((IS_ALNUM(*cur)) ||
+           (*cur == '-') || (*cur == '_') || (*cur == ':') || (*cur == '/'))
+        cur++;
+    if (cur == start)
+        return(NULL);
+    ret = xmlStrndup(start, cur - start);
+    if (ret == NULL)
+        htmlErrMemory(ctxt);
+    return(ret);
+}
+
+static int
+htmlMaskMatch(htmlAsciiMask mask, unsigned c) {
+    if (c >= 64)
+        return(0);
+    return((mask[c/32] >> (c & 31)) & 1);
+}
+
+static int
+htmlValidateUtf8(xmlParserCtxtPtr ctxt, const xmlChar *str, size_t len) {
+    unsigned c = str[0];
+    int size;
+
+    if (c < 0xC2) {
+        goto invalid;
+    } else if (c < 0xE0) {
+        if (len < 2)
+            goto incomplete;
+        if ((str[1] & 0xC0) != 0x80)
+            goto invalid;
+        size = 2;
+    } else if (c < 0xF0) {
+        unsigned v;
+
+        if (len < 3)
+            goto incomplete;
+
+        v = str[1] << 8 | str[2]; /* hint to generate 16-bit load */
+        v |= c << 16;
+
+        if (((v & 0x00C0C0) != 0x008080) ||
+            ((v & 0x0F2000) == 0x000000) ||
+            ((v & 0x0F2000) == 0x0D2000))
+            goto invalid;
+
+        size = 3;
+    } else {
+        unsigned v;
+
+        if (len < 4)
+            goto incomplete;
+
+        v = c << 24 | str[1] << 16 | str[2] << 8 | str[3];
+
+        if (((v & 0x00C0C0C0) != 0x00808080) ||
+            (v < 0xF0900000) || (v >= 0xF4900000))
+            goto invalid;
+
+        size = 4;
+    }
+
+    return(size);
+
+incomplete:
+    return(0);
+
+invalid:
+    /* Only report the first error */
+    if ((ctxt->input->flags & XML_INPUT_ENCODING_ERROR) == 0) {
+        htmlParseErr(ctxt, XML_ERR_INVALID_ENCODING,
+                     "Invalid bytes in character encoding", NULL, NULL);
+        ctxt->input->flags |= XML_INPUT_ENCODING_ERROR;
+    }
+
+    return(-1);
+}
+
+/**
+ * htmlSkipBlankChars:
+ * @ctxt:  the HTML parser context
+ *
+ * Skip all blank characters found at that point in the input stream.
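+ */
+
+/*
+ * Editor's sketch, not part of the original file: the htmlAsciiMask
+ * scheme used by htmlMaskMatch() above packs one bit per ASCII code
+ * point 0..63 into two 32-bit words. The names below are hypothetical
+ * and the block is kept under #if 0 so it is never compiled.
+ */
+#if 0
+#include <assert.h>
+
+typedef const unsigned exampleAsciiMask[2];
+
+/* TAB, LF, FF, CR, space and '>' -- the same layout as MASK_WS_GT above */
+static exampleAsciiMask EXAMPLE_WS_GT = {
+    1u << 0x09 | 1u << 0x0A | 1u << 0x0C | 1u << 0x0D,
+    1u << (' ' - 32) | 1u << ('>' - 32),
+};
+
+static int
+exampleMaskMatch(exampleAsciiMask mask, unsigned c) {
+    if (c >= 64)    /* only ASCII 0-63 is representable */
+        return(0);
+    return((mask[c/32] >> (c & 31)) & 1);
+}
+
+static void
+exampleMaskDemo(void) {
+    assert(exampleMaskMatch(EXAMPLE_WS_GT, '>') == 1);
+    assert(exampleMaskMatch(EXAMPLE_WS_GT, ' ') == 1);
+    assert(exampleMaskMatch(EXAMPLE_WS_GT, 'a') == 0); /* 'a' is >= 64 */
+}
+#endif
+
+/*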
+ * + * Returns the number of space chars skipped + */ + +static int +htmlSkipBlankChars(xmlParserCtxtPtr ctxt) { + const xmlChar *cur = ctxt->input->cur; + size_t avail = ctxt->input->end - cur; + int res = 0; + int line = ctxt->input->line; + int col = ctxt->input->col; + + while (!PARSER_STOPPED(ctxt)) { + if (avail == 0) { + ctxt->input->cur = cur; + GROW; + cur = ctxt->input->cur; + avail = ctxt->input->end - cur; + + if (avail == 0) + break; + } + + if (*cur == '\n') { + line++; + col = 1; + } else if (IS_WS_HTML(*cur)) { + col++; + } else { + break; + } + + cur += 1; + avail -= 1; + + if (res < INT_MAX) + res++; + } + + ctxt->input->cur = cur; + ctxt->input->line = line; + ctxt->input->col = col; + + if (res > 8) + GROW; + + return(res); +} + + + +/************************************************************************ + * * + * The list of HTML elements and their properties * + * * + ************************************************************************/ + +/* + * Start Tag: 1 means the start tag can be omitted + * End Tag: 1 means the end tag can be omitted + * 2 means it's forbidden (empty elements) + * 3 means the tag is stylistic and should be closed easily + * Depr: this element is deprecated + * DTD: 1 means that this element is valid only in the Loose DTD + * 2 means that this element is valid only in the Frameset DTD + * + * Name,Start Tag,End Tag,Save End,Empty,Deprecated,DTD,inline,Description + */ + +#define DATA_RCDATA 1 +#define DATA_RAWTEXT 2 +#define DATA_PLAINTEXT 3 +#define DATA_SCRIPT 4 +#define DATA_SCRIPT_ESC1 5 +#define DATA_SCRIPT_ESC2 6 + +static const htmlElemDesc +html40ElementTable[] = { +{ "a", 0, 0, 0, 0, 0, 0, 1, "anchor ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "abbr", 0, 0, 0, 0, 0, 0, 1, "abbreviated form", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "acronym", 0, 0, 0, 0, 0, 0, 1, "", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "address", 0, 0, 0, 0, 0, 0, 0, "information on author ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "applet", 0, 0, 0, 0, 1, 1, 2, "java applet ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "area", 0, 2, 2, 1, 0, 0, 0, "client-side image map area ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "b", 0, 3, 0, 0, 0, 0, 1, "bold text style", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "base", 0, 2, 2, 1, 0, 0, 0, "document base uri ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "basefont", 0, 2, 2, 1, 1, 1, 1, "base font size " , + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "bdo", 0, 0, 0, 0, 0, 0, 1, "i18n bidi over-ride ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "big", 0, 3, 0, 0, 0, 0, 1, "large text style", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "blockquote", 0, 0, 0, 0, 0, 0, 0, "long quotation ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "body", 1, 1, 0, 0, 0, 0, 0, "document body ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "br", 0, 2, 2, 1, 0, 0, 1, "forced line break ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "button", 0, 0, 0, 0, 0, 0, 2, "push button ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "caption", 0, 0, 0, 0, 0, 0, 0, "table caption ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "center", 0, 3, 0, 0, 1, 1, 0, "shorthand for div align=center ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "cite", 0, 0, 0, 0, 0, 0, 1, "citation", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "code", 0, 0, 0, 0, 0, 0, 1, "computer code fragment", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "col", 0, 2, 2, 1, 0, 0, 0, "table column ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "colgroup", 0, 1, 0, 0, 0, 0, 0, "table column 
group ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "dd", 0, 1, 0, 0, 0, 0, 0, "definition description ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "del", 0, 0, 0, 0, 0, 0, 2, "deleted text ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "dfn", 0, 0, 0, 0, 0, 0, 1, "instance definition", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "dir", 0, 0, 0, 0, 1, 1, 0, "directory list", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "div", 0, 0, 0, 0, 0, 0, 0, "generic language/style container", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "dl", 0, 0, 0, 0, 0, 0, 0, "definition list ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "dt", 0, 1, 0, 0, 0, 0, 0, "definition term ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "em", 0, 3, 0, 0, 0, 0, 1, "emphasis", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "embed", 0, 1, 0, 0, 1, 1, 1, "generic embedded object ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "fieldset", 0, 0, 0, 0, 0, 0, 0, "form control group ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "font", 0, 3, 0, 0, 1, 1, 1, "local change to font ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "form", 0, 0, 0, 0, 0, 0, 0, "interactive form ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "frame", 0, 2, 2, 1, 0, 2, 0, "subwindow " , + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "frameset", 0, 0, 0, 0, 0, 2, 0, "window subdivision" , + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "h1", 0, 0, 0, 0, 0, 0, 0, "heading ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "h2", 0, 0, 0, 0, 0, 0, 0, "heading ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "h3", 0, 0, 0, 0, 0, 0, 0, "heading ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "h4", 0, 0, 0, 0, 0, 0, 0, "heading ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "h5", 0, 0, 0, 0, 0, 0, 0, "heading ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "h6", 0, 0, 0, 0, 0, 0, 0, "heading ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "head", 1, 1, 0, 0, 0, 0, 0, "document head ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "hr", 0, 2, 2, 1, 0, 0, 0, "horizontal rule " , + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "html", 1, 1, 0, 0, 0, 0, 0, "document root element ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "i", 0, 3, 0, 0, 0, 0, 1, "italic text style", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "iframe", 0, 0, 0, 0, 0, 1, 2, "inline subwindow ", + NULL, NULL, NULL, NULL, NULL, + DATA_RAWTEXT +}, +{ "img", 0, 2, 2, 1, 0, 0, 1, "embedded image ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "input", 0, 2, 2, 1, 0, 0, 1, "form control ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "ins", 0, 0, 0, 0, 0, 0, 2, "inserted text", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "isindex", 0, 2, 2, 1, 1, 1, 0, "single line prompt ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "kbd", 0, 0, 0, 0, 0, 0, 1, "text to be entered by the user", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "label", 0, 0, 0, 0, 0, 0, 1, "form field label text ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "legend", 0, 0, 0, 0, 0, 0, 0, "fieldset legend ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "li", 0, 1, 1, 0, 0, 0, 0, "list item ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "link", 0, 2, 2, 1, 0, 0, 0, "a media-independent link ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "map", 0, 0, 0, 0, 0, 0, 2, "client-side image map ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "menu", 0, 0, 0, 0, 1, 1, 0, "menu list ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "meta", 0, 2, 2, 1, 0, 0, 0, "generic metainformation ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "noembed", 0, 0, 0, 0, 0, 0, 0, "", + NULL, NULL, NULL, NULL, NULL, + DATA_RAWTEXT +}, +{ 
"noframes", 0, 0, 0, 0, 0, 2, 0, "alternate content container for non frame-based rendering ", + NULL, NULL, NULL, NULL, NULL, + DATA_RAWTEXT +}, +{ "noscript", 0, 0, 0, 0, 0, 0, 0, "alternate content container for non script-based rendering ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "object", 0, 0, 0, 0, 0, 0, 2, "generic embedded object ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "ol", 0, 0, 0, 0, 0, 0, 0, "ordered list ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "optgroup", 0, 0, 0, 0, 0, 0, 0, "option group ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "option", 0, 1, 0, 0, 0, 0, 0, "selectable choice " , + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "p", 0, 1, 0, 0, 0, 0, 0, "paragraph ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "param", 0, 2, 2, 1, 0, 0, 0, "named property value ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "plaintext", 0, 0, 0, 0, 0, 0, 0, "", + NULL, NULL, NULL, NULL, NULL, + DATA_PLAINTEXT +}, +{ "pre", 0, 0, 0, 0, 0, 0, 0, "preformatted text ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "q", 0, 0, 0, 0, 0, 0, 1, "short inline quotation ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "s", 0, 3, 0, 0, 1, 1, 1, "strike-through text style", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "samp", 0, 0, 0, 0, 0, 0, 1, "sample program output, scripts, etc.", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "script", 0, 0, 0, 0, 0, 0, 2, "script statements ", + NULL, NULL, NULL, NULL, NULL, + DATA_SCRIPT +}, +{ "select", 0, 0, 0, 0, 0, 0, 1, "option selector ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "small", 0, 3, 0, 0, 0, 0, 1, "small text style", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "span", 0, 0, 0, 0, 0, 0, 1, "generic language/style container ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "strike", 0, 3, 0, 0, 1, 1, 1, "strike-through text", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "strong", 0, 3, 0, 0, 0, 0, 1, "strong emphasis", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "style", 0, 0, 0, 0, 0, 0, 0, "style info ", + NULL, NULL, NULL, NULL, NULL, + DATA_RAWTEXT +}, +{ "sub", 0, 3, 0, 0, 0, 0, 1, "subscript", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "sup", 0, 3, 0, 0, 0, 0, 1, "superscript ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "table", 0, 0, 0, 0, 0, 0, 0, "", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "tbody", 1, 0, 0, 0, 0, 0, 0, "table body ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "td", 0, 0, 0, 0, 0, 0, 0, "table data cell", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "textarea", 0, 0, 0, 0, 0, 0, 1, "multi-line text field ", + NULL, NULL, NULL, NULL, NULL, + DATA_RCDATA +}, +{ "tfoot", 0, 1, 0, 0, 0, 0, 0, "table footer ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "th", 0, 1, 0, 0, 0, 0, 0, "table header cell", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "thead", 0, 1, 0, 0, 0, 0, 0, "table header ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "title", 0, 0, 0, 0, 0, 0, 0, "document title ", + NULL, NULL, NULL, NULL, NULL, + DATA_RCDATA +}, +{ "tr", 0, 0, 0, 0, 0, 0, 0, "table row ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "tt", 0, 3, 0, 0, 0, 0, 1, "teletype or monospaced text style", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "u", 0, 3, 0, 0, 1, 1, 1, "underlined text style", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "ul", 0, 0, 0, 0, 0, 0, 0, "unordered list ", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "var", 0, 0, 0, 0, 0, 0, 1, "instance of a variable or program argument", + NULL, NULL, NULL, NULL, NULL, + 0 +}, +{ "xmp", 0, 0, 0, 0, 0, 0, 1, "", + NULL, NULL, NULL, NULL, NULL, + DATA_RAWTEXT +} +}; + +typedef struct { + const char 
*oldTag; + const char *newTag; +} htmlStartCloseEntry; + +/* + * start tags that imply the end of current element + */ +static const htmlStartCloseEntry htmlStartClose[] = { + { "a", "a" }, + { "a", "fieldset" }, + { "a", "table" }, + { "a", "td" }, + { "a", "th" }, + { "address", "dd" }, + { "address", "dl" }, + { "address", "dt" }, + { "address", "form" }, + { "address", "li" }, + { "address", "ul" }, + { "b", "center" }, + { "b", "p" }, + { "b", "td" }, + { "b", "th" }, + { "big", "p" }, + { "caption", "col" }, + { "caption", "colgroup" }, + { "caption", "tbody" }, + { "caption", "tfoot" }, + { "caption", "thead" }, + { "caption", "tr" }, + { "col", "col" }, + { "col", "colgroup" }, + { "col", "tbody" }, + { "col", "tfoot" }, + { "col", "thead" }, + { "col", "tr" }, + { "colgroup", "colgroup" }, + { "colgroup", "tbody" }, + { "colgroup", "tfoot" }, + { "colgroup", "thead" }, + { "colgroup", "tr" }, + { "dd", "dt" }, + { "dir", "dd" }, + { "dir", "dl" }, + { "dir", "dt" }, + { "dir", "form" }, + { "dir", "ul" }, + { "dl", "form" }, + { "dl", "li" }, + { "dt", "dd" }, + { "dt", "dl" }, + { "font", "center" }, + { "font", "td" }, + { "font", "th" }, + { "form", "form" }, + { "h1", "fieldset" }, + { "h1", "form" }, + { "h1", "li" }, + { "h1", "p" }, + { "h1", "table" }, + { "h2", "fieldset" }, + { "h2", "form" }, + { "h2", "li" }, + { "h2", "p" }, + { "h2", "table" }, + { "h3", "fieldset" }, + { "h3", "form" }, + { "h3", "li" }, + { "h3", "p" }, + { "h3", "table" }, + { "h4", "fieldset" }, + { "h4", "form" }, + { "h4", "li" }, + { "h4", "p" }, + { "h4", "table" }, + { "h5", "fieldset" }, + { "h5", "form" }, + { "h5", "li" }, + { "h5", "p" }, + { "h5", "table" }, + { "h6", "fieldset" }, + { "h6", "form" }, + { "h6", "li" }, + { "h6", "p" }, + { "h6", "table" }, + { "head", "a" }, + { "head", "abbr" }, + { "head", "acronym" }, + { "head", "address" }, + { "head", "b" }, + { "head", "bdo" }, + { "head", "big" }, + { "head", "blockquote" }, + { "head", "body" }, + { "head", "br" }, + { "head", "center" }, + { "head", "cite" }, + { "head", "code" }, + { "head", "dd" }, + { "head", "dfn" }, + { "head", "dir" }, + { "head", "div" }, + { "head", "dl" }, + { "head", "dt" }, + { "head", "em" }, + { "head", "fieldset" }, + { "head", "font" }, + { "head", "form" }, + { "head", "frameset" }, + { "head", "h1" }, + { "head", "h2" }, + { "head", "h3" }, + { "head", "h4" }, + { "head", "h5" }, + { "head", "h6" }, + { "head", "hr" }, + { "head", "i" }, + { "head", "iframe" }, + { "head", "img" }, + { "head", "kbd" }, + { "head", "li" }, + { "head", "listing" }, + { "head", "map" }, + { "head", "menu" }, + { "head", "ol" }, + { "head", "p" }, + { "head", "pre" }, + { "head", "q" }, + { "head", "s" }, + { "head", "samp" }, + { "head", "small" }, + { "head", "span" }, + { "head", "strike" }, + { "head", "strong" }, + { "head", "sub" }, + { "head", "sup" }, + { "head", "table" }, + { "head", "tt" }, + { "head", "u" }, + { "head", "ul" }, + { "head", "var" }, + { "head", "xmp" }, + { "hr", "form" }, + { "i", "center" }, + { "i", "p" }, + { "i", "td" }, + { "i", "th" }, + { "legend", "fieldset" }, + { "li", "li" }, + { "link", "body" }, + { "link", "frameset" }, + { "listing", "dd" }, + { "listing", "dl" }, + { "listing", "dt" }, + { "listing", "fieldset" }, + { "listing", "form" }, + { "listing", "li" }, + { "listing", "table" }, + { "listing", "ul" }, + { "menu", "dd" }, + { "menu", "dl" }, + { "menu", "dt" }, + { "menu", "form" }, + { "menu", "ul" }, + { "ol", "form" }, + { "option", "optgroup" }, + { "option", 
"option" }, + { "p", "address" }, + { "p", "blockquote" }, + { "p", "body" }, + { "p", "caption" }, + { "p", "center" }, + { "p", "col" }, + { "p", "colgroup" }, + { "p", "dd" }, + { "p", "dir" }, + { "p", "div" }, + { "p", "dl" }, + { "p", "dt" }, + { "p", "fieldset" }, + { "p", "form" }, + { "p", "frameset" }, + { "p", "h1" }, + { "p", "h2" }, + { "p", "h3" }, + { "p", "h4" }, + { "p", "h5" }, + { "p", "h6" }, + { "p", "head" }, + { "p", "hr" }, + { "p", "li" }, + { "p", "listing" }, + { "p", "menu" }, + { "p", "ol" }, + { "p", "p" }, + { "p", "pre" }, + { "p", "table" }, + { "p", "tbody" }, + { "p", "td" }, + { "p", "tfoot" }, + { "p", "th" }, + { "p", "title" }, + { "p", "tr" }, + { "p", "ul" }, + { "p", "xmp" }, + { "pre", "dd" }, + { "pre", "dl" }, + { "pre", "dt" }, + { "pre", "fieldset" }, + { "pre", "form" }, + { "pre", "li" }, + { "pre", "table" }, + { "pre", "ul" }, + { "s", "p" }, + { "script", "noscript" }, + { "small", "p" }, + { "span", "td" }, + { "span", "th" }, + { "strike", "p" }, + { "style", "body" }, + { "style", "frameset" }, + { "tbody", "tbody" }, + { "tbody", "tfoot" }, + { "td", "tbody" }, + { "td", "td" }, + { "td", "tfoot" }, + { "td", "th" }, + { "td", "tr" }, + { "tfoot", "tbody" }, + { "th", "tbody" }, + { "th", "td" }, + { "th", "tfoot" }, + { "th", "th" }, + { "th", "tr" }, + { "thead", "tbody" }, + { "thead", "tfoot" }, + { "title", "body" }, + { "title", "frameset" }, + { "tr", "tbody" }, + { "tr", "tfoot" }, + { "tr", "tr" }, + { "tt", "p" }, + { "u", "p" }, + { "u", "td" }, + { "u", "th" }, + { "ul", "address" }, + { "ul", "form" }, + { "ul", "menu" }, + { "ul", "pre" }, + { "xmp", "dd" }, + { "xmp", "dl" }, + { "xmp", "dt" }, + { "xmp", "fieldset" }, + { "xmp", "form" }, + { "xmp", "li" }, + { "xmp", "table" }, + { "xmp", "ul" } +}; + +/* + * The list of HTML elements which are supposed not to have + * CDATA content and where a p element will be implied + * + * TODO: extend that list by reading the HTML SGML DTD on + * implied paragraph + */ +static const char *const htmlNoContentElements[] = { + "html", + "head", + NULL +}; + +/* + * The list of HTML attributes which are of content %Script; + * NOTE: when adding ones, check htmlIsScriptAttribute() since + * it assumes the name starts with 'on' + */ +static const char *const htmlScriptAttributes[] = { + "onclick", + "ondblclick", + "onmousedown", + "onmouseup", + "onmouseover", + "onmousemove", + "onmouseout", + "onkeypress", + "onkeydown", + "onkeyup", + "onload", + "onunload", + "onfocus", + "onblur", + "onsubmit", + "onreset", + "onchange", + "onselect" +}; + +/* + * This table is used by the htmlparser to know what to do with + * broken html pages. By assigning different priorities to different + * elements the parser can decide how to handle extra endtags. + * Endtags are only allowed to close elements with lower or equal + * priority. 
+ */ + +typedef struct { + const char *name; + int priority; +} elementPriority; + +static const elementPriority htmlEndPriority[] = { + {"div", 150}, + {"td", 160}, + {"th", 160}, + {"tr", 170}, + {"thead", 180}, + {"tbody", 180}, + {"tfoot", 180}, + {"table", 190}, + {"head", 200}, + {"body", 200}, + {"html", 220}, + {NULL, 100} /* Default priority */ +}; + +/************************************************************************ + * * + * functions to handle HTML specific data * + * * + ************************************************************************/ + +static void +htmlParserFinishElementParsing(htmlParserCtxtPtr ctxt) { + /* + * Capture end position and add node + */ + if ( ctxt->node != NULL && ctxt->record_info ) { + ctxt->nodeInfo->end_pos = ctxt->input->consumed + + (CUR_PTR - ctxt->input->base); + ctxt->nodeInfo->end_line = ctxt->input->line; + ctxt->nodeInfo->node = ctxt->node; + xmlParserAddNodeInfo(ctxt, ctxt->nodeInfo); + htmlNodeInfoPop(ctxt); + } +} + +/** + * htmlInitAutoClose: + * + * DEPRECATED: This is a no-op. + */ +void +htmlInitAutoClose(void) { +} + +static int +htmlCompareTags(const void *key, const void *member) { + const xmlChar *tag = (const xmlChar *) key; + const htmlElemDesc *desc = (const htmlElemDesc *) member; + + return(xmlStrcasecmp(tag, BAD_CAST desc->name)); +} + +/** + * htmlTagLookup: + * @tag: The tag name in lowercase + * + * Lookup the HTML tag in the ElementTable + * + * Returns the related htmlElemDescPtr or NULL if not found. + */ +const htmlElemDesc * +htmlTagLookup(const xmlChar *tag) { + if (tag == NULL) + return(NULL); + + return((const htmlElemDesc *) bsearch(tag, html40ElementTable, + sizeof(html40ElementTable) / sizeof(htmlElemDesc), + sizeof(htmlElemDesc), htmlCompareTags)); +} + +/** + * htmlGetEndPriority: + * @name: The name of the element to look up the priority for. + * + * Return value: The "endtag" priority. + **/ +static int +htmlGetEndPriority (const xmlChar *name) { + int i = 0; + + while ((htmlEndPriority[i].name != NULL) && + (!xmlStrEqual((const xmlChar *)htmlEndPriority[i].name, name))) + i++; + + return(htmlEndPriority[i].priority); +} + + +static int +htmlCompareStartClose(const void *vkey, const void *member) { + const htmlStartCloseEntry *key = (const htmlStartCloseEntry *) vkey; + const htmlStartCloseEntry *entry = (const htmlStartCloseEntry *) member; + int ret; + + ret = strcmp(key->oldTag, entry->oldTag); + if (ret == 0) + ret = strcmp(key->newTag, entry->newTag); + + return(ret); +} + +/** + * htmlCheckAutoClose: + * @newtag: The new tag name + * @oldtag: The old tag name + * + * Checks whether the new tag is one of the registered valid tags for + * closing old. + * + * Returns 0 if no, 1 if yes. + */ +static int +htmlCheckAutoClose(const xmlChar * newtag, const xmlChar * oldtag) +{ + htmlStartCloseEntry key; + void *res; + + key.oldTag = (const char *) oldtag; + key.newTag = (const char *) newtag; + res = bsearch(&key, htmlStartClose, + sizeof(htmlStartClose) / sizeof(htmlStartCloseEntry), + sizeof(htmlStartCloseEntry), htmlCompareStartClose); + return(res != NULL); +} + +/** + * htmlAutoCloseOnClose: + * @ctxt: an HTML parser context + * @newtag: The new tag name + * @force: force the tag closure + * + * The HTML DTD allows an ending tag to implicitly close other tags. 
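+ */
+
+/*
+ * Editor's illustration, not part of the original file: htmlStartClose
+ * is sorted by (oldTag, newTag), so htmlCheckAutoClose() above can
+ * bsearch it. A new "li" start tag closes an open "li", while a new
+ * "em" does not close an open "p". The demo function is hypothetical
+ * and is never compiled (#if 0).
+ */
+#if 0
+static void
+exampleStartCloseDemo(void) {
+    /* 1: the pair ("li", "li") is in the table */
+    (void) htmlCheckAutoClose(BAD_CAST "li", BAD_CAST "li");
+    /* 0: there is no ("p", "em") entry */
+    (void) htmlCheckAutoClose(BAD_CAST "em", BAD_CAST "p");
+}
+#endif
+
+/*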
+ */
+static void
+htmlAutoCloseOnClose(htmlParserCtxtPtr ctxt, const xmlChar * newtag)
+{
+    const htmlElemDesc *info;
+    int i, priority;
+
+    if (ctxt->options & HTML_PARSE_HTML5)
+        return;
+
+    priority = htmlGetEndPriority(newtag);
+
+    for (i = (ctxt->nameNr - 1); i >= 0; i--) {
+
+        if (xmlStrEqual(newtag, ctxt->nameTab[i]))
+            break;
+        /*
+         * A misplaced endtag can only close elements with lower
+         * or equal priority, so if we find an element with higher
+         * priority before we find an element with
+         * matching name, we just ignore this endtag
+         */
+        if (htmlGetEndPriority(ctxt->nameTab[i]) > priority)
+            return;
+    }
+    if (i < 0)
+        return;
+
+    while (!xmlStrEqual(newtag, ctxt->name)) {
+        info = htmlTagLookup(ctxt->name);
+        if ((info != NULL) && (info->endTag == 3)) {
+            htmlParseErr(ctxt, XML_ERR_TAG_NAME_MISMATCH,
+                         "Opening and ending tag mismatch: %s and %s\n",
+                         newtag, ctxt->name);
+        }
+        htmlParserFinishElementParsing(ctxt);
+        if ((ctxt->sax != NULL) && (ctxt->sax->endElement != NULL))
+            ctxt->sax->endElement(ctxt->userData, ctxt->name);
+        htmlnamePop(ctxt);
+    }
+}
+
+/**
+ * htmlAutoCloseOnEnd:
+ * @ctxt:  an HTML parser context
+ *
+ * Close all remaining tags at the end of the stream
+ */
+static void
+htmlAutoCloseOnEnd(htmlParserCtxtPtr ctxt)
+{
+    int i;
+
+    if (ctxt->options & HTML_PARSE_HTML5)
+        return;
+
+    if (ctxt->nameNr == 0)
+        return;
+    for (i = (ctxt->nameNr - 1); i >= 0; i--) {
+        htmlParserFinishElementParsing(ctxt);
+        if ((ctxt->sax != NULL) && (ctxt->sax->endElement != NULL))
+            ctxt->sax->endElement(ctxt->userData, ctxt->name);
+        htmlnamePop(ctxt);
+    }
+}
+
+/**
+ * htmlAutoClose:
+ * @ctxt:  an HTML parser context
+ * @newtag:  The new tag name or NULL
+ *
+ * The HTML DTD allows a tag to implicitly close other tags.
+ * The list is kept in the htmlStartClose array. This function is
+ * called when a new tag has been detected and generates the
+ * appropriate closes if possible/needed.
+ * If newtag is NULL this means we are at the end of the resource
+ * and we should check for unclosed elements.
+ */
+static void
+htmlAutoClose(htmlParserCtxtPtr ctxt, const xmlChar * newtag)
+{
+    if (ctxt->options & HTML_PARSE_HTML5)
+        return;
+
+    if (newtag == NULL)
+        return;
+
+    while ((ctxt->name != NULL) &&
+           (htmlCheckAutoClose(newtag, ctxt->name))) {
+        htmlParserFinishElementParsing(ctxt);
+        if ((ctxt->sax != NULL) && (ctxt->sax->endElement != NULL))
+            ctxt->sax->endElement(ctxt->userData, ctxt->name);
+        htmlnamePop(ctxt);
+    }
+}
+
+/**
+ * htmlAutoCloseTag:
+ * @doc:  the HTML document
+ * @name:  The tag name
+ * @elem:  the HTML element
+ *
+ * DEPRECATED: Internal function, don't use.
+ *
+ * The HTML DTD allows a tag to implicitly close other tags.
+ * The list is kept in the htmlStartClose array. This function checks
+ * if the element or one of its children would autoclose the
+ * given tag.
+ *
+ * Returns 1 if autoclose, 0 otherwise
+ */
+int
+htmlAutoCloseTag(htmlDocPtr doc, const xmlChar *name, htmlNodePtr elem) {
+    htmlNodePtr child;
+
+    if (elem == NULL) return(1);
+    if (xmlStrEqual(name, elem->name)) return(0);
+    if (htmlCheckAutoClose(elem->name, name)) return(1);
+    child = elem->children;
+    while (child != NULL) {
+        if (htmlAutoCloseTag(doc, name, child)) return(1);
+        child = child->next;
+    }
+    return(0);
+}
+
+/**
+ * htmlIsAutoClosed:
+ * @doc:  the HTML document
+ * @elem:  the HTML element
+ *
+ * DEPRECATED: Internal function, don't use.
+ *
+ * The HTML DTD allows a tag to implicitly close other tags.
+ * The list is kept in the htmlStartClose array.
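+ */
+
+/*
+ * Editor's sketch, not part of the original file, of the public
+ * htmlAutoCloseTag() API above: a <ul> start tag auto-closes an open
+ * <p>, because the pair ("p", "ul") appears in htmlStartClose. The
+ * function and variable names are hypothetical; never compiled (#if 0).
+ */
+#if 0
+static void
+exampleAutoCloseTagDemo(void) {
+    htmlDocPtr doc = htmlNewDoc(NULL, NULL);
+    htmlNodePtr ul = xmlNewDocNode(doc, NULL, BAD_CAST "ul", NULL);
+    int closes;
+
+    /* 1: this <ul> would auto-close a currently open <p> */
+    closes = htmlAutoCloseTag(doc, BAD_CAST "p", ul);
+    (void) closes;
+
+    xmlFreeNode(ul);
+    xmlFreeDoc(doc);
+}
+#endif
+
+/*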
This function checks
+ * if a tag is autoclosed by one of its children
+ *
+ * Returns 1 if autoclosed, 0 otherwise
+ */
+int
+htmlIsAutoClosed(htmlDocPtr doc, htmlNodePtr elem) {
+    htmlNodePtr child;
+
+    if (elem == NULL) return(1);
+    child = elem->children;
+    while (child != NULL) {
+        if (htmlAutoCloseTag(doc, elem->name, child)) return(1);
+        child = child->next;
+    }
+    return(0);
+}
+
+/**
+ * htmlCheckImplied:
+ * @ctxt:  an HTML parser context
+ * @newtag:  The new tag name
+ *
+ * The HTML DTD allows a tag to exist only implicitly.
+ * Called when a new tag has been detected, this generates the
+ * appropriate implicit tags if missing.
+ */
+static void
+htmlCheckImplied(htmlParserCtxtPtr ctxt, const xmlChar *newtag) {
+    int i;
+
+    if (ctxt->options & (HTML_PARSE_NOIMPLIED | HTML_PARSE_HTML5))
+        return;
+    if (!htmlOmittedDefaultValue)
+        return;
+    if (xmlStrEqual(newtag, BAD_CAST"html"))
+        return;
+    if (ctxt->nameNr <= 0) {
+        htmlnamePush(ctxt, BAD_CAST"html");
+        if ((ctxt->sax != NULL) && (ctxt->sax->startElement != NULL))
+            ctxt->sax->startElement(ctxt->userData, BAD_CAST"html", NULL);
+    }
+    if ((xmlStrEqual(newtag, BAD_CAST"body")) || (xmlStrEqual(newtag, BAD_CAST"head")))
+        return;
+    if ((ctxt->nameNr <= 1) &&
+        ((xmlStrEqual(newtag, BAD_CAST"script")) ||
+         (xmlStrEqual(newtag, BAD_CAST"style")) ||
+         (xmlStrEqual(newtag, BAD_CAST"meta")) ||
+         (xmlStrEqual(newtag, BAD_CAST"link")) ||
+         (xmlStrEqual(newtag, BAD_CAST"title")) ||
+         (xmlStrEqual(newtag, BAD_CAST"base")))) {
+        if (ctxt->html >= 3) {
+            /* we already saw or generated a <head> before */
+            return;
+        }
+        /*
+         * dropped OBJECT ... if you put it first BODY will be
+         * assumed !
+         */
+        htmlnamePush(ctxt, BAD_CAST"head");
+        if ((ctxt->sax != NULL) && (ctxt->sax->startElement != NULL))
+            ctxt->sax->startElement(ctxt->userData, BAD_CAST"head", NULL);
+    } else if ((!xmlStrEqual(newtag, BAD_CAST"noframes")) &&
+               (!xmlStrEqual(newtag, BAD_CAST"frame")) &&
+               (!xmlStrEqual(newtag, BAD_CAST"frameset"))) {
+        if (ctxt->html >= 10) {
+            /* we already saw or generated a <body> before */
+            return;
+        }
+        for (i = 0; i < ctxt->nameNr; i++) {
+            if (xmlStrEqual(ctxt->nameTab[i], BAD_CAST"body")) {
+                return;
+            }
+            if (xmlStrEqual(ctxt->nameTab[i], BAD_CAST"head")) {
+                return;
+            }
+        }
+
+        htmlnamePush(ctxt, BAD_CAST"body");
+        if ((ctxt->sax != NULL) && (ctxt->sax->startElement != NULL))
+            ctxt->sax->startElement(ctxt->userData, BAD_CAST"body", NULL);
+    }
+}
+
+/**
+ * htmlCheckParagraph:
+ * @ctxt:  an HTML parser context
+ *
+ * Check whether a p element needs to be implied before inserting
+ * characters in the current element.
+ *
+ * Returns 1 if a paragraph has been inserted, 0 if not and -1
+ * in case of error.
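+ *
+ * Example (editor's sketch, assuming the public htmlReadDoc() API):
+ *
+ *     htmlDocPtr doc = htmlReadDoc(BAD_CAST "some text", NULL, NULL, 0);
+ *     // the bare character data ends up in an implied <p> under <body>
+ *     xmlFreeDoc(doc);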
+ */
+
+static int
+htmlCheckParagraph(htmlParserCtxtPtr ctxt) {
+    const xmlChar *tag;
+    int i;
+
+    if (ctxt == NULL)
+        return(-1);
+    if (ctxt->options & HTML_PARSE_HTML5)
+        return(0);
+
+    tag = ctxt->name;
+    if (tag == NULL) {
+        htmlAutoClose(ctxt, BAD_CAST"p");
+        htmlCheckImplied(ctxt, BAD_CAST"p");
+        htmlnamePush(ctxt, BAD_CAST"p");
+        if ((ctxt->sax != NULL) && (ctxt->sax->startElement != NULL))
+            ctxt->sax->startElement(ctxt->userData, BAD_CAST"p", NULL);
+        return(1);
+    }
+    if (!htmlOmittedDefaultValue)
+        return(0);
+    for (i = 0; htmlNoContentElements[i] != NULL; i++) {
+        if (xmlStrEqual(tag, BAD_CAST htmlNoContentElements[i])) {
+            htmlAutoClose(ctxt, BAD_CAST"p");
+            htmlCheckImplied(ctxt, BAD_CAST"p");
+            htmlnamePush(ctxt, BAD_CAST"p");
+            if ((ctxt->sax != NULL) && (ctxt->sax->startElement != NULL))
+                ctxt->sax->startElement(ctxt->userData, BAD_CAST"p", NULL);
+            return(1);
+        }
+    }
+    return(0);
+}
+
+/**
+ * htmlIsScriptAttribute:
+ * @name:  an attribute name
+ *
+ * Check if an attribute is of content type Script
+ *
+ * Returns 1 if the attribute is a script, 0 otherwise
+ */
+int
+htmlIsScriptAttribute(const xmlChar *name) {
+    unsigned int i;
+
+    if (name == NULL)
+        return(0);
+    /*
+     * all script attributes start with 'on'
+     */
+    if ((name[0] != 'o') || (name[1] != 'n'))
+        return(0);
+    for (i = 0;
+         i < sizeof(htmlScriptAttributes)/sizeof(htmlScriptAttributes[0]);
+         i++) {
+        if (xmlStrEqual(name, (const xmlChar *) htmlScriptAttributes[i]))
+            return(1);
+    }
+    return(0);
+}
+
+/************************************************************************
+ *                                                                      *
+ *              The list of HTML predefined entities                    *
+ *                                                                      *
+ ************************************************************************/
+
+
+static const htmlEntityDesc  html40EntitiesTable[] = {
+/*
+ * the 4 absolute ones, plus apostrophe.
+ */
+{ 34,   "quot", "quotation mark = APL quote, U+0022 ISOnum" },
+{ 38,   "amp",  "ampersand, U+0026 ISOnum" },
+{ 39,   "apos", "single quote" },
+{ 60,   "lt",   "less-than sign, U+003C ISOnum" },
+{ 62,   "gt",   "greater-than sign, U+003E ISOnum" },
+
+/*
+ * A bunch still in the 128-255 range
+ * Replacing them really depends on the charset used.
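+ * (For example, &nbsp; below is U+00A0: the single byte 0xA0 in
+ * ISO-8859-1, but the two-byte sequence 0xC2 0xA0 in UTF-8.)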
+ */ +{ 160, "nbsp", "no-break space = non-breaking space, U+00A0 ISOnum" }, +{ 161, "iexcl","inverted exclamation mark, U+00A1 ISOnum" }, +{ 162, "cent", "cent sign, U+00A2 ISOnum" }, +{ 163, "pound","pound sign, U+00A3 ISOnum" }, +{ 164, "curren","currency sign, U+00A4 ISOnum" }, +{ 165, "yen", "yen sign = yuan sign, U+00A5 ISOnum" }, +{ 166, "brvbar","broken bar = broken vertical bar, U+00A6 ISOnum" }, +{ 167, "sect", "section sign, U+00A7 ISOnum" }, +{ 168, "uml", "diaeresis = spacing diaeresis, U+00A8 ISOdia" }, +{ 169, "copy", "copyright sign, U+00A9 ISOnum" }, +{ 170, "ordf", "feminine ordinal indicator, U+00AA ISOnum" }, +{ 171, "laquo","left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum" }, +{ 172, "not", "not sign, U+00AC ISOnum" }, +{ 173, "shy", "soft hyphen = discretionary hyphen, U+00AD ISOnum" }, +{ 174, "reg", "registered sign = registered trade mark sign, U+00AE ISOnum" }, +{ 175, "macr", "macron = spacing macron = overline = APL overbar, U+00AF ISOdia" }, +{ 176, "deg", "degree sign, U+00B0 ISOnum" }, +{ 177, "plusmn","plus-minus sign = plus-or-minus sign, U+00B1 ISOnum" }, +{ 178, "sup2", "superscript two = superscript digit two = squared, U+00B2 ISOnum" }, +{ 179, "sup3", "superscript three = superscript digit three = cubed, U+00B3 ISOnum" }, +{ 180, "acute","acute accent = spacing acute, U+00B4 ISOdia" }, +{ 181, "micro","micro sign, U+00B5 ISOnum" }, +{ 182, "para", "pilcrow sign = paragraph sign, U+00B6 ISOnum" }, +{ 183, "middot","middle dot = Georgian comma Greek middle dot, U+00B7 ISOnum" }, +{ 184, "cedil","cedilla = spacing cedilla, U+00B8 ISOdia" }, +{ 185, "sup1", "superscript one = superscript digit one, U+00B9 ISOnum" }, +{ 186, "ordm", "masculine ordinal indicator, U+00BA ISOnum" }, +{ 187, "raquo","right-pointing double angle quotation mark right pointing guillemet, U+00BB ISOnum" }, +{ 188, "frac14","vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum" }, +{ 189, "frac12","vulgar fraction one half = fraction one half, U+00BD ISOnum" }, +{ 190, "frac34","vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum" }, +{ 191, "iquest","inverted question mark = turned question mark, U+00BF ISOnum" }, +{ 192, "Agrave","latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1" }, +{ 193, "Aacute","latin capital letter A with acute, U+00C1 ISOlat1" }, +{ 194, "Acirc","latin capital letter A with circumflex, U+00C2 ISOlat1" }, +{ 195, "Atilde","latin capital letter A with tilde, U+00C3 ISOlat1" }, +{ 196, "Auml", "latin capital letter A with diaeresis, U+00C4 ISOlat1" }, +{ 197, "Aring","latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1" }, +{ 198, "AElig","latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1" }, +{ 199, "Ccedil","latin capital letter C with cedilla, U+00C7 ISOlat1" }, +{ 200, "Egrave","latin capital letter E with grave, U+00C8 ISOlat1" }, +{ 201, "Eacute","latin capital letter E with acute, U+00C9 ISOlat1" }, +{ 202, "Ecirc","latin capital letter E with circumflex, U+00CA ISOlat1" }, +{ 203, "Euml", "latin capital letter E with diaeresis, U+00CB ISOlat1" }, +{ 204, "Igrave","latin capital letter I with grave, U+00CC ISOlat1" }, +{ 205, "Iacute","latin capital letter I with acute, U+00CD ISOlat1" }, +{ 206, "Icirc","latin capital letter I with circumflex, U+00CE ISOlat1" }, +{ 207, "Iuml", "latin capital letter I with diaeresis, U+00CF ISOlat1" }, +{ 208, "ETH", "latin capital letter ETH, U+00D0 ISOlat1" }, +{ 209, 
"Ntilde","latin capital letter N with tilde, U+00D1 ISOlat1" }, +{ 210, "Ograve","latin capital letter O with grave, U+00D2 ISOlat1" }, +{ 211, "Oacute","latin capital letter O with acute, U+00D3 ISOlat1" }, +{ 212, "Ocirc","latin capital letter O with circumflex, U+00D4 ISOlat1" }, +{ 213, "Otilde","latin capital letter O with tilde, U+00D5 ISOlat1" }, +{ 214, "Ouml", "latin capital letter O with diaeresis, U+00D6 ISOlat1" }, +{ 215, "times","multiplication sign, U+00D7 ISOnum" }, +{ 216, "Oslash","latin capital letter O with stroke latin capital letter O slash, U+00D8 ISOlat1" }, +{ 217, "Ugrave","latin capital letter U with grave, U+00D9 ISOlat1" }, +{ 218, "Uacute","latin capital letter U with acute, U+00DA ISOlat1" }, +{ 219, "Ucirc","latin capital letter U with circumflex, U+00DB ISOlat1" }, +{ 220, "Uuml", "latin capital letter U with diaeresis, U+00DC ISOlat1" }, +{ 221, "Yacute","latin capital letter Y with acute, U+00DD ISOlat1" }, +{ 222, "THORN","latin capital letter THORN, U+00DE ISOlat1" }, +{ 223, "szlig","latin small letter sharp s = ess-zed, U+00DF ISOlat1" }, +{ 224, "agrave","latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1" }, +{ 225, "aacute","latin small letter a with acute, U+00E1 ISOlat1" }, +{ 226, "acirc","latin small letter a with circumflex, U+00E2 ISOlat1" }, +{ 227, "atilde","latin small letter a with tilde, U+00E3 ISOlat1" }, +{ 228, "auml", "latin small letter a with diaeresis, U+00E4 ISOlat1" }, +{ 229, "aring","latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1" }, +{ 230, "aelig","latin small letter ae = latin small ligature ae, U+00E6 ISOlat1" }, +{ 231, "ccedil","latin small letter c with cedilla, U+00E7 ISOlat1" }, +{ 232, "egrave","latin small letter e with grave, U+00E8 ISOlat1" }, +{ 233, "eacute","latin small letter e with acute, U+00E9 ISOlat1" }, +{ 234, "ecirc","latin small letter e with circumflex, U+00EA ISOlat1" }, +{ 235, "euml", "latin small letter e with diaeresis, U+00EB ISOlat1" }, +{ 236, "igrave","latin small letter i with grave, U+00EC ISOlat1" }, +{ 237, "iacute","latin small letter i with acute, U+00ED ISOlat1" }, +{ 238, "icirc","latin small letter i with circumflex, U+00EE ISOlat1" }, +{ 239, "iuml", "latin small letter i with diaeresis, U+00EF ISOlat1" }, +{ 240, "eth", "latin small letter eth, U+00F0 ISOlat1" }, +{ 241, "ntilde","latin small letter n with tilde, U+00F1 ISOlat1" }, +{ 242, "ograve","latin small letter o with grave, U+00F2 ISOlat1" }, +{ 243, "oacute","latin small letter o with acute, U+00F3 ISOlat1" }, +{ 244, "ocirc","latin small letter o with circumflex, U+00F4 ISOlat1" }, +{ 245, "otilde","latin small letter o with tilde, U+00F5 ISOlat1" }, +{ 246, "ouml", "latin small letter o with diaeresis, U+00F6 ISOlat1" }, +{ 247, "divide","division sign, U+00F7 ISOnum" }, +{ 248, "oslash","latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1" }, +{ 249, "ugrave","latin small letter u with grave, U+00F9 ISOlat1" }, +{ 250, "uacute","latin small letter u with acute, U+00FA ISOlat1" }, +{ 251, "ucirc","latin small letter u with circumflex, U+00FB ISOlat1" }, +{ 252, "uuml", "latin small letter u with diaeresis, U+00FC ISOlat1" }, +{ 253, "yacute","latin small letter y with acute, U+00FD ISOlat1" }, +{ 254, "thorn","latin small letter thorn with, U+00FE ISOlat1" }, +{ 255, "yuml", "latin small letter y with diaeresis, U+00FF ISOlat1" }, + +{ 338, "OElig","latin capital ligature OE, U+0152 ISOlat2" }, +{ 339, "oelig","latin small ligature oe, 
U+0153 ISOlat2" }, +{ 352, "Scaron","latin capital letter S with caron, U+0160 ISOlat2" }, +{ 353, "scaron","latin small letter s with caron, U+0161 ISOlat2" }, +{ 376, "Yuml", "latin capital letter Y with diaeresis, U+0178 ISOlat2" }, + +/* + * Anything below should really be kept as entities references + */ +{ 402, "fnof", "latin small f with hook = function = florin, U+0192 ISOtech" }, + +{ 710, "circ", "modifier letter circumflex accent, U+02C6 ISOpub" }, +{ 732, "tilde","small tilde, U+02DC ISOdia" }, + +{ 913, "Alpha","greek capital letter alpha, U+0391" }, +{ 914, "Beta", "greek capital letter beta, U+0392" }, +{ 915, "Gamma","greek capital letter gamma, U+0393 ISOgrk3" }, +{ 916, "Delta","greek capital letter delta, U+0394 ISOgrk3" }, +{ 917, "Epsilon","greek capital letter epsilon, U+0395" }, +{ 918, "Zeta", "greek capital letter zeta, U+0396" }, +{ 919, "Eta", "greek capital letter eta, U+0397" }, +{ 920, "Theta","greek capital letter theta, U+0398 ISOgrk3" }, +{ 921, "Iota", "greek capital letter iota, U+0399" }, +{ 922, "Kappa","greek capital letter kappa, U+039A" }, +{ 923, "Lambda", "greek capital letter lambda, U+039B ISOgrk3" }, +{ 924, "Mu", "greek capital letter mu, U+039C" }, +{ 925, "Nu", "greek capital letter nu, U+039D" }, +{ 926, "Xi", "greek capital letter xi, U+039E ISOgrk3" }, +{ 927, "Omicron","greek capital letter omicron, U+039F" }, +{ 928, "Pi", "greek capital letter pi, U+03A0 ISOgrk3" }, +{ 929, "Rho", "greek capital letter rho, U+03A1" }, +{ 931, "Sigma","greek capital letter sigma, U+03A3 ISOgrk3" }, +{ 932, "Tau", "greek capital letter tau, U+03A4" }, +{ 933, "Upsilon","greek capital letter upsilon, U+03A5 ISOgrk3" }, +{ 934, "Phi", "greek capital letter phi, U+03A6 ISOgrk3" }, +{ 935, "Chi", "greek capital letter chi, U+03A7" }, +{ 936, "Psi", "greek capital letter psi, U+03A8 ISOgrk3" }, +{ 937, "Omega","greek capital letter omega, U+03A9 ISOgrk3" }, + +{ 945, "alpha","greek small letter alpha, U+03B1 ISOgrk3" }, +{ 946, "beta", "greek small letter beta, U+03B2 ISOgrk3" }, +{ 947, "gamma","greek small letter gamma, U+03B3 ISOgrk3" }, +{ 948, "delta","greek small letter delta, U+03B4 ISOgrk3" }, +{ 949, "epsilon","greek small letter epsilon, U+03B5 ISOgrk3" }, +{ 950, "zeta", "greek small letter zeta, U+03B6 ISOgrk3" }, +{ 951, "eta", "greek small letter eta, U+03B7 ISOgrk3" }, +{ 952, "theta","greek small letter theta, U+03B8 ISOgrk3" }, +{ 953, "iota", "greek small letter iota, U+03B9 ISOgrk3" }, +{ 954, "kappa","greek small letter kappa, U+03BA ISOgrk3" }, +{ 955, "lambda","greek small letter lambda, U+03BB ISOgrk3" }, +{ 956, "mu", "greek small letter mu, U+03BC ISOgrk3" }, +{ 957, "nu", "greek small letter nu, U+03BD ISOgrk3" }, +{ 958, "xi", "greek small letter xi, U+03BE ISOgrk3" }, +{ 959, "omicron","greek small letter omicron, U+03BF NEW" }, +{ 960, "pi", "greek small letter pi, U+03C0 ISOgrk3" }, +{ 961, "rho", "greek small letter rho, U+03C1 ISOgrk3" }, +{ 962, "sigmaf","greek small letter final sigma, U+03C2 ISOgrk3" }, +{ 963, "sigma","greek small letter sigma, U+03C3 ISOgrk3" }, +{ 964, "tau", "greek small letter tau, U+03C4 ISOgrk3" }, +{ 965, "upsilon","greek small letter upsilon, U+03C5 ISOgrk3" }, +{ 966, "phi", "greek small letter phi, U+03C6 ISOgrk3" }, +{ 967, "chi", "greek small letter chi, U+03C7 ISOgrk3" }, +{ 968, "psi", "greek small letter psi, U+03C8 ISOgrk3" }, +{ 969, "omega","greek small letter omega, U+03C9 ISOgrk3" }, +{ 977, "thetasym","greek small letter theta symbol, U+03D1 NEW" }, +{ 978, "upsih","greek upsilon with 
hook symbol, U+03D2 NEW" }, +{ 982, "piv", "greek pi symbol, U+03D6 ISOgrk3" }, + +{ 8194, "ensp", "en space, U+2002 ISOpub" }, +{ 8195, "emsp", "em space, U+2003 ISOpub" }, +{ 8201, "thinsp","thin space, U+2009 ISOpub" }, +{ 8204, "zwnj", "zero width non-joiner, U+200C NEW RFC 2070" }, +{ 8205, "zwj", "zero width joiner, U+200D NEW RFC 2070" }, +{ 8206, "lrm", "left-to-right mark, U+200E NEW RFC 2070" }, +{ 8207, "rlm", "right-to-left mark, U+200F NEW RFC 2070" }, +{ 8211, "ndash","en dash, U+2013 ISOpub" }, +{ 8212, "mdash","em dash, U+2014 ISOpub" }, +{ 8216, "lsquo","left single quotation mark, U+2018 ISOnum" }, +{ 8217, "rsquo","right single quotation mark, U+2019 ISOnum" }, +{ 8218, "sbquo","single low-9 quotation mark, U+201A NEW" }, +{ 8220, "ldquo","left double quotation mark, U+201C ISOnum" }, +{ 8221, "rdquo","right double quotation mark, U+201D ISOnum" }, +{ 8222, "bdquo","double low-9 quotation mark, U+201E NEW" }, +{ 8224, "dagger","dagger, U+2020 ISOpub" }, +{ 8225, "Dagger","double dagger, U+2021 ISOpub" }, + +{ 8226, "bull", "bullet = black small circle, U+2022 ISOpub" }, +{ 8230, "hellip","horizontal ellipsis = three dot leader, U+2026 ISOpub" }, + +{ 8240, "permil","per mille sign, U+2030 ISOtech" }, + +{ 8242, "prime","prime = minutes = feet, U+2032 ISOtech" }, +{ 8243, "Prime","double prime = seconds = inches, U+2033 ISOtech" }, + +{ 8249, "lsaquo","single left-pointing angle quotation mark, U+2039 ISO proposed" }, +{ 8250, "rsaquo","single right-pointing angle quotation mark, U+203A ISO proposed" }, + +{ 8254, "oline","overline = spacing overscore, U+203E NEW" }, +{ 8260, "frasl","fraction slash, U+2044 NEW" }, + +{ 8364, "euro", "euro sign, U+20AC NEW" }, + +{ 8465, "image","blackletter capital I = imaginary part, U+2111 ISOamso" }, +{ 8472, "weierp","script capital P = power set = Weierstrass p, U+2118 ISOamso" }, +{ 8476, "real", "blackletter capital R = real part symbol, U+211C ISOamso" }, +{ 8482, "trade","trade mark sign, U+2122 ISOnum" }, +{ 8501, "alefsym","alef symbol = first transfinite cardinal, U+2135 NEW" }, +{ 8592, "larr", "leftwards arrow, U+2190 ISOnum" }, +{ 8593, "uarr", "upwards arrow, U+2191 ISOnum" }, +{ 8594, "rarr", "rightwards arrow, U+2192 ISOnum" }, +{ 8595, "darr", "downwards arrow, U+2193 ISOnum" }, +{ 8596, "harr", "left right arrow, U+2194 ISOamsa" }, +{ 8629, "crarr","downwards arrow with corner leftwards = carriage return, U+21B5 NEW" }, +{ 8656, "lArr", "leftwards double arrow, U+21D0 ISOtech" }, +{ 8657, "uArr", "upwards double arrow, U+21D1 ISOamsa" }, +{ 8658, "rArr", "rightwards double arrow, U+21D2 ISOtech" }, +{ 8659, "dArr", "downwards double arrow, U+21D3 ISOamsa" }, +{ 8660, "hArr", "left right double arrow, U+21D4 ISOamsa" }, + +{ 8704, "forall","for all, U+2200 ISOtech" }, +{ 8706, "part", "partial differential, U+2202 ISOtech" }, +{ 8707, "exist","there exists, U+2203 ISOtech" }, +{ 8709, "empty","empty set = null set = diameter, U+2205 ISOamso" }, +{ 8711, "nabla","nabla = backward difference, U+2207 ISOtech" }, +{ 8712, "isin", "element of, U+2208 ISOtech" }, +{ 8713, "notin","not an element of, U+2209 ISOtech" }, +{ 8715, "ni", "contains as member, U+220B ISOtech" }, +{ 8719, "prod", "n-ary product = product sign, U+220F ISOamsb" }, +{ 8721, "sum", "n-ary summation, U+2211 ISOamsb" }, +{ 8722, "minus","minus sign, U+2212 ISOtech" }, +{ 8727, "lowast","asterisk operator, U+2217 ISOtech" }, +{ 8730, "radic","square root = radical sign, U+221A ISOtech" }, +{ 8733, "prop", "proportional to, U+221D ISOtech" }, +{ 8734, 
"infin","infinity, U+221E ISOtech" }, +{ 8736, "ang", "angle, U+2220 ISOamso" }, +{ 8743, "and", "logical and = wedge, U+2227 ISOtech" }, +{ 8744, "or", "logical or = vee, U+2228 ISOtech" }, +{ 8745, "cap", "intersection = cap, U+2229 ISOtech" }, +{ 8746, "cup", "union = cup, U+222A ISOtech" }, +{ 8747, "int", "integral, U+222B ISOtech" }, +{ 8756, "there4","therefore, U+2234 ISOtech" }, +{ 8764, "sim", "tilde operator = varies with = similar to, U+223C ISOtech" }, +{ 8773, "cong", "approximately equal to, U+2245 ISOtech" }, +{ 8776, "asymp","almost equal to = asymptotic to, U+2248 ISOamsr" }, +{ 8800, "ne", "not equal to, U+2260 ISOtech" }, +{ 8801, "equiv","identical to, U+2261 ISOtech" }, +{ 8804, "le", "less-than or equal to, U+2264 ISOtech" }, +{ 8805, "ge", "greater-than or equal to, U+2265 ISOtech" }, +{ 8834, "sub", "subset of, U+2282 ISOtech" }, +{ 8835, "sup", "superset of, U+2283 ISOtech" }, +{ 8836, "nsub", "not a subset of, U+2284 ISOamsn" }, +{ 8838, "sube", "subset of or equal to, U+2286 ISOtech" }, +{ 8839, "supe", "superset of or equal to, U+2287 ISOtech" }, +{ 8853, "oplus","circled plus = direct sum, U+2295 ISOamsb" }, +{ 8855, "otimes","circled times = vector product, U+2297 ISOamsb" }, +{ 8869, "perp", "up tack = orthogonal to = perpendicular, U+22A5 ISOtech" }, +{ 8901, "sdot", "dot operator, U+22C5 ISOamsb" }, +{ 8968, "lceil","left ceiling = apl upstile, U+2308 ISOamsc" }, +{ 8969, "rceil","right ceiling, U+2309 ISOamsc" }, +{ 8970, "lfloor","left floor = apl downstile, U+230A ISOamsc" }, +{ 8971, "rfloor","right floor, U+230B ISOamsc" }, +{ 9001, "lang", "left-pointing angle bracket = bra, U+2329 ISOtech" }, +{ 9002, "rang", "right-pointing angle bracket = ket, U+232A ISOtech" }, +{ 9674, "loz", "lozenge, U+25CA ISOpub" }, + +{ 9824, "spades","black spade suit, U+2660 ISOpub" }, +{ 9827, "clubs","black club suit = shamrock, U+2663 ISOpub" }, +{ 9829, "hearts","black heart suit = valentine, U+2665 ISOpub" }, +{ 9830, "diams","black diamond suit, U+2666 ISOpub" }, + +}; + +/************************************************************************ + * * + * Commodity functions to handle entities * + * * + ************************************************************************/ + +/** + * htmlEntityLookup: + * @name: the entity name + * + * Lookup the given entity in EntitiesTable + * + * TODO: the linear scan is really ugly, an hash table is really needed. + * + * Returns the associated htmlEntityDescPtr if found, NULL otherwise. + */ +const htmlEntityDesc * +htmlEntityLookup(const xmlChar *name) { + unsigned int i; + + for (i = 0;i < (sizeof(html40EntitiesTable)/ + sizeof(html40EntitiesTable[0]));i++) { + if (xmlStrEqual(name, BAD_CAST html40EntitiesTable[i].name)) { + return((htmlEntityDescPtr) &html40EntitiesTable[i]); + } + } + return(NULL); +} + +static int +htmlCompareEntityDesc(const void *vkey, const void *vdesc) { + const unsigned *key = vkey; + const htmlEntityDesc *desc = vdesc; + + return((int) *key - (int) desc->value); +} + +/** + * htmlEntityValueLookup: + * @value: the entity's unicode value + * + * Lookup the given entity in EntitiesTable + * + * TODO: the linear scan is really ugly, an hash table is really needed. + * + * Returns the associated htmlEntityDescPtr if found, NULL otherwise. 
+ */ +const htmlEntityDesc * +htmlEntityValueLookup(unsigned int value) { + const htmlEntityDesc *desc; + size_t nmemb; + + nmemb = sizeof(html40EntitiesTable) / sizeof(html40EntitiesTable[0]); + desc = bsearch(&value, html40EntitiesTable, nmemb, sizeof(htmlEntityDesc), + htmlCompareEntityDesc); + + return(desc); +} + +/** + * UTF8ToHtml: + * @out: a pointer to an array of bytes to store the result + * @outlen: the length of @out + * @in: a pointer to an array of UTF-8 chars + * @inlen: the length of @in + * + * Take a block of UTF-8 chars in and try to convert it to an ASCII + * plus HTML entities block of chars out. + * + * Returns 0 if success, -2 if the transcoding fails, or -1 otherwise + * The value of @inlen after return is the number of octets consumed + * as the return value is positive, else unpredictable. + * The value of @outlen after return is the number of octets consumed. + */ +int +UTF8ToHtml(unsigned char* out, int *outlen, + const unsigned char* in, int *inlen) { + const unsigned char* instart = in; + const unsigned char* inend; + unsigned char* outstart = out; + unsigned char* outend; + int ret = XML_ENC_ERR_SPACE; + + if ((out == NULL) || (outlen == NULL) || (inlen == NULL)) + return(XML_ENC_ERR_INTERNAL); + + if (in == NULL) { + /* + * initialization nothing to do + */ + *outlen = 0; + *inlen = 0; + return(XML_ENC_ERR_SUCCESS); + } + + inend = in + *inlen; + outend = out + *outlen; + while (in < inend) { + const htmlEntityDesc *ent; + const char *cp; + char nbuf[16]; + unsigned c, d; + int seqlen, len, i; + + d = *in; + + if (d < 0x80) { + if (out >= outend) + goto done; + *out++ = d; + in += 1; + continue; + } + + if (d < 0xE0) { c = d & 0x1F; seqlen = 2; } + else if (d < 0xF0) { c = d & 0x0F; seqlen = 3; } + else { c = d & 0x07; seqlen = 4; } + + if (inend - in < seqlen) + break; + + for (i = 1; i < seqlen; i++) { + d = in[i]; + c <<= 6; + c |= d & 0x3F; + } + + /* + * Try to lookup a predefined HTML entity for it + */ + ent = htmlEntityValueLookup(c); + + if (ent == NULL) { + snprintf(nbuf, sizeof(nbuf), "#%u", c); + cp = nbuf; + } else { + cp = ent->name; + } + + len = strlen(cp); + if (outend - out < len + 2) + goto done; + + *out++ = '&'; + memcpy(out, cp, len); + out += len; + *out++ = ';'; + + in += seqlen; + } + + ret = out - outstart; + +done: + *outlen = out - outstart; + *inlen = in - instart; + return(ret); +} + +/** + * htmlEncodeEntities: + * @out: a pointer to an array of bytes to store the result + * @outlen: the length of @out + * @in: a pointer to an array of UTF-8 chars + * @inlen: the length of @in + * @quoteChar: the quote character to escape (' or ") or zero. + * + * Take a block of UTF-8 chars in and try to convert it to an ASCII + * plus HTML entities block of chars out. + * + * Returns 0 if success, -2 if the transcoding fails, or -1 otherwise + * The value of @inlen after return is the number of octets consumed + * as the return value is positive, else unpredictable. + * The value of @outlen after return is the number of octets consumed. 
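+ *
+ * Example (editor's sketch): escaping markup characters in a buffer:
+ *
+ *     const unsigned char in[] = "a < b";
+ *     unsigned char out[64];
+ *     int inlen = 5, outlen = sizeof(out);
+ *     if (htmlEncodeEntities(out, &outlen, in, &inlen, 0) == 0) {
+ *         // out now holds the 8 octets "a &lt; b" (outlen == 8);
+ *         // the output is not NUL-terminated
+ *     }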
+ */ +int +htmlEncodeEntities(unsigned char* out, int *outlen, + const unsigned char* in, int *inlen, int quoteChar) { + const unsigned char* processed = in; + const unsigned char* outend; + const unsigned char* outstart = out; + const unsigned char* instart = in; + const unsigned char* inend; + unsigned int c, d; + int trailing; + + if ((out == NULL) || (outlen == NULL) || (inlen == NULL) || (in == NULL)) + return(-1); + outend = out + (*outlen); + inend = in + (*inlen); + while (in < inend) { + d = *in++; + if (d < 0x80) { c= d; trailing= 0; } + else if (d < 0xC0) { + /* trailing byte in leading position */ + *outlen = out - outstart; + *inlen = processed - instart; + return(-2); + } else if (d < 0xE0) { c= d & 0x1F; trailing= 1; } + else if (d < 0xF0) { c= d & 0x0F; trailing= 2; } + else if (d < 0xF8) { c= d & 0x07; trailing= 3; } + else { + /* no chance for this in Ascii */ + *outlen = out - outstart; + *inlen = processed - instart; + return(-2); + } + + if (inend - in < trailing) + break; + + while (trailing--) { + if (((d= *in++) & 0xC0) != 0x80) { + *outlen = out - outstart; + *inlen = processed - instart; + return(-2); + } + c <<= 6; + c |= d & 0x3F; + } + + /* assertion: c is a single UTF-4 value */ + if ((c < 0x80) && (c != (unsigned int) quoteChar) && + (c != '&') && (c != '<') && (c != '>')) { + if (out >= outend) + break; + *out++ = c; + } else { + const htmlEntityDesc * ent; + const char *cp; + char nbuf[16]; + int len; + + /* + * Try to lookup a predefined HTML entity for it + */ + ent = htmlEntityValueLookup(c); + if (ent == NULL) { + snprintf(nbuf, sizeof(nbuf), "#%u", c); + cp = nbuf; + } + else + cp = ent->name; + len = strlen(cp); + if (outend - out < len + 2) + break; + *out++ = '&'; + memcpy(out, cp, len); + out += len; + *out++ = ';'; + } + processed = in; + } + *outlen = out - outstart; + *inlen = processed - instart; + return(0); +} + +/************************************************************************ + * * + * Commodity functions, cleanup needed ? * + * * + ************************************************************************/ +/* + * all tags allowing pc data from the html 4.01 loose dtd + * NOTE: it might be more appropriate to integrate this information + * into the html40ElementTable array but I don't want to risk any + * binary incompatibility + */ +static const char *allowPCData[] = { + "a", "abbr", "acronym", "address", "applet", "b", "bdo", "big", + "blockquote", "body", "button", "caption", "center", "cite", "code", + "dd", "del", "dfn", "div", "dt", "em", "font", "form", "h1", "h2", + "h3", "h4", "h5", "h6", "i", "iframe", "ins", "kbd", "label", "legend", + "li", "noframes", "noscript", "object", "p", "pre", "q", "s", "samp", + "small", "span", "strike", "strong", "td", "th", "tt", "u", "var" +}; + +/** + * areBlanks: + * @ctxt: an HTML parser context + * @str: a xmlChar * + * @len: the size of @str + * + * Is this a sequence of blank chars that one can ignore ? + * + * Returns 1 if ignorable 0 if whitespace, -1 otherwise. 
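+ * (Editor's note: -1 means @str contains a non-blank character, 0 means
+ * the whitespace is significant and must be kept, 1 means it can be
+ * dropped.)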
+ */
+
+static int areBlanks(htmlParserCtxtPtr ctxt, const xmlChar *str, int len) {
+    unsigned int i;
+    int j;
+    xmlNodePtr lastChild;
+    xmlDtdPtr dtd;
+
+    for (j = 0; j < len; j++)
+        if (!(IS_WS_HTML(str[j]))) return(-1);
+
+    if (CUR == 0) return(1);
+    if (CUR != '<') return(0);
+    if (ctxt->name == NULL)
+        return(1);
+    if (xmlStrEqual(ctxt->name, BAD_CAST"html"))
+        return(1);
+    if (xmlStrEqual(ctxt->name, BAD_CAST"head"))
+        return(1);
+
+    /* Only strip CDATA children of the body tag for strict HTML DTDs */
+    if (xmlStrEqual(ctxt->name, BAD_CAST "body") && ctxt->myDoc != NULL) {
+        dtd = xmlGetIntSubset(ctxt->myDoc);
+        if (dtd != NULL && dtd->ExternalID != NULL) {
+            if (!xmlStrcasecmp(dtd->ExternalID, BAD_CAST "-//W3C//DTD HTML 4.01//EN") ||
+                !xmlStrcasecmp(dtd->ExternalID, BAD_CAST "-//W3C//DTD HTML 4//EN"))
+                return(1);
+        }
+    }
+
+    if (ctxt->node == NULL) return(0);
+    lastChild = xmlGetLastChild(ctxt->node);
+    while ((lastChild) && (lastChild->type == XML_COMMENT_NODE))
+        lastChild = lastChild->prev;
+    if (lastChild == NULL) {
+        if ((ctxt->node->type != XML_ELEMENT_NODE) &&
+            (ctxt->node->content != NULL)) return(0);
+        /* keep ws in constructs like ...<b> </b>...
+           for all tags "b" allowing PCDATA */
+        for ( i = 0; i < sizeof(allowPCData)/sizeof(allowPCData[0]); i++ ) {
+            if ( xmlStrEqual(ctxt->name, BAD_CAST allowPCData[i]) ) {
+                return(0);
+            }
+        }
+    } else if (xmlNodeIsText(lastChild)) {
+        return(0);
+    } else {
+        /* keep ws in constructs like <p><b>xy</b>
+           <i>z</i><p>
+ for all tags "p" allowing PCDATA */ + for ( i = 0; i < sizeof(allowPCData)/sizeof(allowPCData[0]); i++ ) { + if ( xmlStrEqual(lastChild->name, BAD_CAST allowPCData[i]) ) { + return(0); + } + } + } + return(1); +} + +/** + * htmlNewDocNoDtD: + * @URI: URI for the dtd, or NULL + * @ExternalID: the external ID of the DTD, or NULL + * + * Creates a new HTML document without a DTD node if @URI and @ExternalID + * are NULL + * + * Returns a new document, do not initialize the DTD if not provided + */ +htmlDocPtr +htmlNewDocNoDtD(const xmlChar *URI, const xmlChar *ExternalID) { + xmlDocPtr cur; + + /* + * Allocate a new document and fill the fields. + */ + cur = (xmlDocPtr) xmlMalloc(sizeof(xmlDoc)); + if (cur == NULL) + return(NULL); + memset(cur, 0, sizeof(xmlDoc)); + + cur->type = XML_HTML_DOCUMENT_NODE; + cur->version = NULL; + cur->intSubset = NULL; + cur->doc = cur; + cur->name = NULL; + cur->children = NULL; + cur->extSubset = NULL; + cur->oldNs = NULL; + cur->encoding = NULL; + cur->standalone = 1; + cur->compression = 0; + cur->ids = NULL; + cur->refs = NULL; + cur->_private = NULL; + cur->charset = XML_CHAR_ENCODING_UTF8; + cur->properties = XML_DOC_HTML | XML_DOC_USERBUILT; + if ((ExternalID != NULL) || + (URI != NULL)) { + xmlDtdPtr intSubset; + + intSubset = xmlCreateIntSubset(cur, BAD_CAST "html", ExternalID, URI); + if (intSubset == NULL) { + xmlFree(cur); + return(NULL); + } + } + if ((xmlRegisterCallbacks) && (xmlRegisterNodeDefaultValue)) + xmlRegisterNodeDefaultValue((xmlNodePtr)cur); + return(cur); +} + +/** + * htmlNewDoc: + * @URI: URI for the dtd, or NULL + * @ExternalID: the external ID of the DTD, or NULL + * + * Creates a new HTML document + * + * Returns a new document + */ +htmlDocPtr +htmlNewDoc(const xmlChar *URI, const xmlChar *ExternalID) { + if ((URI == NULL) && (ExternalID == NULL)) + return(htmlNewDocNoDtD( + BAD_CAST "http://www.w3.org/TR/REC-html40/loose.dtd", + BAD_CAST "-//W3C//DTD HTML 4.0 Transitional//EN")); + + return(htmlNewDocNoDtD(URI, ExternalID)); +} + + +/************************************************************************ + * * + * The parser itself * + * Relates to http://www.w3.org/TR/html40 * + * * + ************************************************************************/ + +/************************************************************************ + * * + * The parser itself * + * * + ************************************************************************/ + +/** + * htmlParseHTMLName: + * @ctxt: an HTML parser context + * + * parse an HTML tag or attribute name, note that we convert it to lowercase + * since HTML names are not case-sensitive. + * + * Returns the Tag Name parsed or NULL + */ + +static xmlHashedString +htmlParseHTMLName(htmlParserCtxtPtr ctxt, int attr) { + xmlHashedString ret; + xmlChar buf[HTML_PARSER_BUFFER_SIZE]; + const xmlChar *in; + size_t avail; + int eof = PARSER_PROGRESSIVE(ctxt); + int nbchar = 0; + int stop = attr ? 
'=' : ' '; + + in = ctxt->input->cur; + avail = ctxt->input->end - in; + + while (1) { + int c, size; + + if ((!eof) && (avail < 32)) { + size_t oldAvail = avail; + + ctxt->input->cur = in; + + SHRINK; + xmlParserGrow(ctxt); + + in = ctxt->input->cur; + avail = ctxt->input->end - in; + + if (oldAvail == avail) + eof = 1; + } + + if (avail == 0) + break; + + c = *in; + size = 1; + + if ((nbchar != 0) && + ((c == '/') || (c == '>') || (c == stop) || + (IS_WS_HTML(c)))) + break; + + if (c == 0) { + if (nbchar + 3 <= HTML_PARSER_BUFFER_SIZE) { + buf[nbchar++] = 0xEF; + buf[nbchar++] = 0xBF; + buf[nbchar++] = 0xBD; + } + } else if (c < 0x80) { + if (nbchar < HTML_PARSER_BUFFER_SIZE) { + if (IS_UPPER(c)) + c += 0x20; + buf[nbchar++] = c; + } + } else { + size = htmlValidateUtf8(ctxt, in, avail); + + if (size > 0) { + if (nbchar + size <= HTML_PARSER_BUFFER_SIZE) { + memcpy(buf + nbchar, in, size); + nbchar += size; + } + } else { + size = 1; + + if (nbchar + 3 <= HTML_PARSER_BUFFER_SIZE) { + buf[nbchar++] = 0xEF; + buf[nbchar++] = 0xBF; + buf[nbchar++] = 0xBD; + } + } + } + + in += size; + avail -= size; + } + + ctxt->input->cur = in; + + SHRINK; + + ret = xmlDictLookupHashed(ctxt->dict, buf, nbchar); + if (ret.name == NULL) + htmlErrMemory(ctxt); + + return(ret); +} + +static const short htmlC1Remap[32] = { + 0x20AC, 0x0081, 0x201A, 0x0192, 0x201E, 0x2026, 0x2020, 0x2021, + 0x02C6, 0x2030, 0x0160, 0x2039, 0x0152, 0x008D, 0x017D, 0x008F, + 0x0090, 0x2018, 0x2019, 0x201C, 0x201D, 0x2022, 0x2013, 0x2014, + 0x02DC, 0x2122, 0x0161, 0x203A, 0x0153, 0x009D, 0x017E, 0x0178 +}; + +static const xmlChar * +htmlCodePointToUtf8(int c, xmlChar *out, int *osize) { + int i = 0; + int bits, hi; + + if ((c >= 0x80) && (c < 0xA0)) { + c = htmlC1Remap[c - 0x80]; + } else if ((c <= 0) || + ((c >= 0xD800) && (c < 0xE000)) || + (c > 0x10FFFF)) { + c = 0xFFFD; + } + + if (c < 0x80) { bits = 0; hi = 0x00; } + else if (c < 0x800) { bits = 6; hi = 0xC0; } + else if (c < 0x10000) { bits = 12; hi = 0xE0; } + else { bits = 18; hi = 0xF0; } + + out[i++] = (c >> bits) | hi; + + while (bits > 0) { + bits -= 6; + out[i++] = ((c >> bits) & 0x3F) | 0x80; + } + + *osize = i; + return(out); +} + +#include "html5ent.inc" + +#define ENT_F_SEMICOLON 0x80u +#define ENT_F_SUBTABLE 0x40u +#define ENT_F_ALL 0xC0u + +static const xmlChar * +htmlFindEntityPrefix(const xmlChar *string, size_t slen, int isAttr, + int *nlen, int *rlen) { + const xmlChar *match = NULL; + unsigned left, right; + int first = string[0]; + size_t matchLen = 0; + size_t soff = 1; + + if (slen < 2) + return(NULL); + if (!IS_ASCII_LETTER(first)) + return(NULL); + + /* + * Look up range by first character + */ + first &= 63; + left = htmlEntAlpha[first*3] | htmlEntAlpha[first*3+1] << 8; + right = left + htmlEntAlpha[first*3+2]; + + /* + * Binary search + */ + while (left < right) { + const xmlChar *bytes; + unsigned mid; + size_t len; + int cmp; + + mid = left + (right - left) / 2; + bytes = htmlEntStrings + htmlEntValues[mid]; + len = bytes[0] & ~ENT_F_ALL; + + cmp = string[soff] - bytes[1]; + + if (cmp == 0) { + if (slen < len) { + cmp = strncmp((const char *) string + soff + 1, + (const char *) bytes + 2, + slen - 1); + /* Prefix can never match */ + if (cmp == 0) + break; + } else { + cmp = strncmp((const char *) string + soff + 1, + (const char *) bytes + 2, + len - 1); + } + } + + if (cmp < 0) { + right = mid; + } else if (cmp > 0) { + left = mid + 1; + } else { + int term = soff + len < slen ? 
string[soff + len] : 0; + int isAlnum, isTerm; + + isAlnum = IS_ALNUM(term); + isTerm = ((term == ';') || + ((bytes[0] & ENT_F_SEMICOLON) && + ((!isAttr) || + ((!isAlnum) && (term != '='))))); + + if (isTerm) { + match = bytes + len + 1; + matchLen = soff + len; + if (term == ';') + matchLen += 1; + } + + if (bytes[0] & ENT_F_SUBTABLE) { + if (isTerm) + match += 2; + + if ((isAlnum) && (soff + len < slen)) { + left = mid + bytes[len + 1]; + right = left + bytes[len + 2]; + soff += len; + continue; + } + } + + break; + } + } + + if (match == NULL) + return(NULL); + + *nlen = matchLen; + *rlen = match[0]; + return(match + 1); +} + +/** + * htmlParseData: + * @ctxt: an HTML parser context + * @mask: mask of terminating characters + * @comment: true if parsing a comment + * @refs: true if references are allowed + * @maxLength: maximum output length + * + * Parse data until terminator is reached. + * + * Returns the parsed string or NULL in case of errors. + */ + +static xmlChar * +htmlParseData(htmlParserCtxtPtr ctxt, htmlAsciiMask mask, + int comment, int refs, int maxLength) { + xmlParserInputPtr input = ctxt->input; + xmlChar *ret = NULL; + xmlChar *buffer; + xmlChar utf8Char[4]; + size_t buffer_size; + size_t used; + int eof = PARSER_PROGRESSIVE(ctxt); + int line, col; + int termSkip = -1; + + used = 0; + buffer_size = ctxt->spaceMax; + buffer = (xmlChar *) ctxt->spaceTab; + if (buffer == NULL) { + buffer_size = 500; + buffer = xmlMalloc(buffer_size + 1); + if (buffer == NULL) { + htmlErrMemory(ctxt); + return(NULL); + } + } + + line = input->line; + col = input->col; + + while (!PARSER_STOPPED(ctxt)) { + const xmlChar *chunk, *in, *repl; + size_t avail, chunkSize, extraSize; + int replSize; + int skip = 0; + int ncr = 0; + int ncrSize = 0; + int cp = 0; + + chunk = input->cur; + avail = input->end - chunk; + in = chunk; + + repl = BAD_CAST ""; + replSize = 0; + + while (!PARSER_STOPPED(ctxt)) { + size_t j; + int cur, size; + + if ((!eof) && (avail <= 64)) { + size_t oldAvail = avail; + size_t off = in - chunk; + + input->cur = in; + + xmlParserGrow(ctxt); + + in = input->cur; + chunk = in - off; + input->cur = chunk; + avail = input->end - in; + + if (oldAvail == avail) + eof = 1; + } + + if (avail == 0) { + termSkip = 0; + break; + } + + cur = *in; + size = 1; + col += 1; + + if (htmlMaskMatch(mask, cur)) { + if (comment) { + if (avail < 2) { + termSkip = 1; + } else if (in[1] == '-') { + if (avail < 3) { + termSkip = 2; + } else if (in[2] == '>') { + termSkip = 3; + } else if (in[2] == '!') { + if (avail < 4) + termSkip = 3; + else if (in[3] == '>') + termSkip = 4; + } + } + + if (termSkip >= 0) + break; + } else { + termSkip = 0; + break; + } + } + + if (ncr) { + int lc = cur | 0x20; + int digit; + + if ((cur >= '0') && (cur <= '9')) { + digit = cur - '0'; + } else if ((ncr == 16) && (lc >= 'a') && (lc <= 'f')) { + digit = (lc - 'a') + 10; + } else { + if (cur == ';') { + in += 1; + size += 1; + ncrSize += 1; + } + goto next_chunk; + } + + cp = cp * ncr + digit; + if (cp >= 0x110000) + cp = 0x110000; + + ncrSize += 1; + + goto next_char; + } + + switch (cur) { + case '&': + if (!refs) + break; + + j = 1; + + if ((j < avail) && (in[j] == '#')) { + j += 1; + if (j < avail) { + if ((in[j] | 0x20) == 'x') { + j += 1; + if ((j < avail) && (IS_HEX_DIGIT(in[j]))) { + ncr = 16; + size = 3; + ncrSize = 3; + cp = 0; + } + } else if (IS_ASCII_DIGIT(in[j])) { + ncr = 10; + size = 2; + ncrSize = 2; + cp = 0; + } + } + } else { + repl = htmlFindEntityPrefix(in + j, + avail - j, + /* isAttr */ 1, + 
&skip, &replSize); + if (repl != NULL) { + skip += 1; + goto next_chunk; + } + + skip = 0; + } + + break; + + case '\0': + skip = 1; + repl = BAD_CAST "\xEF\xBF\xBD"; + replSize = 3; + goto next_chunk; + + case '\n': + line += 1; + col = 1; + break; + + case '\r': + skip = 1; + if (in[1] != 0x0A) { + repl = BAD_CAST "\x0A"; + replSize = 1; + } + goto next_chunk; + + default: + if (cur < 0x80) + break; + + if ((input->flags & XML_INPUT_HAS_ENCODING) == 0) { + xmlChar * guess; + + guess = htmlFindEncoding(ctxt); + if (guess == NULL) { + xmlSwitchEncoding(ctxt, XML_CHAR_ENCODING_8859_1); + } else { + xmlSwitchEncodingName(ctxt, (const char *) guess); + xmlFree(guess); + } + input->flags |= XML_INPUT_HAS_ENCODING; + + goto restart; + } + + size = htmlValidateUtf8(ctxt, in, avail); + + if (size <= 0) { + skip = 1; + repl = BAD_CAST "\xEF\xBF\xBD"; + replSize = 3; + goto next_chunk; + } + + break; + } + +next_char: + in += size; + avail -= size; + } + +next_chunk: + if (ncrSize > 0) { + skip = ncrSize; + in -= ncrSize; + + repl = htmlCodePointToUtf8(cp, utf8Char, &replSize); + } + + chunkSize = in - chunk; + extraSize = chunkSize + replSize; + + if (extraSize > maxLength - used) { + htmlParseErr(ctxt, XML_ERR_RESOURCE_LIMIT, + "value too long\n", NULL, NULL); + goto error; + } + + if (extraSize > buffer_size - used) { + size_t newSize = (used + extraSize) * 2; + xmlChar *tmp = (xmlChar *) xmlRealloc(buffer, newSize + 1); + + if (tmp == NULL) { + htmlErrMemory(ctxt); + goto error; + } + buffer = tmp; + buffer_size = newSize; + } + + if (chunkSize > 0) { + input->cur += chunkSize; + memcpy(buffer + used, chunk, chunkSize); + used += chunkSize; + } + + input->cur += skip; + if (replSize > 0) { + memcpy(buffer + used, repl, replSize); + used += replSize; + } + + SHRINK; + + if (termSkip >= 0) + break; + +restart: + ; + } + + if (termSkip > 0) { + input->cur += termSkip; + col += termSkip; + } + + input->line = line; + input->col = col; + + ret = xmlMalloc(used + 1); + if (ret == NULL) { + htmlErrMemory(ctxt); + } else { + memcpy(ret, buffer, used); + ret[used] = 0; + } + +error: + ctxt->spaceTab = (void *) buffer; + ctxt->spaceMax = buffer_size; + + return(ret); +} + +/** + * htmlParseEntityRef: + * @ctxt: an HTML parser context + * @str: location to store the entity name + * + * DEPRECATED: Internal function, don't use. + * + * Returns NULL. + */ +const htmlEntityDesc * +htmlParseEntityRef(htmlParserCtxtPtr ctxt ATTRIBUTE_UNUSED, + const xmlChar **str ATTRIBUTE_UNUSED) { + return(NULL); +} + +/** + * htmlParseAttValue: + * @ctxt: an HTML parser context + * + * parse a value for an attribute + * Note: the parser won't do substitution of entities here, this + * will be handled later in xmlStringGetNodeList, unless it was + * asked for ctxt->replaceEntities != 0 + * + * Returns the AttValue parsed or NULL. + */ + +static xmlChar * +htmlParseAttValue(htmlParserCtxtPtr ctxt) { + xmlChar *ret = NULL; + int maxLength = (ctxt->options & HTML_PARSE_HUGE) ? 
+ XML_MAX_HUGE_LENGTH : + XML_MAX_TEXT_LENGTH; + + if (CUR == '"') { + SKIP(1); + ret = htmlParseData(ctxt, MASK_DQ, 0, 1, maxLength); + if (CUR == '"') + SKIP(1); + } else if (CUR == '\'') { + SKIP(1); + ret = htmlParseData(ctxt, MASK_SQ, 0, 1, maxLength); + if (CUR == '\'') + SKIP(1); + } else { + ret = htmlParseData(ctxt, MASK_WS_GT, 0, 1, maxLength); + } + return(ret); +} + +static void +htmlCharDataSAXCallback(htmlParserCtxtPtr ctxt, const xmlChar *buf, + int size, int mode) { + if ((ctxt->sax == NULL) || (ctxt->disableSAX)) + return; + + if ((mode == 0) || (mode == DATA_RCDATA) || + (ctxt->sax->cdataBlock == NULL)) { + int blank = areBlanks(ctxt, buf, size); + + if ((mode == 0) && (blank > 0) && (!ctxt->keepBlanks)) { + if (ctxt->sax->ignorableWhitespace != NULL) + ctxt->sax->ignorableWhitespace(ctxt->userData, + buf, size); + } else { + if ((mode == 0) && (blank < 0)) + htmlCheckParagraph(ctxt); + + if (ctxt->sax->characters != NULL) + ctxt->sax->characters(ctxt->userData, buf, size); + } + } else { + /* + * Insert as CDATA, which is the same as HTML_PRESERVE_NODE + */ + ctxt->sax->cdataBlock(ctxt->userData, buf, size); + } +} + +/** + * htmlParseCharData: + * @ctxt: an HTML parser context + * @terminate: true if the input buffer is complete + * + * Parse character data and references. + */ + +static int +htmlParseCharData(htmlParserCtxtPtr ctxt) { + xmlParserInputPtr input = ctxt->input; + xmlChar utf8Char[4]; + int complete = 0; + int done = 0; + int mode; + int eof = PARSER_PROGRESSIVE(ctxt); + int line, col; + + mode = ctxt->endCheckState; + + line = input->line; + col = input->col; + + while (!PARSER_STOPPED(ctxt)) { + const xmlChar *chunk, *in, *repl; + size_t avail; + int replSize; + int skip = 0; + int ncr = 0; + int ncrSize = 0; + int cp = 0; + + chunk = input->cur; + avail = input->end - chunk; + in = chunk; + + repl = BAD_CAST ""; + replSize = 0; + + while (!PARSER_STOPPED(ctxt)) { + size_t j; + int cur, size; + + if (avail <= 64) { + if (!eof) { + size_t oldAvail = avail; + size_t off = in - chunk; + + input->cur = in; + + xmlParserGrow(ctxt); + + in = input->cur; + chunk = in - off; + input->cur = chunk; + avail = input->end - in; + + if (oldAvail == avail) + eof = 1; + } + + if (avail == 0) { + done = 1; + break; + } + } + + /* Accelerator */ + if (!ncr) { + while (avail > 0) { + static const unsigned mask[8] = { + 0x00002401, 0x10002040, + 0x00000000, 0x00000000, + 0xFFFFFFFF, 0xFFFFFFFF, + 0xFFFFFFFF, 0xFFFFFFFF + }; + cur = *in; + if ((1u << (cur & 0x1F)) & mask[cur >> 5]) + break; + col += 1; + in += 1; + avail -= 1; + } + + if ((!eof) && (avail <= 64)) + continue; + if (avail == 0) + continue; + } + + cur = *in; + size = 1; + col += 1; + + if (ncr) { + int lc = cur | 0x20; + int digit; + + if ((cur >= '0') && (cur <= '9')) { + digit = cur - '0'; + } else if ((ncr == 16) && (lc >= 'a') && (lc <= 'f')) { + digit = (lc - 'a') + 10; + } else { + if (cur == ';') { + in += 1; + size += 1; + ncrSize += 1; + } + goto next_chunk; + } + + cp = cp * ncr + digit; + if (cp >= 0x110000) + cp = 0x110000; + + ncrSize += 1; + + goto next_char; + } + + switch (cur) { + case '<': + if (mode == 0) { + done = 1; + goto next_chunk; + } + if (mode == DATA_PLAINTEXT) + break; + + j = 1; + if (j < avail) { + if ((mode == DATA_SCRIPT) && (in[j] == '!')) { + /* Check for comment start */ + + j += 1; + if ((j < avail) && (in[j] == '-')) { + j += 1; + if ((j < avail) && (in[j] == '-')) + mode = DATA_SCRIPT_ESC1; + } + } else { + int i = 0; + int solidus = 0; + + /* Check for tag */ + + if 
(in[j] == '/') { + j += 1; + solidus = 1; + } + + if ((solidus) || (mode == DATA_SCRIPT_ESC1)) { + while ((j < avail) && + (ctxt->name[i] != 0) && + (ctxt->name[i] == (in[j] | 0x20))) { + i += 1; + j += 1; + } + + if ((ctxt->name[i] == 0) && (j < avail)) { + int c = in[j]; + + if ((c == '>') || (c == '/') || + (IS_WS_HTML(c))) { + if ((mode == DATA_SCRIPT_ESC1) && + (!solidus)) { + mode = DATA_SCRIPT_ESC2; + } else if (mode == DATA_SCRIPT_ESC2) { + mode = DATA_SCRIPT_ESC1; + } else { + complete = 1; + done = 1; + goto next_chunk; + } + } + } + } + } + } + + if ((mode != 0) && (PARSER_PROGRESSIVE(ctxt))) { + in += 1; + done = 1; + goto next_chunk; + } + + break; + + case '-': + if ((mode != DATA_SCRIPT_ESC1) && (mode != DATA_SCRIPT_ESC2)) + break; + + /* Check for comment end */ + + j = 1; + if ((j < avail) && (in[j] == '-')) { + j += 1; + if ((j < avail) && (in[j] == '>')) + mode = DATA_SCRIPT; + } + + break; + + case '&': + if ((mode != 0) && (mode != DATA_RCDATA)) + break; + + j = 1; + + if ((j < avail) && (in[j] == '#')) { + j += 1; + if (j < avail) { + if ((in[j] | 0x20) == 'x') { + j += 1; + if ((j < avail) && (IS_HEX_DIGIT(in[j]))) { + ncr = 16; + size = 3; + ncrSize = 3; + cp = 0; + } + } else if (IS_ASCII_DIGIT(in[j])) { + ncr = 10; + size = 2; + ncrSize = 2; + cp = 0; + } + } + } else { + repl = htmlFindEntityPrefix(in + j, + avail - j, + /* isAttr */ 0, + &skip, &replSize); + if (repl != NULL) { + skip += 1; + goto next_chunk; + } + + skip = 0; + } + + break; + + case '\0': + skip = 1; + repl = BAD_CAST "\xEF\xBF\xBD"; + replSize = 3; + goto next_chunk; + + case '\n': + line += 1; + col = 1; + break; + + case '\r': + skip = 1; + if (in[1] != 0x0A) { + repl = BAD_CAST "\x0A"; + replSize = 1; + } + goto next_chunk; + + default: + if (cur < 0x80) + break; + + if ((input->flags & XML_INPUT_HAS_ENCODING) == 0) { + xmlChar * guess; + + guess = htmlFindEncoding(ctxt); + if (guess == NULL) { + xmlSwitchEncoding(ctxt, XML_CHAR_ENCODING_8859_1); + } else { + xmlSwitchEncodingName(ctxt, (const char *) guess); + xmlFree(guess); + } + input->flags |= XML_INPUT_HAS_ENCODING; + + goto restart; + } + + size = htmlValidateUtf8(ctxt, in, avail); + + if (size <= 0) { + skip = 1; + repl = BAD_CAST "\xEF\xBF\xBD"; + replSize = 3; + goto next_chunk; + } + + break; + } + +next_char: + in += size; + avail -= size; + } + +next_chunk: + if (ncrSize > 0) { + skip = ncrSize; + in -= ncrSize; + + repl = htmlCodePointToUtf8(cp, utf8Char, &replSize); + } + + if (in > chunk) { + input->cur += in - chunk; + htmlCharDataSAXCallback(ctxt, chunk, in - chunk, mode); + } + + input->cur += skip; + if (replSize > 0) + htmlCharDataSAXCallback(ctxt, repl, replSize, mode); + + SHRINK; + + if (done) + break; + +restart: + ; + } + + input->line = line; + input->col = col; + + if (complete) + ctxt->endCheckState = 0; + else + ctxt->endCheckState = mode; + + return(complete); +} + +/** + * htmlSecureComment: + * @ctxt: an HTML parser context + * @mask: mask of terminating characters + * @refs: true if references are allowed + * @maxLength: maximum output length + * + * Securely parse comment until terminator is reached. + * + * Returns the parsed string or NULL in case of errors. 
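+ *
+ * Unlike htmlParseData(), this variant also decodes %XY hex escapes
+ * whose decoded value is an ASCII letter or digit (see the '%' case
+ * below).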
+ */
+
+static xmlChar *
+htmlSecureComment(htmlParserCtxtPtr ctxt, htmlAsciiMask mask,
+                  int refs, int maxLength) {
+    xmlParserInputPtr input = ctxt->input;
+    xmlChar *ret = NULL;
+    xmlChar *buffer;
+    xmlChar utf8Char[4];
+    size_t buffer_size;
+    size_t used;
+    int eof = PARSER_PROGRESSIVE(ctxt);
+    int line, col;
+    int termSkip = -1;
+
+    used = 0;
+    buffer_size = ctxt->spaceMax;
+    buffer = (xmlChar *) ctxt->spaceTab;
+    if (buffer == NULL) {
+        buffer_size = 500;
+        buffer = xmlMalloc(buffer_size + 1);
+        if (buffer == NULL) {
+            htmlErrMemory(ctxt);
+            return(NULL);
+        }
+    }
+
+    line = input->line;
+    col = input->col;
+
+    while (!PARSER_STOPPED(ctxt)) {
+        const xmlChar *chunk, *in, *repl;
+        size_t avail, chunkSize, extraSize;
+        int replSize;
+        int skip = 0;
+        int ncr = 0;
+        int ncrSize = 0;
+        int cp = 0;
+
+        chunk = input->cur;
+        avail = input->end - chunk;
+        in = chunk;
+
+        repl = BAD_CAST "";
+        replSize = 0;
+
+        while (!PARSER_STOPPED(ctxt)) {
+            size_t j;
+            int cur, size;
+
+            if ((!eof) && (avail <= 64)) {
+                size_t oldAvail = avail;
+                size_t off = in - chunk;
+
+                input->cur = in;
+
+                xmlParserGrow(ctxt);
+
+                in = input->cur;
+                chunk = in - off;
+                input->cur = chunk;
+                avail = input->end - in;
+
+                if (oldAvail == avail)
+                    eof = 1;
+            }
+
+            if (avail == 0) {
+                termSkip = 0;
+                break;
+            }
+
+            cur = *in;
+            size = 1;
+            col += 1;
+
+            if (htmlMaskMatch(mask, cur)) {
+                if (avail < 2) {
+                    termSkip = 1;
+                } else if (in[1] == '-') {
+                    if (avail < 3) {
+                        termSkip = 2;
+                    } else if (in[2] == '>') {
+                        termSkip = 3;
+                    } else if (in[2] == '!') {
+                        if (avail < 4)
+                            termSkip = 3;
+                        else if (in[3] == '>')
+                            termSkip = 4;
+                    }
+                }
+
+                if (termSkip >= 0)
+                    break;
+            }
+
+            if (ncr) {
+                int lc = cur | 0x20;
+                int digit;
+
+                if ((cur >= '0') && (cur <= '9')) {
+                    digit = cur - '0';
+                } else if ((ncr == 16) && (lc >= 'a') && (lc <= 'f')) {
+                    digit = (lc - 'a') + 10;
+                } else {
+                    if (cur == ';') {
+                        in += 1;
+                        size += 1;
+                        ncrSize += 1;
+                    }
+                    goto next_chunk;
+                }
+
+                cp = cp * ncr + digit;
+                if (cp >= 0x110000)
+                    cp = 0x110000;
+
+                ncrSize += 1;
+
+                goto next_char;
+            }
+
+            switch (cur) {
+            case '&':
+                if (!refs)
+                    break;
+
+                j = 1;
+
+                if ((j < avail) && (in[j] == '#')) {
+                    j += 1;
+                    if (j < avail) {
+                        if ((in[j] | 0x20) == 'x') {
+                            j += 1;
+                            if ((j < avail) && (IS_HEX_DIGIT(in[j]))) {
+                                ncr = 16;
+                                size = 3;
+                                ncrSize = 3;
+                                cp = 0;
+                            }
+                        } else if (IS_ASCII_DIGIT(in[j])) {
+                            ncr = 10;
+                            size = 2;
+                            ncrSize = 2;
+                            cp = 0;
+                        }
+                    }
+                } else {
+                    repl = htmlFindEntityPrefix(in + j,
+                                                avail - j,
+                                                /* isAttr */ 1,
+                                                &skip, &replSize);
+                    if (repl != NULL) {
+                        skip += 1;
+                        goto next_chunk;
+                    }
+
+                    skip = 0;
+                }
+
+                break;
+
+            case '\0':
+                skip = 1;
+                repl = BAD_CAST "\xEF\xBF\xBD";
+                replSize = 3;
+                goto next_chunk;
+
+            case '\n':
+                line += 1;
+                col = 1;
+                break;
+
+            case '\r':
+                skip = 1;
+                if (in[1] != 0x0A) {
+                    repl = BAD_CAST "\x0A";
+                    replSize = 1;
+                }
+                goto next_chunk;
+
+            case '%': {
+                char c = 0;
+
+                /* need '%' plus two hex digits in the buffer */
+                if (avail < 3)
+                    break;
+
+                if ((in[1] >= '0') && (in[1] <= '9')) {
+                    c = in[1] - '0';
+                } else if ((in[1] >= 'a') && (in[1] <= 'f')) {
+                    c = (in[1] - 'a') + 10;
+                } else if ((in[1] >= 'A') && (in[1] <= 'F')) {
+                    c = (in[1] - 'A') + 10;
+                } else {
+                    break;
+                }
+
+                c <<= 4;
+
+                if ((in[2] >= '0') && (in[2] <= '9')) {
+                    c |= in[2] - '0';
+                } else if ((in[2] >= 'a') && (in[2] <= 'f')) {
+                    c |= (in[2] - 'a') + 10;
+                } else if ((in[2] >= 'A') && (in[2] <= 'F')) {
+                    c |= (in[2] - 'A') + 10;
+                } else {
+                    break;
+                }
+
+                if (IS_ASCII_DIGIT(c) || IS_ASCII_LETTER(c)) {
+                    /* store the decoded byte in function-scope storage so
+                     * the pointer stays valid when the chunk is flushed */
+                    utf8Char[0] = c;
+
+                    skip = 3;
+                    repl = utf8Char;
+                    replSize = 1;
+
+                    goto next_chunk;
+                }
+                break;
+            }
+
+            default:
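+                /* non-ASCII byte: sniff the encoding once, then
+                 * validate the input as UTF-8 */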
+                if (cur < 0x80)
+                    break;
+
+                if ((input->flags & XML_INPUT_HAS_ENCODING) == 0) {
+                    xmlChar *guess;
+
+                    guess = htmlFindEncoding(ctxt);
+                    if (guess == NULL) {
+                        xmlSwitchEncoding(ctxt, XML_CHAR_ENCODING_8859_1);
+                    } else {
+                        xmlSwitchEncodingName(ctxt, (const char *) guess);
+                        xmlFree(guess);
+                    }
+                    input->flags |= XML_INPUT_HAS_ENCODING;
+
+                    goto restart;
+                }
+
+                size = htmlValidateUtf8(ctxt, in, avail);
+
+                if (size <= 0) {
+                    skip = 1;
+                    repl = BAD_CAST "\xEF\xBF\xBD";
+                    replSize = 3;
+                    goto next_chunk;
+                }
+
+                break;
+            }
+
+next_char:
+            in += size;
+            avail -= size;
+        }
+
+next_chunk:
+        if (ncrSize > 0) {
+            skip = ncrSize;
+            in -= ncrSize;
+
+            repl = htmlCodePointToUtf8(cp, utf8Char, &replSize);
+        }
+
+        chunkSize = in - chunk;
+        extraSize = chunkSize + replSize;
+
+        /* enforce the maxLength limit, as in htmlParseData() */
+        if (extraSize > maxLength - used) {
+            htmlParseErr(ctxt, XML_ERR_RESOURCE_LIMIT,
+                         "value too long\n", NULL, NULL);
+            goto error;
+        }
+
+        if (extraSize > buffer_size - used) {
+            size_t newSize = (used + extraSize) * 2;
+            xmlChar *tmp = (xmlChar *) xmlRealloc(buffer, newSize + 1);
+
+            if (tmp == NULL) {
+                htmlErrMemory(ctxt);
+                goto error;
+            }
+            buffer = tmp;
+            buffer_size = newSize;
+        }
+
+        if (chunkSize > 0) {
+            input->cur += chunkSize;
+            memcpy(buffer + used, chunk, chunkSize);
+            used += chunkSize;
+        }
+
+        input->cur += skip;
+        if (replSize > 0) {
+            memcpy(buffer + used, repl, replSize);
+            used += replSize;
+        }
+
+        SHRINK;
+
+        if (termSkip >= 0)
+            break;
+
+restart:
+        ;
+    }
+
+    if (termSkip > 0) {
+        input->cur += termSkip;
+        col += termSkip;
+    }
+
+    input->line = line;
+    input->col = col;
+
+    ret = xmlMalloc(used + 1);
+    if (ret == NULL) {
+        htmlErrMemory(ctxt);
+    } else {
+        memcpy(ret, buffer, used);
+        ret[used] = 0;
+    }
+
+error:
+    ctxt->spaceTab = (void *) buffer;
+    ctxt->spaceMax = buffer_size;
+
+    return(ret);
+}
+
+/**
+ * htmlParseComment:
+ * @ctxt:  an HTML parser context
+ * @bogus:  true if this is a bogus comment
+ *
+ * Parse an HTML comment
+ */
+static void
+htmlParseComment(htmlParserCtxtPtr ctxt, int bogus) {
+    const xmlChar *comment = BAD_CAST "";
+    xmlChar *buf = NULL;
+    int maxLength = (ctxt->options & HTML_PARSE_HUGE) ?
+                    XML_MAX_HUGE_LENGTH :
+                    XML_MAX_TEXT_LENGTH;
+
+    if (bogus) {
+        buf = htmlParseData(ctxt, MASK_GT, 0, 0, maxLength);
+        if (CUR == '>')
+            SKIP(1);
+        comment = buf;
+    } else {
+        if (CUR == '>') {
+            SKIP(1);
+        } else if ((CUR == '-') && (NXT(1) == '>')) {
+            SKIP(2);
+        } else {
+            buf = htmlParseData(ctxt, MASK_DASH, 1, 0, maxLength);
+            comment = buf;
+        }
+    }
+
+    if (comment == NULL)
+        return;
+
+    if ((ctxt->sax != NULL) && (ctxt->sax->comment != NULL) &&
+        (!ctxt->disableSAX))
+        ctxt->sax->comment(ctxt->userData, comment);
+
+    xmlFree(buf);
+}
+
+/**
+ * htmlTopParseComment:
+ * @ctxt:  an HTML parser context
+ * @bogus:  true if this is a bogus comment
+ *
+ * Parse an HTML comment at the beginning of the document
+ */
+static void
+htmlTopParseComment(htmlParserCtxtPtr ctxt, int bogus) {
+    const xmlChar *comment = BAD_CAST "";
+    xmlChar *buf = NULL;
+    int maxLength = (ctxt->options & HTML_PARSE_HUGE) ?
+ XML_MAX_HUGE_LENGTH : + XML_MAX_TEXT_LENGTH; + + if (bogus) { + buf = htmlParseData(ctxt, MASK_GT, 0, 0, maxLength); + if (CUR == '>') + SKIP(1); + comment = buf; + } else { + if (CUR == '>') { + SKIP(1); + } else if ((CUR == '-') && (NXT(1) == '>')) { + SKIP(2); + } else { + buf = htmlSecureComment(ctxt, MASK_DASH, 0, maxLength); + comment = buf; + } + } + + if (comment == NULL) + return; + + if ((ctxt->sax != NULL) && (ctxt->sax->comment != NULL) && + (!ctxt->disableSAX)) + ctxt->sax->comment(ctxt->userData, comment); + + xmlFree(buf); +} + +/** + * htmlParseCharRef: + * @ctxt: an HTML parser context + * + * DEPRECATED: Internal function, don't use. + * + * Returns 0 + */ +int +htmlParseCharRef(htmlParserCtxtPtr ctxt ATTRIBUTE_UNUSED) { + return(0); +} + + +/** + * htmlParseDoctypeLiteral: + * @ctxt: an HTML parser context + * + * Parse a DOCTYPE SYTSTEM or PUBLIC literal. + * + * Returns the literal or NULL in case of error. + */ + +static xmlChar * +htmlParseDoctypeLiteral(htmlParserCtxtPtr ctxt) { + xmlChar *ret; + int maxLength = (ctxt->options & HTML_PARSE_HUGE) ? + XML_MAX_TEXT_LENGTH : + XML_MAX_NAME_LENGTH; + + if (CUR == '"') { + SKIP(1); + ret = htmlParseData(ctxt, MASK_DQ_GT, 0, 0, maxLength); + if (CUR == '"') + SKIP(1); + } else if (CUR == '\'') { + SKIP(1); + ret = htmlParseData(ctxt, MASK_SQ_GT, 0, 0, maxLength); + if (CUR == '\'') + SKIP(1); + } else { + return(NULL); + } + + return(ret); +} + +static void +htmlSkipBogusDoctype(htmlParserCtxtPtr ctxt) { + const xmlChar *in; + size_t avail; + int eof = PARSER_PROGRESSIVE(ctxt); + int line, col; + + line = ctxt->input->line; + col = ctxt->input->col; + + in = ctxt->input->cur; + avail = ctxt->input->end - in; + + while (!PARSER_STOPPED(ctxt)) { + int cur; + + if ((!eof) && (avail <= 64)) { + size_t oldAvail = avail; + + ctxt->input->cur = in; + + xmlParserGrow(ctxt); + + in = ctxt->input->cur; + avail = ctxt->input->end - in; + + if (oldAvail == avail) + eof = 1; + } + + if (avail == 0) + break; + + col += 1; + + cur = *in; + if (cur == '>') { + in += 1; + break; + } else if (cur == 0x0A) { + line += 1; + col = 1; + } + + in += 1; + avail -= 1; + + SHRINK; + } + + ctxt->input->cur = in; + ctxt->input->line = line; + ctxt->input->col = col; +} + +/** + * htmlParseDocTypeDecl: + * @ctxt: an HTML parser context + * + * Parse a DOCTYPE declaration. + */ + +static void +htmlParseDocTypeDecl(htmlParserCtxtPtr ctxt) { + xmlChar *name = NULL; + xmlChar *publicId = NULL; + xmlChar *URI = NULL; + int maxLength = (ctxt->options & HTML_PARSE_HUGE) ? 
+                    XML_MAX_TEXT_LENGTH :
+                    XML_MAX_NAME_LENGTH;
+
+    /*
+     * We know that '<!DOCTYPE' has been detected.
+     */
+    SKIP(9);
+
+    SKIP_BLANKS;
+
+    if ((ctxt->input->cur < ctxt->input->end) && (CUR != '>')) {
+        name = htmlParseData(ctxt, MASK_WS_GT, 0, 0, maxLength);
+
+        if ((ctxt->options & HTML_PARSE_HTML5) && (name != NULL)) {
+            xmlChar *cur;
+
+            for (cur = name; *cur; cur++) {
+                if (IS_UPPER(*cur))
+                    *cur += 0x20;
+            }
+        }
+
+        SKIP_BLANKS;
+    }
+
+    /*
+     * Check for SystemID and publicId
+     */
+    if ((UPPER == 'P') && (UPP(1) == 'U') &&
+        (UPP(2) == 'B') && (UPP(3) == 'L') &&
+        (UPP(4) == 'I') && (UPP(5) == 'C')) {
+        SKIP(6);
+        SKIP_BLANKS;
+        publicId = htmlParseDoctypeLiteral(ctxt);
+        if (publicId == NULL)
+            goto bogus;
+        SKIP_BLANKS;
+        URI = htmlParseDoctypeLiteral(ctxt);
+    } else if ((UPPER == 'S') && (UPP(1) == 'Y') &&
+               (UPP(2) == 'S') && (UPP(3) == 'T') &&
+               (UPP(4) == 'E') && (UPP(5) == 'M')) {
+        SKIP(6);
+        SKIP_BLANKS;
+        URI = htmlParseDoctypeLiteral(ctxt);
+    }
+
+bogus:
+    htmlSkipBogusDoctype(ctxt);
+
+    /*
+     * Create or update the document according to the DOCTYPE
+     */
+    if ((ctxt->sax != NULL) && (ctxt->sax->internalSubset != NULL) &&
+        (!ctxt->disableSAX))
+        ctxt->sax->internalSubset(ctxt->userData, name, publicId, URI);
+
+    xmlFree(name);
+    xmlFree(URI);
+    xmlFree(publicId);
+}
+
+/**
+ * htmlParseAttribute:
+ * @ctxt: an HTML parser context
+ * @value: a xmlChar ** used to store the value of the attribute
+ *
+ * parse an attribute
+ *
+ * [41] Attribute ::= Name Eq AttValue
+ *
+ * [25] Eq ::= S? '=' S?
+ *
+ * With namespace:
+ *
+ * [NS 11] Attribute ::= QName Eq AttValue
+ *
+ * Also the case QName == xmlns:??? is handled independently as a namespace
+ * definition.
+ *
+ * Returns the attribute name, and the value in *value.
+ */
+
+static xmlHashedString
+htmlParseAttribute(htmlParserCtxtPtr ctxt, xmlChar **value) {
+    xmlHashedString hname;
+    xmlChar *val = NULL;
+
+    *value = NULL;
+    hname = htmlParseHTMLName(ctxt, 1);
+    if (hname.name == NULL)
+        return(hname);
+
+    /*
+     * read the value
+     */
+    SKIP_BLANKS;
+    if (CUR == '=') {
+        SKIP(1);
+        SKIP_BLANKS;
+        val = htmlParseAttValue(ctxt);
+    }
+
+    *value = val;
+    return(hname);
+}
+
+/**
+ * htmlCheckEncoding:
+ * @ctxt: an HTML parser context
+ * @attvalue: the attribute value
+ *
+ * Checks an http-equiv attribute from a Meta tag to detect
+ * the encoding. If a new encoding is detected, the parser is switched
+ * to decode it and pass UTF-8.
+ */
+static void
+htmlCheckEncoding(htmlParserCtxtPtr ctxt, const xmlChar *attvalue) {
+    const xmlChar *encoding;
+    xmlChar *copy;
+
+    if (!attvalue)
+        return;
+
+    encoding = xmlStrcasestr(attvalue, BAD_CAST"charset");
+    if (encoding != NULL) {
+        encoding += 7;
+    }
+    /*
+     * skip blank
+     */
+    if (encoding && IS_WS_HTML(*encoding))
+        encoding = xmlStrcasestr(attvalue, BAD_CAST"=");
+    if (encoding && *encoding == '=') {
+        encoding ++;
+        copy = xmlStrdup(encoding);
+        if (copy == NULL)
+            htmlErrMemory(ctxt);
+        xmlSetDeclaredEncoding(ctxt, copy);
+    }
+}
+
+/**
+ * htmlCheckMeta:
+ * @ctxt: an HTML parser context
+ * @atts: the attribute values
+ *
+ * Checks the attributes from a Meta tag
+ */
+static void
+htmlCheckMeta(htmlParserCtxtPtr ctxt, const xmlChar **atts) {
+    int i;
+    const xmlChar *att, *value;
+    int http = 0;
+    const xmlChar *content = NULL;
+
+    if ((ctxt == NULL) || (atts == NULL))
+        return;
+
+    i = 0;
+    att = atts[i++];
+    while (att != NULL) {
+        value = atts[i++];
+        if (value != NULL) {
+            if ((!xmlStrcasecmp(att, BAD_CAST "http-equiv")) &&
+                (!xmlStrcasecmp(value, BAD_CAST "Content-Type"))) {
+                http = 1;
+            } else if (!xmlStrcasecmp(att, BAD_CAST "charset")) {
xmlChar *copy; + + copy = xmlStrdup(value); + if (copy == NULL) + htmlErrMemory(ctxt); + xmlSetDeclaredEncoding(ctxt, copy); + } else if (!xmlStrcasecmp(att, BAD_CAST "content")) { + content = value; + } + } + att = atts[i++]; + } + if ((http) && (content != NULL)) + htmlCheckEncoding(ctxt, content); + +} + +/** + * htmlAttrHashInsert: + * @ctxt: parser context + * @size: size of the hash table + * @name: attribute name + * @hashValue: hash value of name + * @aindex: attribute index (this is a multiple of 5) + * + * Inserts a new attribute into the hash table. + * + * Returns INT_MAX if no existing attribute was found, the attribute + * index if an attribute was found, -1 if a memory allocation failed. + */ +static int +htmlAttrHashInsert(xmlParserCtxtPtr ctxt, unsigned size, const xmlChar *name, + unsigned hashValue, int aindex) { + xmlAttrHashBucket *table = ctxt->attrHash; + xmlAttrHashBucket *bucket; + unsigned hindex; + + hindex = hashValue & (size - 1); + bucket = &table[hindex]; + + while (bucket->index >= 0) { + const xmlChar **atts = &ctxt->atts[bucket->index]; + + if (name == atts[0]) + return(bucket->index); + + hindex++; + bucket++; + if (hindex >= size) { + hindex = 0; + bucket = table; + } + } + + bucket->index = aindex; + + return(INT_MAX); +} + +/** + * htmlParseStartTag: + * @ctxt: an HTML parser context + * + * parse a start of tag either for rule element or + * EmptyElement. In both case we don't parse the tag closing chars. + * + * [40] STag ::= '<' Name (S Attribute)* S? '>' + * + * [44] EmptyElemTag ::= '<' Name (S Attribute)* S? '/>' + * + * With namespace: + * + * [NS 8] STag ::= '<' QName (S Attribute)* S? '>' + * + * [NS 10] EmptyElement ::= '<' QName (S Attribute)* S? '/>' + * + * Returns 0 in case of success, -1 in case of error and 1 if discarded + */ + +static void +htmlParseStartTag(htmlParserCtxtPtr ctxt) { + const xmlChar *name; + const xmlChar *attname; + xmlChar *attvalue; + const xmlChar **atts; + int nbatts = 0; + int maxatts; + int meta = 0; + int i; + int discardtag = 0; + + ctxt->endCheckState = 0; + + SKIP(1); + + atts = ctxt->atts; + maxatts = ctxt->maxatts; + + GROW; + name = htmlParseHTMLName(ctxt, 0).name; + if (name == NULL) + return; + if (xmlStrEqual(name, BAD_CAST"meta")) + meta = 1; + + if ((ctxt->options & HTML_PARSE_HTML5) == 0) { + /* + * Check for auto-closure of HTML elements. + */ + htmlAutoClose(ctxt, name); + + /* + * Check for implied HTML elements. + */ + htmlCheckImplied(ctxt, name); + + /* + * Avoid html at any level > 0, head at any level != 1 + * or any attempt to recurse body + */ + if ((ctxt->nameNr > 0) && (xmlStrEqual(name, BAD_CAST"html"))) { + htmlParseErr(ctxt, XML_HTML_STRUCURE_ERROR, + "htmlParseStartTag: misplaced tag\n", + name, NULL); + discardtag = 1; + ctxt->depth++; + } + if ((ctxt->nameNr != 1) && + (xmlStrEqual(name, BAD_CAST"head"))) { + htmlParseErr(ctxt, XML_HTML_STRUCURE_ERROR, + "htmlParseStartTag: misplaced tag\n", + name, NULL); + discardtag = 1; + ctxt->depth++; + } + if (xmlStrEqual(name, BAD_CAST"body")) { + int indx; + for (indx = 0;indx < ctxt->nameNr;indx++) { + if (xmlStrEqual(ctxt->nameTab[indx], BAD_CAST"body")) { + htmlParseErr(ctxt, XML_HTML_STRUCURE_ERROR, + "htmlParseStartTag: misplaced tag\n", + name, NULL); + discardtag = 1; + ctxt->depth++; + } + } + } + } + + /* + * Now parse the attributes, it ends up with the ending + * + * (S Attribute)* S? 
+     */
+    SKIP_BLANKS;
+    while ((ctxt->input->cur < ctxt->input->end) &&
+           (CUR != '>') &&
+           ((CUR != '/') || (NXT(1) != '>')) &&
+           (PARSER_STOPPED(ctxt) == 0)) {
+        xmlHashedString hattname;
+
+        /* unexpected-solidus-in-tag */
+        if (CUR == '/') {
+            SKIP(1);
+            SKIP_BLANKS;
+            continue;
+        }
+        GROW;
+        hattname = htmlParseAttribute(ctxt, &attvalue);
+        attname = hattname.name;
+
+        if (attname != NULL) {
+            /*
+             * Add the pair to atts
+             */
+            if (nbatts + 4 > maxatts) {
+                const xmlChar **tmp;
+                unsigned *utmp;
+                size_t newSize = maxatts ? maxatts * 2 : 22;
+
+                tmp = xmlMalloc(newSize * sizeof(tmp[0]));
+                if (tmp == NULL) {
+                    htmlErrMemory(ctxt);
+                    if (attvalue != NULL)
+                        xmlFree(attvalue);
+                    goto failed;
+                }
+
+                utmp = xmlRealloc(ctxt->attallocs,
+                                  newSize / 2 * sizeof(utmp[0]));
+                if (utmp == NULL) {
+                    htmlErrMemory(ctxt);
+                    if (attvalue != NULL)
+                        xmlFree(attvalue);
+                    xmlFree(tmp);
+                    goto failed;
+                }
+
+                if (maxatts > 0)
+                    memcpy(tmp, atts, maxatts * sizeof(tmp[0]));
+                xmlFree(atts);
+
+                atts = tmp;
+                maxatts = newSize;
+                ctxt->atts = atts;
+                ctxt->attallocs = utmp;
+                ctxt->maxatts = maxatts;
+            }
+
+            ctxt->attallocs[nbatts/2] = hattname.hashValue;
+            atts[nbatts++] = attname;
+            atts[nbatts++] = attvalue;
+        }
+        else {
+            if (attvalue != NULL)
+                xmlFree(attvalue);
+        }
+
+failed:
+        SKIP_BLANKS;
+    }
+
+    if (ctxt->input->cur >= ctxt->input->end) {
+        discardtag = 1;
+        goto done;
+    }
+
+    /*
+     * Verify that attribute names are unique.
+     */
+    if (nbatts > 2) {
+        unsigned attrHashSize;
+        int j, k;
+
+        attrHashSize = 4;
+        while (attrHashSize / 2 < (unsigned) nbatts / 2)
+            attrHashSize *= 2;
+
+        if (attrHashSize > ctxt->attrHashMax) {
+            xmlAttrHashBucket *tmp;
+
+            tmp = xmlRealloc(ctxt->attrHash, attrHashSize * sizeof(tmp[0]));
+            if (tmp == NULL) {
+                htmlErrMemory(ctxt);
+                goto done;
+            }
+
+            ctxt->attrHash = tmp;
+            ctxt->attrHashMax = attrHashSize;
+        }
+
+        memset(ctxt->attrHash, -1, attrHashSize * sizeof(ctxt->attrHash[0]));
+
+        for (i = 0, j = 0, k = 0; i < nbatts; i += 2, k++) {
+            unsigned hashValue;
+            int res;
+
+            attname = atts[i];
+            hashValue = ctxt->attallocs[k] | 0x80000000;
+
+            res = htmlAttrHashInsert(ctxt, attrHashSize, attname,
+                                     hashValue, j);
+            if (res < 0)
+                continue;
+
+            if (res == INT_MAX) {
+                atts[j] = atts[i];
+                atts[j+1] = atts[i+1];
+                j += 2;
+            } else {
+                xmlFree((xmlChar *) atts[i+1]);
+            }
+        }
+
+        nbatts = j;
+    }
+
+    if (nbatts > 0) {
+        atts[nbatts] = NULL;
+        atts[nbatts + 1] = NULL;
+
+        /*
+         * Handle specific association to the META tag
+         */
+        if (meta)
+            htmlCheckMeta(ctxt, atts);
+    }
+
+    /*
+     * SAX: Start of Element !
+     */
+    if (!discardtag) {
+        if (ctxt->options & HTML_PARSE_HTML5) {
+            if (ctxt->nameNr > 0)
+                htmlnamePop(ctxt);
+        }
+
+        htmlnamePush(ctxt, name);
+        if ((ctxt->sax != NULL) && (ctxt->sax->startElement != NULL)) {
+            if (nbatts != 0)
+                ctxt->sax->startElement(ctxt->userData, name, atts);
+            else
+                ctxt->sax->startElement(ctxt->userData, name, NULL);
+        }
+    }
+
+done:
+    if (atts != NULL) {
+        for (i = 1;i < nbatts;i += 2) {
+            if (atts[i] != NULL)
+                xmlFree((xmlChar *) atts[i]);
+        }
+    }
+}
+
+/**
+ * htmlParseEndTag:
+ * @ctxt: an HTML parser context
+ *
+ * parse an end of tag
+ *
+ * [42] ETag ::= '</' Name S? '>'
+ *
+ * With namespace
+ *
+ * [NS 9] ETag ::= '</' QName S? '>'
+ */ + +static void +htmlParseEndTag(htmlParserCtxtPtr ctxt) +{ + const xmlChar *name; + const xmlChar *oldname; + int i; + + ctxt->endCheckState = 0; + + SKIP(2); + + if (CUR == '>') { + SKIP(1); + return; + } + + if (!IS_ASCII_LETTER(CUR)) { + htmlParseComment(ctxt, /* bogus */ 1); + return; + } + + name = htmlParseHTMLName(ctxt, 0).name; + if (name == NULL) + return; + + /* + * Parse and ignore attributes. + */ + SKIP_BLANKS; + while ((ctxt->input->cur < ctxt->input->end) && + (CUR != '>') && + ((CUR != '/') || (NXT(1) != '>')) && + (ctxt->instate != XML_PARSER_EOF)) { + xmlChar *attvalue = NULL; + + /* unexpected-solidus-in-tag */ + if (CUR == '/') { + SKIP(1); + SKIP_BLANKS; + continue; + } + GROW; + htmlParseAttribute(ctxt, &attvalue); + if (attvalue != NULL) + xmlFree(attvalue); + + SKIP_BLANKS; + } + + if (CUR == '>') { + SKIP(1); + } else if ((CUR == '/') && (NXT(1) == '>')) { + SKIP(2); + } else { + return; + } + + if (ctxt->options & HTML_PARSE_HTML5) { + if ((ctxt->sax != NULL) && (ctxt->sax->endElement != NULL)) + ctxt->sax->endElement(ctxt->userData, name); + return; + } + + /* + * if we ignored misplaced tags in htmlParseStartTag don't pop them + * out now. + */ + if ((ctxt->depth > 0) && + (xmlStrEqual(name, BAD_CAST "html") || + xmlStrEqual(name, BAD_CAST "body") || + xmlStrEqual(name, BAD_CAST "head"))) { + ctxt->depth--; + return; + } + + /* + * If the name read is not one of the element in the parsing stack + * then return, it's just an error. + */ + for (i = (ctxt->nameNr - 1); i >= 0; i--) { + if (xmlStrEqual(name, ctxt->nameTab[i])) + break; + } + if (i < 0) { + htmlParseErr(ctxt, XML_ERR_TAG_NAME_MISMATCH, + "Unexpected end tag : %s\n", name, NULL); + return; + } + + + /* + * Check for auto-closure of HTML elements. + */ + + htmlAutoCloseOnClose(ctxt, name); + + /* + * Well formedness constraints, opening and closing must match. + * With the exception that the autoclose may have popped stuff out + * of the stack. + */ + if ((ctxt->name != NULL) && (!xmlStrEqual(ctxt->name, name))) { + htmlParseErr(ctxt, XML_ERR_TAG_NAME_MISMATCH, + "Opening and ending tag mismatch: %s and %s\n", + name, ctxt->name); + } + + /* + * SAX: End of Tag + */ + oldname = ctxt->name; + if ((oldname != NULL) && (xmlStrEqual(oldname, name))) { + htmlParserFinishElementParsing(ctxt); + if ((ctxt->sax != NULL) && (ctxt->sax->endElement != NULL)) + ctxt->sax->endElement(ctxt->userData, name); + htmlnamePop(ctxt); + } +} + +/** + * htmlParseContent: + * @ctxt: an HTML parser context + * + * Parse a content: comment, sub-element, reference or text. 
+ * New version for non recursive htmlParseElementInternal + */ + +static void +htmlParseContent(htmlParserCtxtPtr ctxt) { + GROW; + + while ((PARSER_STOPPED(ctxt) == 0) && + (ctxt->input->cur < ctxt->input->end)) { + int mode; + + mode = ctxt->endCheckState; + + if ((mode == 0) && (CUR == '<')) { + if (NXT(1) == '/') { + htmlParseEndTag(ctxt); + } else if (NXT(1) == '!') { + /* + * Sometimes DOCTYPE arrives in the middle of the document + */ + if ((UPP(2) == 'D') && (UPP(3) == 'O') && + (UPP(4) == 'C') && (UPP(5) == 'T') && + (UPP(6) == 'Y') && (UPP(7) == 'P') && + (UPP(8) == 'E')) { + htmlParseDocTypeDecl(ctxt); + } else if ((NXT(2) == '-') && (NXT(3) == '-')) { + SKIP(4); + htmlParseComment(ctxt, /* bogus */ 0); + } else { + SKIP(2); + htmlParseComment(ctxt, /* bogus */ 1); + } + } else if (NXT(1) == '?') { + SKIP(1); + htmlParseComment(ctxt, /* bogus */ 1); + } else if (IS_ASCII_LETTER(NXT(1))) { + htmlParseElementInternal(ctxt); + } else { + htmlCheckParagraph(ctxt); + if ((ctxt->sax != NULL) && (!ctxt->disableSAX) && + (ctxt->sax->characters != NULL)) + ctxt->sax->characters(ctxt->userData, BAD_CAST "<", 1); + SKIP(1); + } + } else { + htmlParseCharData(ctxt); + } + + SHRINK; + GROW; + } + + if (ctxt->input->cur >= ctxt->input->end) + htmlAutoCloseOnEnd(ctxt); +} + +/** + * htmlParseElementInternal: + * @ctxt: an HTML parser context + * + * parse an HTML element, new version, non recursive + * + * [39] element ::= EmptyElemTag | STag content ETag + * + * [41] Attribute ::= Name Eq AttValue + */ + +static int +htmlParseElementInternal(htmlParserCtxtPtr ctxt) { + const xmlChar *name; + const htmlElemDesc * info; + htmlParserNodeInfo node_info = { NULL, 0, 0, 0, 0 }; + + if ((ctxt == NULL) || (ctxt->input == NULL)) + return(0); + + /* Capture start position */ + if (ctxt->record_info) { + node_info.begin_pos = ctxt->input->consumed + + (CUR_PTR - ctxt->input->base); + node_info.begin_line = ctxt->input->line; + } + + htmlParseStartTag(ctxt); + name = ctxt->name; + if (name == NULL) + return(0); + + if (ctxt->record_info) + htmlNodeInfoPush(ctxt, &node_info); + + /* + * Check for an Empty Element labeled the XML/SGML way + */ + if ((CUR == '/') && (NXT(1) == '>')) { + SKIP(2); + htmlParserFinishElementParsing(ctxt); + if ((ctxt->options & HTML_PARSE_HTML5) == 0) { + if ((ctxt->sax != NULL) && (ctxt->sax->endElement != NULL)) + ctxt->sax->endElement(ctxt->userData, name); + } + htmlnamePop(ctxt); + return(0); + } + + if (CUR != '>') + return(0); + SKIP(1); + + /* + * Lookup the info for that element. + */ + info = htmlTagLookup(name); + + /* + * Check for an Empty Element from DTD definition + */ + if ((info != NULL) && (info->empty)) { + htmlParserFinishElementParsing(ctxt); + if ((ctxt->options & HTML_PARSE_HTML5) == 0) { + if ((ctxt->sax != NULL) && (ctxt->sax->endElement != NULL)) + ctxt->sax->endElement(ctxt->userData, name); + } + htmlnamePop(ctxt); + return(0); + } + + if (info != NULL) + ctxt->endCheckState = info->dataMode; + + return(1); +} + +/** + * htmlParseElement: + * @ctxt: an HTML parser context + * + * DEPRECATED: Internal function, don't use. 
+ *
+ * parse an HTML element, this is highly recursive; it is
+ * kept for compatibility with previous code versions
+ *
+ * [39] element ::= EmptyElemTag | STag content ETag
+ *
+ * [41] Attribute ::= Name Eq AttValue
+ */
+
+void
+htmlParseElement(htmlParserCtxtPtr ctxt) {
+    const xmlChar *oldptr;
+    int depth;
+
+    if ((ctxt == NULL) || (ctxt->input == NULL))
+        return;
+
+    if (htmlParseElementInternal(ctxt) == 0)
+        return;
+
+    /*
+     * Parse the content of the element:
+     */
+    depth = ctxt->nameNr;
+    while (CUR != 0) {
+        oldptr = ctxt->input->cur;
+        htmlParseContent(ctxt);
+        if (oldptr==ctxt->input->cur) break;
+        if (ctxt->nameNr < depth) break;
+    }
+
+    if (CUR == 0) {
+        htmlAutoCloseOnEnd(ctxt);
+    }
+}
+
+xmlNodePtr
+htmlCtxtParseContentInternal(htmlParserCtxtPtr ctxt, xmlParserInputPtr input) {
+    xmlNodePtr root;
+    xmlNodePtr list = NULL;
+    xmlChar *rootName = BAD_CAST "#root";
+
+    root = xmlNewDocNode(ctxt->myDoc, NULL, rootName, NULL);
+    if (root == NULL) {
+        htmlErrMemory(ctxt);
+        return(NULL);
+    }
+
+    if (xmlCtxtPushInput(ctxt, input) < 0) {
+        xmlFreeNode(root);
+        return(NULL);
+    }
+
+    htmlnamePush(ctxt, rootName);
+    nodePush(ctxt, root);
+
+    htmlParseContent(ctxt);
+
+    /* TODO: Use xmlCtxtIsCatastrophicError */
+    if (ctxt->errNo != XML_ERR_NO_MEMORY) {
+        xmlNodePtr cur;
+
+        /*
+         * Unlink newly created node list.
+         */
+        list = root->children;
+        root->children = NULL;
+        root->last = NULL;
+        for (cur = list; cur != NULL; cur = cur->next)
+            cur->parent = NULL;
+    }
+
+    nodePop(ctxt);
+    htmlnamePop(ctxt);
+
+    xmlCtxtPopInput(ctxt);
+
+    xmlFreeNode(root);
+    return(list);
+}
+
+/**
+ * htmlParseDocument:
+ * @ctxt: an HTML parser context
+ *
+ * Parse an HTML document and invoke the SAX handlers. This is useful
+ * if you're only interested in custom SAX callbacks. If you want a
+ * document tree, use htmlCtxtParseDocument.
+ *
+ * Returns 0 in case of success, -1 in case of error.
+ */
+
+int
+htmlParseDocument(htmlParserCtxtPtr ctxt) {
+    xmlDtdPtr dtd;
+
+    if ((ctxt == NULL) || (ctxt->input == NULL))
+        return(-1);
+
+    if ((ctxt->sax) && (ctxt->sax->setDocumentLocator)) {
+        ctxt->sax->setDocumentLocator(ctxt->userData,
+                (xmlSAXLocator *) &xmlDefaultSAXLocator);
+    }
+
+    xmlDetectEncoding(ctxt);
+
+    /*
+     * This is wrong but matches long-standing behavior. In most cases,
+     * a document starting with an XML declaration will specify UTF-8.
+     */
+    if (((ctxt->input->flags & XML_INPUT_HAS_ENCODING) == 0) &&
+        (xmlStrncmp(ctxt->input->cur, BAD_CAST "<?xml", 5) == 0))
+        xmlSwitchEncoding(ctxt, XML_CHAR_ENCODING_UTF8);
+
+    if ((ctxt->sax) && (ctxt->sax->startDocument) && (!ctxt->disableSAX))
+        ctxt->sax->startDocument(ctxt->userData);
+
+    /*
+     * Parse possible comments and PIs before any content
+     */
+    while (CUR == '<') {
+        if ((NXT(1) == '!') && (NXT(2) == '-') && (NXT(3) == '-')) {
+            SKIP(4);
+            htmlTopParseComment(ctxt, /* bogus */ 0);
+        } else if (NXT(1) == '?') {
+            SKIP(1);
+            htmlTopParseComment(ctxt, /* bogus */ 1);
+        } else {
+            break;
+        }
+        SKIP_BLANKS;
+    }
+
+    /*
+     * Then possibly doc type declaration(s) and more Misc
+     * (doctypedecl Misc*)?
+ */ + if ((CUR == '<') && (NXT(1) == '!') && + (UPP(2) == 'D') && (UPP(3) == 'O') && + (UPP(4) == 'C') && (UPP(5) == 'T') && + (UPP(6) == 'Y') && (UPP(7) == 'P') && + (UPP(8) == 'E')) { + ctxt->instate = XML_PARSER_MISC; + htmlParseDocTypeDecl(ctxt); + } + SKIP_BLANKS; + + /* + * Parse possible comments and PIs before any content + */ + ctxt->instate = XML_PARSER_PROLOG; + while (CUR == '<') { + if ((NXT(1) == '!') && (NXT(2) == '-') && (NXT(3) == '-')) { + SKIP(4); + htmlParseComment(ctxt, /* bogus */ 0); + } else if (NXT(1) == '?') { + SKIP(1); + htmlParseComment(ctxt, /* bogus */ 1); + } else { + break; + } + SKIP_BLANKS; + } + + /* + * Time to start parsing the tree itself + */ + ctxt->instate = XML_PARSER_CONTENT; + htmlParseContent(ctxt); + + /* + * autoclose + */ + if (CUR == 0) + htmlAutoCloseOnEnd(ctxt); + + + /* + * SAX: end of the document processing. + */ + if ((ctxt->sax) && (ctxt->sax->endDocument != NULL)) + ctxt->sax->endDocument(ctxt->userData); + + if ((!(ctxt->options & HTML_PARSE_NODEFDTD)) && (ctxt->myDoc != NULL)) { + dtd = xmlGetIntSubset(ctxt->myDoc); + if (dtd == NULL) { + ctxt->myDoc->intSubset = + xmlCreateIntSubset(ctxt->myDoc, BAD_CAST "html", + BAD_CAST "-//W3C//DTD HTML 4.0 Transitional//EN", + BAD_CAST "http://www.w3.org/TR/REC-html40/loose.dtd"); + if (ctxt->myDoc->intSubset == NULL) + htmlErrMemory(ctxt); + } + } + if (! ctxt->wellFormed) return(-1); + return(0); +} + + +/************************************************************************ + * * + * Parser contexts handling * + * * + ************************************************************************/ + +/** + * htmlInitParserCtxt: + * @ctxt: an HTML parser context + * @sax: SAX handler + * @userData: user data + * + * Initialize a parser context + * + * Returns 0 in case of success and -1 in case of error + */ + +static int +htmlInitParserCtxt(htmlParserCtxtPtr ctxt, const htmlSAXHandler *sax, + void *userData) +{ + if (ctxt == NULL) return(-1); + memset(ctxt, 0, sizeof(htmlParserCtxt)); + + ctxt->dict = xmlDictCreate(); + if (ctxt->dict == NULL) + return(-1); + + if (ctxt->sax == NULL) + ctxt->sax = (htmlSAXHandler *) xmlMalloc(sizeof(htmlSAXHandler)); + if (ctxt->sax == NULL) + return(-1); + if (sax == NULL) { + memset(ctxt->sax, 0, sizeof(htmlSAXHandler)); + xmlSAX2InitHtmlDefaultSAXHandler(ctxt->sax); + ctxt->userData = ctxt; + } else { + memcpy(ctxt->sax, sax, sizeof(htmlSAXHandler)); + ctxt->userData = userData ? 
userData : ctxt;
+    }
+
+    /* Allocate the Input stack */
+    ctxt->inputTab = (htmlParserInputPtr *)
+                      xmlMalloc(5 * sizeof(htmlParserInputPtr));
+    if (ctxt->inputTab == NULL)
+        return(-1);
+    ctxt->inputNr = 0;
+    ctxt->inputMax = 5;
+    ctxt->input = NULL;
+    ctxt->version = NULL;
+    ctxt->encoding = NULL;
+    ctxt->standalone = -1;
+    ctxt->instate = XML_PARSER_START;
+
+    /* Allocate the Node stack */
+    ctxt->nodeTab = (htmlNodePtr *) xmlMalloc(10 * sizeof(htmlNodePtr));
+    if (ctxt->nodeTab == NULL)
+        return(-1);
+    ctxt->nodeNr = 0;
+    ctxt->nodeMax = 10;
+    ctxt->node = NULL;
+
+    /* Allocate the Name stack */
+    ctxt->nameTab = (const xmlChar **) xmlMalloc(10 * sizeof(xmlChar *));
+    if (ctxt->nameTab == NULL)
+        return(-1);
+    ctxt->nameNr = 0;
+    ctxt->nameMax = 10;
+    ctxt->name = NULL;
+
+    ctxt->nodeInfoTab = NULL;
+    ctxt->nodeInfoNr = 0;
+    ctxt->nodeInfoMax = 0;
+
+    ctxt->myDoc = NULL;
+    ctxt->wellFormed = 1;
+    ctxt->replaceEntities = 0;
+    ctxt->linenumbers = xmlLineNumbersDefaultValue;
+    ctxt->keepBlanks = xmlKeepBlanksDefaultValue;
+    ctxt->html = 1;
+    ctxt->vctxt.flags = XML_VCTXT_USE_PCTXT;
+    ctxt->vctxt.userData = ctxt;
+    ctxt->vctxt.error = xmlParserValidityError;
+    ctxt->vctxt.warning = xmlParserValidityWarning;
+    ctxt->record_info = 0;
+    ctxt->validate = 0;
+    ctxt->checkIndex = 0;
+    ctxt->catalogs = NULL;
+    xmlInitNodeInfoSeq(&ctxt->node_seq);
+    return(0);
+}
+
+/**
+ * htmlFreeParserCtxt:
+ * @ctxt: an HTML parser context
+ *
+ * Free all the memory used by a parser context. However the parsed
+ * document in ctxt->myDoc is not freed.
+ */
+
+void
+htmlFreeParserCtxt(htmlParserCtxtPtr ctxt)
+{
+    xmlFreeParserCtxt(ctxt);
+}
+
+/**
+ * htmlNewParserCtxt:
+ *
+ * Allocate and initialize a new HTML parser context.
+ *
+ * This can be used to parse HTML documents into DOM trees with
+ * functions like xmlCtxtReadFile or xmlCtxtReadMemory.
+ *
+ * See htmlCtxtUseOptions for parser options.
+ *
+ * See xmlCtxtSetErrorHandler for advanced error handling.
+ *
+ * See htmlNewSAXParserCtxt for custom SAX parsers.
+ *
+ * Returns the htmlParserCtxtPtr or NULL in case of allocation error
+ */
+
+htmlParserCtxtPtr
+htmlNewParserCtxt(void)
+{
+    return(htmlNewSAXParserCtxt(NULL, NULL));
+}
+
+/**
+ * htmlNewSAXParserCtxt:
+ * @sax: SAX handler
+ * @userData: user data
+ *
+ * Allocate and initialize a new HTML SAX parser context. If userData
+ * is NULL, the parser context will be passed as user data.
+ *
+ * Available since 2.11.0. If you want to support older versions,
+ * it's best to invoke htmlNewParserCtxt and set ctxt->sax with
+ * struct assignment.
+ *
+ * Also see htmlNewParserCtxt.
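+ *
+ * As a minimal sketch of intended use (myStartElement and myState are
+ * hypothetical user code, not part of libxml2):
+ *
+ *     htmlSAXHandler handler;
+ *     htmlParserCtxtPtr ctxt;
+ *
+ *     memset(&handler, 0, sizeof(handler));
+ *     handler.startElement = myStartElement;
+ *     ctxt = htmlNewSAXParserCtxt(&handler, &myState);
+ *     ...
+ *     htmlFreeParserCtxt(ctxt);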
+ * + * Returns the htmlParserCtxtPtr or NULL in case of allocation error + */ + +htmlParserCtxtPtr +htmlNewSAXParserCtxt(const htmlSAXHandler *sax, void *userData) +{ + xmlParserCtxtPtr ctxt; + + xmlInitParser(); + + ctxt = (xmlParserCtxtPtr) xmlMalloc(sizeof(xmlParserCtxt)); + if (ctxt == NULL) + return(NULL); + memset(ctxt, 0, sizeof(xmlParserCtxt)); + if (htmlInitParserCtxt(ctxt, sax, userData) < 0) { + htmlFreeParserCtxt(ctxt); + return(NULL); + } + return(ctxt); +} + +static htmlParserCtxtPtr +htmlCreateMemoryParserCtxtInternal(const char *url, + const char *buffer, size_t size, + const char *encoding) { + xmlParserCtxtPtr ctxt; + xmlParserInputPtr input; + + if (buffer == NULL) + return(NULL); + + ctxt = htmlNewParserCtxt(); + if (ctxt == NULL) + return(NULL); + + input = xmlCtxtNewInputFromMemory(ctxt, url, buffer, size, encoding, 0); + if (input == NULL) { + xmlFreeParserCtxt(ctxt); + return(NULL); + } + + if (xmlCtxtPushInput(ctxt, input) < 0) { + xmlFreeInputStream(input); + xmlFreeParserCtxt(ctxt); + return(NULL); + } + + return(ctxt); +} + +/** + * htmlCreateMemoryParserCtxt: + * @buffer: a pointer to a char array + * @size: the size of the array + * + * DEPRECATED: Use htmlNewParserCtxt and htmlCtxtReadMemory. + * + * Create a parser context for an HTML in-memory document. The input + * buffer must not contain any terminating null bytes. + * + * Returns the new parser context or NULL + */ +htmlParserCtxtPtr +htmlCreateMemoryParserCtxt(const char *buffer, int size) { + if (size <= 0) + return(NULL); + + return(htmlCreateMemoryParserCtxtInternal(NULL, buffer, size, NULL)); +} + +/** + * htmlCreateDocParserCtxt: + * @str: a pointer to an array of xmlChar + * @encoding: encoding (optional) + * + * Create a parser context for a null-terminated string. + * + * Returns the new parser context or NULL if a memory allocation failed. + */ +static htmlParserCtxtPtr +htmlCreateDocParserCtxt(const xmlChar *str, const char *url, + const char *encoding) { + xmlParserCtxtPtr ctxt; + xmlParserInputPtr input; + + if (str == NULL) + return(NULL); + + ctxt = htmlNewParserCtxt(); + if (ctxt == NULL) + return(NULL); + + input = xmlCtxtNewInputFromString(ctxt, url, (const char *) str, + encoding, 0); + if (input == NULL) { + xmlFreeParserCtxt(ctxt); + return(NULL); + } + + if (xmlCtxtPushInput(ctxt, input) < 0) { + xmlFreeInputStream(input); + xmlFreeParserCtxt(ctxt); + return(NULL); + } + + return(ctxt); +} + +#ifdef LIBXML_PUSH_ENABLED +/************************************************************************ + * * + * Progressive parsing interfaces * + * * + ************************************************************************/ + +enum xmlLookupStates { + LSTATE_TAG_NAME = 0, + LSTATE_BEFORE_ATTR_NAME, + LSTATE_ATTR_NAME, + LSTATE_AFTER_ATTR_NAME, + LSTATE_BEFORE_ATTR_VALUE, + LSTATE_ATTR_VALUE_DQUOTED, + LSTATE_ATTR_VALUE_SQUOTED, + LSTATE_ATTR_VALUE_UNQUOTED +}; + +/** + * htmlParseLookupGt: + * @ctxt: an HTML parser context + * + * Check whether there's enough data in the input buffer to finish parsing + * a tag. This has to take quotes into account. 
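+ *
+ * For example, given the partial input '<a title="x > y', the '>' inside
+ * the double-quoted attribute value must not be mistaken for the end of
+ * the tag, so the lookahead keeps a small quoting state machine across
+ * calls in ctxt->endCheckState.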
+ */
+static int
+htmlParseLookupGt(xmlParserCtxtPtr ctxt) {
+    const xmlChar *cur;
+    const xmlChar *end = ctxt->input->end;
+    int state = ctxt->endCheckState;
+    size_t index;
+
+    if (ctxt->checkIndex == 0)
+        cur = ctxt->input->cur + 2; /* Skip '<x' */
+    else
+        cur = ctxt->input->cur + ctxt->checkIndex;
+
+    while (cur < end) {
+        int c = *cur++;
+
+        if (state != LSTATE_ATTR_VALUE_SQUOTED &&
+            state != LSTATE_ATTR_VALUE_DQUOTED) {
+            if (c == '/' &&
+                state != LSTATE_BEFORE_ATTR_VALUE &&
+                state != LSTATE_ATTR_VALUE_UNQUOTED) {
+                state = LSTATE_BEFORE_ATTR_NAME;
+                continue;
+            } else if (c == '>') {
+                ctxt->checkIndex = 0;
+                ctxt->endCheckState = 0;
+                return(0);
+            }
+        }
+
+        switch (state) {
+            case LSTATE_TAG_NAME:
+                if (IS_WS_HTML(c))
+                    state = LSTATE_BEFORE_ATTR_NAME;
+                break;
+
+            case LSTATE_BEFORE_ATTR_NAME:
+                if (!IS_WS_HTML(c))
+                    state = LSTATE_ATTR_NAME;
+                break;
+
+            case LSTATE_ATTR_NAME:
+                if (c == '=')
+                    state = LSTATE_BEFORE_ATTR_VALUE;
+                else if (IS_WS_HTML(c))
+                    state = LSTATE_AFTER_ATTR_NAME;
+                break;
+
+            case LSTATE_AFTER_ATTR_NAME:
+                if (c == '=')
+                    state = LSTATE_BEFORE_ATTR_VALUE;
+                else if (!IS_WS_HTML(c))
+                    state = LSTATE_ATTR_NAME;
+                break;
+
+            case LSTATE_BEFORE_ATTR_VALUE:
+                if (c == '"')
+                    state = LSTATE_ATTR_VALUE_DQUOTED;
+                else if (c == '\'')
+                    state = LSTATE_ATTR_VALUE_SQUOTED;
+                else if (!IS_WS_HTML(c))
+                    state = LSTATE_ATTR_VALUE_UNQUOTED;
+                break;
+
+            case LSTATE_ATTR_VALUE_DQUOTED:
+                if (c == '"')
+                    state = LSTATE_BEFORE_ATTR_NAME;
+                break;
+
+            case LSTATE_ATTR_VALUE_SQUOTED:
+                if (c == '\'')
+                    state = LSTATE_BEFORE_ATTR_NAME;
+                break;
+
+            case LSTATE_ATTR_VALUE_UNQUOTED:
+                if (IS_WS_HTML(c))
+                    state = LSTATE_BEFORE_ATTR_NAME;
+                break;
+        }
+    }
+
+    index = cur - ctxt->input->cur;
+    if (index > LONG_MAX) {
+        ctxt->checkIndex = 0;
+        ctxt->endCheckState = 0;
+        return(0);
+    }
+    ctxt->checkIndex = index;
+    ctxt->endCheckState = state;
+    return(-1);
+}
+
+/**
+ * htmlParseLookupString:
+ * @ctxt: an XML parser context
+ * @startDelta: delta to apply at the start
+ * @str: string
+ * @strLen: length of string
+ * @extraLen: extra bytes that must be available after the string
+ *
+ * Check whether the input buffer contains a string.
+ */
+static int
+htmlParseLookupString(xmlParserCtxtPtr ctxt, size_t startDelta,
+                      const char *str, size_t strLen, size_t extraLen) {
+    const xmlChar *end = ctxt->input->end;
+    const xmlChar *cur, *term;
+    size_t index, rescan;
+    int ret;
+
+    if (ctxt->checkIndex == 0) {
+        cur = ctxt->input->cur + startDelta;
+    } else {
+        cur = ctxt->input->cur + ctxt->checkIndex;
+    }
+
+    term = BAD_CAST strstr((const char *) cur, str);
+    if ((term != NULL) &&
+        ((size_t) (ctxt->input->end - term) >= extraLen + 1)) {
+        ctxt->checkIndex = 0;
+
+        if (term - ctxt->input->cur > INT_MAX / 2)
+            ret = INT_MAX / 2;
+        else
+            ret = term - ctxt->input->cur;
+
+        return(ret);
+    }
+
+    /* Rescan (strLen + extraLen - 1) characters. */
+    rescan = strLen + extraLen - 1;
+    if ((size_t) (end - cur) <= rescan)
+        end = cur;
+    else
+        end -= rescan;
+    index = end - ctxt->input->cur;
+    if (index > INT_MAX / 2) {
+        ctxt->checkIndex = 0;
+        ret = INT_MAX / 2;
+    } else {
+        ctxt->checkIndex = index;
+        ret = -1;
+    }
+
+    return(ret);
+}
+
+/**
+ * htmlParseLookupCommentEnd:
+ * @ctxt: an HTML parser context
+ *
+ * Try to find a comment end tag in the input stream. The search
+ * includes "-->" as well as WHATWG-recommended incorrectly-closed tags.
+ * (See https://html.spec.whatwg.org/multipage/parsing.html#parse-error-incorrectly-closed-comment) + * This function has a side effect of (possibly) incrementing ctxt->checkIndex + * to avoid rescanning sequences of bytes, it DOES change the state of the + * parser, do not use liberally. + * + * Returns the index to the current parsing point if the full sequence is available, -1 otherwise. + */ +static int +htmlParseLookupCommentEnd(htmlParserCtxtPtr ctxt) +{ + int mark = 0; + int offset; + + while (1) { + mark = htmlParseLookupString(ctxt, 2, "--", 2, 0); + if (mark < 0) + break; + if ((NXT(mark+2) == '>') || + ((NXT(mark+2) == '!') && (NXT(mark+3) == '>'))) { + ctxt->checkIndex = 0; + break; + } + offset = (NXT(mark+2) == '!') ? 3 : 2; + if (mark + offset >= ctxt->input->end - ctxt->input->cur) { + ctxt->checkIndex = mark; + return(-1); + } + ctxt->checkIndex = mark + 1; + } + return mark; +} + + +/** + * htmlParseTryOrFinish: + * @ctxt: an HTML parser context + * @terminate: last chunk indicator + * + * Try to progress on parsing + * + * Returns zero if no parsing was possible + */ +static int +htmlParseTryOrFinish(htmlParserCtxtPtr ctxt, int terminate) { + int ret = 0; + htmlParserInputPtr in; + ptrdiff_t avail = 0; + int cur; + + htmlParserNodeInfo node_info; + + while (PARSER_STOPPED(ctxt) == 0) { + + in = ctxt->input; + if (in == NULL) break; + avail = in->end - in->cur; + if ((avail == 0) && (terminate)) { + htmlAutoCloseOnEnd(ctxt); + if ((ctxt->nameNr == 0) && (ctxt->instate != XML_PARSER_EOF)) { + /* + * SAX: end of the document processing. + */ + ctxt->instate = XML_PARSER_EOF; + if ((ctxt->sax) && (ctxt->sax->endDocument != NULL)) + ctxt->sax->endDocument(ctxt->userData); + } + } + if (avail < 1) + goto done; + cur = in->cur[0]; + + switch (ctxt->instate) { + case XML_PARSER_EOF: + /* + * Document parsing is done ! + */ + goto done; + case XML_PARSER_START: + /* + * This is wrong but matches long-standing behavior. In most + * cases, a document starting with an XML declaration will + * specify UTF-8. 
+                 */
+                if (((ctxt->input->flags & XML_INPUT_HAS_ENCODING) == 0) &&
+                    (xmlStrncmp(ctxt->input->cur, BAD_CAST "<?xml", 5) == 0)) {
+                    xmlSwitchEncoding(ctxt, XML_CHAR_ENCODING_UTF8);
+                }
+
+                if ((ctxt->sax) && (ctxt->sax->setDocumentLocator)) {
+                    ctxt->sax->setDocumentLocator(ctxt->userData,
+                            (xmlSAXLocator *) &xmlDefaultSAXLocator);
+                }
+                if ((ctxt->sax) && (ctxt->sax->startDocument) &&
+                    (!ctxt->disableSAX))
+                    ctxt->sax->startDocument(ctxt->userData);
+
+                /* Allow callback to modify state */
+                if (ctxt->instate == XML_PARSER_START)
+                    ctxt->instate = XML_PARSER_MISC;
+                break;
+            case XML_PARSER_START_TAG: {
+                const xmlChar *name;
+                int next;
+                const htmlElemDesc * info;
+
+                /*
+                 * not enough chars in buffer
+                 */
+                if (avail < 2)
+                    goto done;
+                cur = in->cur[0];
+                next = in->cur[1];
+                if (cur != '<') {
+                    ctxt->instate = XML_PARSER_CONTENT;
+                    break;
+                }
+                if (next == '/') {
+                    ctxt->instate = XML_PARSER_END_TAG;
+                    ctxt->checkIndex = 0;
+                    break;
+                }
+                if ((!terminate) &&
+                    (htmlParseLookupGt(ctxt) < 0))
+                    goto done;
+
+                /* Capture start position */
+                if (ctxt->record_info) {
+                    node_info.begin_pos = ctxt->input->consumed +
+                                          (CUR_PTR - ctxt->input->base);
+                    node_info.begin_line = ctxt->input->line;
+                }
+
+
+                htmlParseStartTag(ctxt);
+                name = ctxt->name;
+                if (name == NULL)
+                    break;
+
+                /*
+                 * Check for an Empty Element labeled the XML/SGML way
+                 */
+                if ((CUR == '/') && (NXT(1) == '>')) {
+                    SKIP(2);
+                    htmlParserFinishElementParsing(ctxt);
+                    if ((ctxt->options & HTML_PARSE_HTML5) == 0) {
+                        if ((ctxt->sax != NULL) &&
+                            (ctxt->sax->endElement != NULL))
+                            ctxt->sax->endElement(ctxt->userData, name);
+                    }
+                    htmlnamePop(ctxt);
+                    ctxt->instate = XML_PARSER_CONTENT;
+                    break;
+                }
+
+                if (CUR != '>')
+                    break;
+                SKIP(1);
+
+                /*
+                 * Lookup the info for that element.
+                 */
+                info = htmlTagLookup(name);
+
+                /*
+                 * Check for an Empty Element from DTD definition
+                 */
+                if ((info != NULL) && (info->empty)) {
+                    htmlParserFinishElementParsing(ctxt);
+                    if ((ctxt->options & HTML_PARSE_HTML5) == 0) {
+                        if ((ctxt->sax != NULL) &&
+                            (ctxt->sax->endElement != NULL))
+                            ctxt->sax->endElement(ctxt->userData, name);
+                    }
+                    htmlnamePop(ctxt);
+                }
+
+                if (info != NULL)
+                    ctxt->endCheckState = info->dataMode;
+
+                if (ctxt->record_info)
+                    htmlNodeInfoPush(ctxt, &node_info);
+
+                ctxt->instate = XML_PARSER_CONTENT;
+                break;
+            }
+            case XML_PARSER_MISC:
+            case XML_PARSER_PROLOG:
+            case XML_PARSER_CONTENT:
+            case XML_PARSER_EPILOG: {
+                int mode;
+
+                if ((ctxt->instate == XML_PARSER_MISC) ||
+                    (ctxt->instate == XML_PARSER_PROLOG)) {
+                    SKIP_BLANKS;
+                    avail = in->end - in->cur;
+                }
+
+                if (avail < 1)
+                    goto done;
+                cur = in->cur[0];
+                mode = ctxt->endCheckState;
+
+                if (mode != 0) {
+                    while ((PARSER_STOPPED(ctxt) == 0) &&
+                           (in->cur < in->end)) {
+                        size_t extra;
+
+                        extra = strlen((const char *) ctxt->name) + 2;
+
+                        if ((!terminate) &&
+                            (htmlParseLookupString(ctxt, 0, "<", 1,
+                                                   extra) < 0))
+                            goto done;
+                        ctxt->checkIndex = 0;
+
+                        if (htmlParseCharData(ctxt))
+                            break;
+                    }
+
+                    break;
+                } else if (cur == '<') {
+                    int next;
+
+                    if (avail < 2) {
+                        if (!terminate)
+                            goto done;
+                        next = ' ';
+                    } else {
+                        next = in->cur[1];
+                    }
+
+                    if (next == '!') {
+                        if ((!terminate) && (avail < 4))
+                            goto done;
+                        if ((in->cur[2] == '-') && (in->cur[3] == '-')) {
+                            if ((!terminate) &&
+                                (htmlParseLookupCommentEnd(ctxt) < 0))
+                                goto done;
+                            SKIP(4);
+                            htmlParseComment(ctxt, /* bogus */ 0);
+                            break;
+                        }
+
+                        if ((!terminate) && (avail < 9))
+                            goto done;
+                        if ((UPP(2) == 'D') && (UPP(3) == 'O') &&
+                            (UPP(4) == 'C') && (UPP(5) == 'T') &&
+                            (UPP(6) == 'Y') && (UPP(7) == 'P') &&
+                            (UPP(8) == 'E')) {
+                            if ((!terminate) &&
+                                (htmlParseLookupString(ctxt, 9,
">", 1, + 0) < 0)) + goto done; + htmlParseDocTypeDecl(ctxt); + if (ctxt->instate == XML_PARSER_MISC) + ctxt->instate = XML_PARSER_PROLOG; + } else { + if ((!terminate) && + (htmlParseLookupString(ctxt, 2, ">", 1, 0) < 0)) + goto done; + SKIP(2); + htmlParseComment(ctxt, /* bogus */ 1); + } + } else if (next == '?') { + if ((!terminate) && + (htmlParseLookupString(ctxt, 2, ">", 1, 0) < 0)) + goto done; + SKIP(1); + htmlParseComment(ctxt, /* bogus */ 1); + } else if (next == '/') { + ctxt->instate = XML_PARSER_END_TAG; + ctxt->checkIndex = 0; + break; + } else if (IS_ASCII_LETTER(next)) { + if ((!terminate) && (next == 0)) + goto done; + ctxt->instate = XML_PARSER_START_TAG; + ctxt->checkIndex = 0; + break; + } else { + ctxt->instate = XML_PARSER_CONTENT; + htmlCheckParagraph(ctxt); + if ((ctxt->sax != NULL) && (!ctxt->disableSAX) && + (ctxt->sax->characters != NULL)) + ctxt->sax->characters(ctxt->userData, + BAD_CAST "<", 1); + SKIP(1); + } + } else { + /* + * check that the text sequence is complete + * before handing out the data to the parser + * to avoid problems with erroneous end of + * data detection. + */ + if ((!terminate) && + (htmlParseLookupString(ctxt, 0, "<", 1, 0) < 0)) + goto done; + ctxt->checkIndex = 0; + htmlParseCharData(ctxt); + } + + break; + } + case XML_PARSER_END_TAG: + if ((terminate) && (avail == 2)) { + htmlCheckParagraph(ctxt); + if ((ctxt->sax != NULL) && (!ctxt->disableSAX) && + (ctxt->sax->characters != NULL)) + ctxt->sax->characters(ctxt->userData, + BAD_CAST "nameNr == 0) { + ctxt->instate = XML_PARSER_EPILOG; + } else { + ctxt->instate = XML_PARSER_CONTENT; + } + ctxt->checkIndex = 0; + break; + default: + htmlParseErr(ctxt, XML_ERR_INTERNAL_ERROR, + "HPP: internal error\n", NULL, NULL); + ctxt->instate = XML_PARSER_EOF; + break; + } + } +done: + if ((avail == 0) && (terminate)) { + htmlAutoCloseOnEnd(ctxt); + if ((ctxt->nameNr == 0) && (ctxt->instate != XML_PARSER_EOF)) { + /* + * SAX: end of the document processing. + */ + ctxt->instate = XML_PARSER_EOF; + if ((ctxt->sax) && (ctxt->sax->endDocument != NULL)) + ctxt->sax->endDocument(ctxt->userData); + } + } + if ((!(ctxt->options & HTML_PARSE_NODEFDTD)) && (ctxt->myDoc != NULL) && + ((terminate) || (ctxt->instate == XML_PARSER_EOF) || + (ctxt->instate == XML_PARSER_EPILOG))) { + xmlDtdPtr dtd; + dtd = xmlGetIntSubset(ctxt->myDoc); + if (dtd == NULL) { + ctxt->myDoc->intSubset = + xmlCreateIntSubset(ctxt->myDoc, BAD_CAST "html", + BAD_CAST "-//W3C//DTD HTML 4.0 Transitional//EN", + BAD_CAST "http://www.w3.org/TR/REC-html40/loose.dtd"); + if (ctxt->myDoc->intSubset == NULL) + htmlErrMemory(ctxt); + } + } + return(ret); +} + +/** + * htmlParseChunk: + * @ctxt: an HTML parser context + * @chunk: chunk of memory + * @size: size of chunk in bytes + * @terminate: last chunk indicator + * + * Parse a chunk of memory in push parser mode. + * + * Assumes that the parser context was initialized with + * htmlCreatePushParserCtxt. + * + * The last chunk, which will often be empty, must be marked with + * the @terminate flag. With the default SAX callbacks, the resulting + * document will be available in ctxt->myDoc. This pointer will not + * be freed by the library. + * + * If the document isn't well-formed, ctxt->myDoc is set to NULL. + * + * Returns an xmlParserErrors code (0 on success). 
+ */ +int +htmlParseChunk(htmlParserCtxtPtr ctxt, const char *chunk, int size, + int terminate) { + if ((ctxt == NULL) || (ctxt->input == NULL)) + return(XML_ERR_ARGUMENT); + if (PARSER_STOPPED(ctxt) != 0) + return(ctxt->errNo); + if ((size > 0) && (chunk != NULL) && (ctxt->input != NULL) && + (ctxt->input->buf != NULL)) { + size_t pos = ctxt->input->cur - ctxt->input->base; + int res; + + res = xmlParserInputBufferPush(ctxt->input->buf, size, chunk); + xmlBufUpdateInput(ctxt->input->buf->buffer, ctxt->input, pos); + if (res < 0) { + htmlParseErr(ctxt, ctxt->input->buf->error, + "xmlParserInputBufferPush failed", NULL, NULL); + xmlHaltParser(ctxt); + return (ctxt->errNo); + } + } + htmlParseTryOrFinish(ctxt, terminate); + if (terminate) { + if (ctxt->instate != XML_PARSER_EOF) { + if ((ctxt->sax) && (ctxt->sax->endDocument != NULL)) + ctxt->sax->endDocument(ctxt->userData); + } + ctxt->instate = XML_PARSER_EOF; + } + return((xmlParserErrors) ctxt->errNo); +} + +/************************************************************************ + * * + * User entry points * + * * + ************************************************************************/ + +/** + * htmlCreatePushParserCtxt: + * @sax: a SAX handler (optional) + * @user_data: The user data returned on SAX callbacks (optional) + * @chunk: a pointer to an array of chars (optional) + * @size: number of chars in the array + * @filename: only used for error reporting (optional) + * @enc: encoding (deprecated, pass XML_CHAR_ENCODING_NONE) + * + * Create a parser context for using the HTML parser in push mode. + * + * Returns the new parser context or NULL if a memory allocation + * failed. + */ +htmlParserCtxtPtr +htmlCreatePushParserCtxt(htmlSAXHandlerPtr sax, void *user_data, + const char *chunk, int size, const char *filename, + xmlCharEncoding enc) { + htmlParserCtxtPtr ctxt; + htmlParserInputPtr input; + const char *encoding; + + ctxt = htmlNewSAXParserCtxt(sax, user_data); + if (ctxt == NULL) + return(NULL); + + encoding = xmlGetCharEncodingName(enc); + input = xmlNewPushInput(filename, chunk, size); + if (input == NULL) { + htmlFreeParserCtxt(ctxt); + return(NULL); + } + + if (xmlCtxtPushInput(ctxt, input) < 0) { + xmlFreeInputStream(input); + xmlFreeParserCtxt(ctxt); + return(NULL); + } + + if (encoding != NULL) + xmlSwitchEncodingName(ctxt, encoding); + + return(ctxt); +} +#endif /* LIBXML_PUSH_ENABLED */ + +/** + * htmlSAXParseDoc: + * @cur: a pointer to an array of xmlChar + * @encoding: a free form C string describing the HTML document encoding, or NULL + * @sax: the SAX handler block + * @userData: if using SAX, this pointer will be provided on callbacks. + * + * DEPRECATED: Use htmlNewSAXParserCtxt and htmlCtxtReadDoc. + * + * Parse an HTML in-memory document. If sax is not NULL, use the SAX callbacks + * to handle parse events. If sax is NULL, fallback to the default DOM + * behavior and return a tree. + * + * Returns the resulting document tree unless SAX is NULL or the document is + * not well formed. 
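+ *
+ * A minimal SAX-only sketch (myComment and myState are hypothetical
+ * user code); with such a handler no tree is built and the return
+ * value is NULL:
+ *
+ *     htmlSAXHandler sax;
+ *
+ *     memset(&sax, 0, sizeof(sax));
+ *     sax.comment = myComment;
+ *     htmlSAXParseDoc(BAD_CAST "<p>text<!-- note --></p>", NULL,
+ *                     &sax, &myState);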
+ */
+
+htmlDocPtr
+htmlSAXParseDoc(const xmlChar *cur, const char *encoding,
+                htmlSAXHandlerPtr sax, void *userData) {
+    htmlDocPtr ret;
+    htmlParserCtxtPtr ctxt;
+
+    if (cur == NULL)
+        return(NULL);
+
+    ctxt = htmlCreateDocParserCtxt(cur, NULL, encoding);
+    if (ctxt == NULL)
+        return(NULL);
+
+    if (sax != NULL) {
+        *ctxt->sax = *sax;
+        ctxt->userData = userData;
+    }
+
+    htmlParseDocument(ctxt);
+    ret = ctxt->myDoc;
+    htmlFreeParserCtxt(ctxt);
+
+    return(ret);
+}
+
+/**
+ * htmlParseDoc:
+ * @cur: a pointer to an array of xmlChar
+ * @encoding: the encoding (optional)
+ *
+ * DEPRECATED: Use htmlReadDoc.
+ *
+ * Parse an HTML in-memory document and build a tree.
+ *
+ * This function uses deprecated global parser options.
+ *
+ * Returns the resulting document tree
+ */
+
+htmlDocPtr
+htmlParseDoc(const xmlChar *cur, const char *encoding) {
+    return(htmlSAXParseDoc(cur, encoding, NULL, NULL));
+}
+
+
+/**
+ * htmlCreateFileParserCtxt:
+ * @filename: the filename
+ * @encoding: optional encoding
+ *
+ * DEPRECATED: Use htmlNewParserCtxt and htmlCtxtReadFile.
+ *
+ * Create a parser context to read from a file.
+ *
+ * A non-NULL encoding overrides encoding declarations in the document.
+ *
+ * Automatic support for ZLIB/Compress compressed documents is provided
+ * by default if found at compile-time.
+ *
+ * Returns the new parser context or NULL if a memory allocation failed.
+ */
+htmlParserCtxtPtr
+htmlCreateFileParserCtxt(const char *filename, const char *encoding)
+{
+    htmlParserCtxtPtr ctxt;
+    htmlParserInputPtr input;
+
+    if (filename == NULL)
+        return(NULL);
+
+    ctxt = htmlNewParserCtxt();
+    if (ctxt == NULL) {
+        return(NULL);
+    }
+
+    input = xmlCtxtNewInputFromUrl(ctxt, filename, NULL, encoding, 0);
+    if (input == NULL) {
+        xmlFreeParserCtxt(ctxt);
+        return(NULL);
+    }
+    if (xmlCtxtPushInput(ctxt, input) < 0) {
+        xmlFreeInputStream(input);
+        xmlFreeParserCtxt(ctxt);
+        return(NULL);
+    }
+
+    return(ctxt);
+}
+
+/**
+ * htmlSAXParseFile:
+ * @filename: the filename
+ * @encoding: encoding (optional)
+ * @sax: the SAX handler block
+ * @userData: if using SAX, this pointer will be provided on callbacks.
+ *
+ * DEPRECATED: Use htmlNewSAXParserCtxt and htmlCtxtReadFile.
+ *
+ * Parse an HTML file and build a tree. Automatic support for ZLIB/Compress
+ * compressed documents is provided by default if found at compile-time.
+ * It uses the given SAX function block to handle the parsing callbacks.
+ * If sax is NULL, fall back to the default DOM tree building routines.
+ *
+ * Returns the resulting document tree unless SAX is NULL or the document is
+ * not well formed.
+ */
+
+htmlDocPtr
+htmlSAXParseFile(const char *filename, const char *encoding, htmlSAXHandlerPtr sax,
+                 void *userData) {
+    htmlDocPtr ret;
+    htmlParserCtxtPtr ctxt;
+    htmlSAXHandlerPtr oldsax = NULL;
+
+    ctxt = htmlCreateFileParserCtxt(filename, encoding);
+    if (ctxt == NULL) return(NULL);
+    if (sax != NULL) {
+        oldsax = ctxt->sax;
+        ctxt->sax = sax;
+        ctxt->userData = userData;
+    }
+
+    htmlParseDocument(ctxt);
+
+    ret = ctxt->myDoc;
+    if (sax != NULL) {
+        ctxt->sax = oldsax;
+        ctxt->userData = NULL;
+    }
+    htmlFreeParserCtxt(ctxt);
+
+    return(ret);
+}
+
+/**
+ * htmlParseFile:
+ * @filename: the filename
+ * @encoding: encoding (optional)
+ *
+ * Parse an HTML file and build a tree.
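+ *
+ * For example ("page.html" being an arbitrary file name):
+ *
+ *     htmlDocPtr doc = htmlParseFile("page.html", NULL);
+ *     if (doc != NULL)
+ *         xmlFreeDoc(doc);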
+ * + * Returns the resulting document tree + */ + +htmlDocPtr +htmlParseFile(const char *filename, const char *encoding) { + return(htmlSAXParseFile(filename, encoding, NULL, NULL)); +} + +/** + * htmlHandleOmittedElem: + * @val: int 0 or 1 + * + * DEPRECATED: Use HTML_PARSE_NOIMPLIED + * + * Set and return the previous value for handling HTML omitted tags. + * + * Returns the last value for 0 for no handling, 1 for auto insertion. + */ + +int +htmlHandleOmittedElem(int val) { + int old = htmlOmittedDefaultValue; + + htmlOmittedDefaultValue = val; + return(old); +} + +/** + * htmlElementAllowedHere: + * @parent: HTML parent element + * @elt: HTML element + * + * DEPRECATED: Don't use. + * + * Returns 1 + */ +int +htmlElementAllowedHere(const htmlElemDesc* parent ATTRIBUTE_UNUSED, + const xmlChar* elt ATTRIBUTE_UNUSED) { + return(1); +} + +/** + * htmlElementStatusHere: + * @parent: HTML parent element + * @elt: HTML element + * + * DEPRECATED: Don't use. + * + * Returns HTML_VALID + */ +htmlStatus +htmlElementStatusHere(const htmlElemDesc* parent ATTRIBUTE_UNUSED, + const htmlElemDesc* elt ATTRIBUTE_UNUSED) { + return(HTML_VALID); +} + +/** + * htmlAttrAllowed: + * @elt: HTML element + * @attr: HTML attribute + * @legacy: whether to allow deprecated attributes + * + * DEPRECATED: Don't use. + * + * Returns HTML_VALID + */ +htmlStatus +htmlAttrAllowed(const htmlElemDesc* elt ATTRIBUTE_UNUSED, + const xmlChar* attr ATTRIBUTE_UNUSED, + int legacy ATTRIBUTE_UNUSED) { + return(HTML_VALID); +} + +/** + * htmlNodeStatus: + * @node: an htmlNodePtr in a tree + * @legacy: whether to allow deprecated elements (YES is faster here + * for Element nodes) + * + * DEPRECATED: Don't use. + * + * Returns HTML_VALID + */ +htmlStatus +htmlNodeStatus(htmlNodePtr node ATTRIBUTE_UNUSED, + int legacy ATTRIBUTE_UNUSED) { + return(HTML_VALID); +} + +/************************************************************************ + * * + * New set (2.6.0) of simpler and more flexible APIs * + * * + ************************************************************************/ +/** + * DICT_FREE: + * @str: a string + * + * Free a string if it is not owned by the "dict" dictionary in the + * current scope + */ +#define DICT_FREE(str) \ + if ((str) && ((!dict) || \ + (xmlDictOwns(dict, (const xmlChar *)(str)) == 0))) \ + xmlFree((char *)(str)); + +/** + * htmlCtxtReset: + * @ctxt: an HTML parser context + * + * Reset a parser context + */ +void +htmlCtxtReset(htmlParserCtxtPtr ctxt) +{ + xmlParserInputPtr input; + xmlDictPtr dict; + + if (ctxt == NULL) + return; + + dict = ctxt->dict; + + while ((input = xmlCtxtPopInput(ctxt)) != NULL) { /* Non consuming */ + xmlFreeInputStream(input); + } + ctxt->inputNr = 0; + ctxt->input = NULL; + + ctxt->spaceNr = 0; + if (ctxt->spaceTab != NULL) { + ctxt->spaceTab[0] = -1; + ctxt->space = &ctxt->spaceTab[0]; + } else { + ctxt->space = NULL; + } + + + ctxt->nodeNr = 0; + ctxt->node = NULL; + + ctxt->nameNr = 0; + ctxt->name = NULL; + + ctxt->nsNr = 0; + + DICT_FREE(ctxt->version); + ctxt->version = NULL; + DICT_FREE(ctxt->encoding); + ctxt->encoding = NULL; + DICT_FREE(ctxt->extSubURI); + ctxt->extSubURI = NULL; + DICT_FREE(ctxt->extSubSystem); + ctxt->extSubSystem = NULL; + + if (ctxt->directory != NULL) { + xmlFree(ctxt->directory); + ctxt->directory = NULL; + } + + if (ctxt->myDoc != NULL) + xmlFreeDoc(ctxt->myDoc); + ctxt->myDoc = NULL; + + ctxt->standalone = -1; + ctxt->hasExternalSubset = 0; + ctxt->hasPErefs = 0; + ctxt->html = 1; + ctxt->instate = XML_PARSER_START; + + 
ctxt->wellFormed = 1; + ctxt->nsWellFormed = 1; + ctxt->disableSAX = 0; + ctxt->valid = 1; + ctxt->vctxt.userData = ctxt; + ctxt->vctxt.flags = XML_VCTXT_USE_PCTXT; + ctxt->vctxt.error = xmlParserValidityError; + ctxt->vctxt.warning = xmlParserValidityWarning; + ctxt->record_info = 0; + ctxt->checkIndex = 0; + ctxt->endCheckState = 0; + ctxt->inSubset = 0; + ctxt->errNo = XML_ERR_OK; + ctxt->depth = 0; + ctxt->catalogs = NULL; + xmlInitNodeInfoSeq(&ctxt->node_seq); + + if (ctxt->attsDefault != NULL) { + xmlHashFree(ctxt->attsDefault, xmlHashDefaultDeallocator); + ctxt->attsDefault = NULL; + } + if (ctxt->attsSpecial != NULL) { + xmlHashFree(ctxt->attsSpecial, NULL); + ctxt->attsSpecial = NULL; + } + + ctxt->nbErrors = 0; + ctxt->nbWarnings = 0; + if (ctxt->lastError.code != XML_ERR_OK) + xmlResetError(&ctxt->lastError); +} + +static int +htmlCtxtSetOptionsInternal(xmlParserCtxtPtr ctxt, int options, int keepMask) +{ + int allMask; + + if (ctxt == NULL) + return(-1); + + allMask = HTML_PARSE_RECOVER | + HTML_PARSE_HTML5 | + HTML_PARSE_NODEFDTD | + HTML_PARSE_NOERROR | + HTML_PARSE_NOWARNING | + HTML_PARSE_PEDANTIC | + HTML_PARSE_NOBLANKS | + HTML_PARSE_NONET | + HTML_PARSE_NOIMPLIED | + HTML_PARSE_COMPACT | + HTML_PARSE_HUGE | + HTML_PARSE_IGNORE_ENC | + HTML_PARSE_BIG_LINES; + + ctxt->options = (ctxt->options & keepMask) | (options & allMask); + + /* + * For some options, struct members are historically the source + * of truth. See xmlCtxtSetOptionsInternal. + */ + ctxt->keepBlanks = (options & HTML_PARSE_NOBLANKS) ? 0 : 1; + + /* + * Changing SAX callbacks is a bad idea. This should be fixed. + */ + if (options & HTML_PARSE_NOBLANKS) { + ctxt->sax->ignorableWhitespace = xmlSAX2IgnorableWhitespace; + } + if (options & HTML_PARSE_HUGE) { + if (ctxt->dict != NULL) + xmlDictSetLimit(ctxt->dict, 0); + } + + /* + * It would be useful to allow this feature. + */ + ctxt->dictNames = 0; + + ctxt->linenumbers = 1; + + return(options & ~allMask); +} + +/** + * htmlCtxtSetOptions: + * @ctxt: an HTML parser context + * @options: a bitmask of xmlParserOption values + * + * Applies the options to the parser context. Unset options are + * cleared. + * + * Available since 2.14.0. With older versions, you can use + * htmlCtxtUseOptions. + * + * HTML_PARSE_RECOVER + * + * No effect as of 2.14.0. + * + * HTML_PARSE_HTML5 + * + * Make the tokenizer emit a SAX callback for each token. This results + * in unbalanced invocations of startElement and endElement. + * + * For now, this is only usable with custom SAX callbacks. + * + * HTML_PARSE_NODEFDTD + * + * Do not default to a doctype if none was found. + * + * HTML_PARSE_NOERROR + * + * Disable error and warning reports to the error handlers. + * Errors are still accessible with xmlCtxtGetLastError. + * + * HTML_PARSE_NOWARNING + * + * Disable warning reports. + * + * HTML_PARSE_PEDANTIC + * + * No effect. + * + * HTML_PARSE_NOBLANKS + * + * Remove some text nodes containing only whitespace from the + * result document. Which nodes are removed depends on a conservative + * heuristic. The reindenting feature of the serialization code relies + * on this option to be set when parsing. Use of this option is + * DISCOURAGED. + * + * HTML_PARSE_NONET + * + * No effect. + * + * HTML_PARSE_NOIMPLIED + * + * Do not add implied html, head or body elements. + * + * HTML_PARSE_COMPACT + * + * Store small strings directly in the node struct to save + * memory. + * + * HTML_PARSE_HUGE + * + * Relax some internal limits. + * + * Available since 2.14.0. 
Use XML_PARSE_HUGE with older
+ * versions.
+ *
+ * Maximum size of text nodes, tags, comments, CDATA sections
+ *
+ * normal: 10M
+ * huge: 1B
+ *
+ * Maximum size of names, system literals, pubid literals
+ *
+ * normal: 50K
+ * huge: 10M
+ *
+ * Maximum nesting depth of elements
+ *
+ * normal: 256
+ * huge: 2048
+ *
+ * HTML_PARSE_IGNORE_ENC
+ *
+ * Ignore the encoding in the HTML declaration. This option is
+ * mostly unneeded these days. The only effect is to enforce
+ * UTF-8 decoding of ASCII-like data.
+ *
+ * HTML_PARSE_BIG_LINES
+ *
+ * Enable reporting of line numbers larger than 65535.
+ *
+ * Available since 2.14.0.
+ *
+ * Returns 0 in case of success, the set of unknown or unimplemented options
+ * in case of error.
+ */
+int
+htmlCtxtSetOptions(xmlParserCtxtPtr ctxt, int options)
+{
+    return(htmlCtxtSetOptionsInternal(ctxt, options, 0));
+}
+
+/**
+ * htmlCtxtUseOptions:
+ * @ctxt: an HTML parser context
+ * @options: a combination of htmlParserOption(s)
+ *
+ * DEPRECATED: Use htmlCtxtSetOptions.
+ *
+ * Applies the options to the parser context. The following options
+ * are never cleared and can only be enabled:
+ *
+ * HTML_PARSE_NODEFDTD
+ * HTML_PARSE_NOERROR
+ * HTML_PARSE_NOWARNING
+ * HTML_PARSE_NOIMPLIED
+ * HTML_PARSE_COMPACT
+ * HTML_PARSE_HUGE
+ * HTML_PARSE_IGNORE_ENC
+ * HTML_PARSE_BIG_LINES
+ *
+ * Returns 0 in case of success, the set of unknown or unimplemented options
+ * in case of error.
+ */
+int
+htmlCtxtUseOptions(htmlParserCtxtPtr ctxt, int options)
+{
+    int keepMask;
+
+    /*
+     * For historic reasons, some options can only be enabled.
+     */
+    keepMask = HTML_PARSE_NODEFDTD |
+               HTML_PARSE_NOERROR |
+               HTML_PARSE_NOWARNING |
+               HTML_PARSE_NOIMPLIED |
+               HTML_PARSE_COMPACT |
+               HTML_PARSE_HUGE |
+               HTML_PARSE_IGNORE_ENC |
+               HTML_PARSE_BIG_LINES;
+
+    return(htmlCtxtSetOptionsInternal(ctxt, options, keepMask));
+}
+
+/**
+ * htmlCtxtParseDocument:
+ * @ctxt: an HTML parser context
+ * @input: parser input
+ *
+ * Parse an HTML document and return the resulting document tree.
+ *
+ * Available since 2.13.0.
+ *
+ * Returns the resulting document tree or NULL
+ */
+htmlDocPtr
+htmlCtxtParseDocument(htmlParserCtxtPtr ctxt, xmlParserInputPtr input)
+{
+    htmlDocPtr ret;
+
+    if ((ctxt == NULL) || (input == NULL))
+        return(NULL);
+
+    /* assert(ctxt->inputNr == 0); */
+    while (ctxt->inputNr > 0)
+        xmlFreeInputStream(xmlCtxtPopInput(ctxt));
+
+    if (xmlCtxtPushInput(ctxt, input) < 0) {
+        xmlFreeInputStream(input);
+        return(NULL);
+    }
+
+    ctxt->html = 1;
+    htmlParseDocument(ctxt);
+
+    if (ctxt->errNo != XML_ERR_NO_MEMORY) {
+        ret = ctxt->myDoc;
+    } else {
+        ret = NULL;
+        xmlFreeDoc(ctxt->myDoc);
+    }
+    ctxt->myDoc = NULL;
+
+    /* assert(ctxt->inputNr == 1); */
+    while (ctxt->inputNr > 0)
+        xmlFreeInputStream(xmlCtxtPopInput(ctxt));
+
+    return(ret);
+}
+
+/**
+ * htmlReadDoc:
+ * @str: a pointer to a zero terminated string
+ * @url: only used for error reporting (optional)
+ * @encoding: the document encoding (optional)
+ * @options: a combination of htmlParserOptions
+ *
+ * Convenience function to parse an HTML document from a zero-terminated
+ * string.
+ *
+ * See htmlCtxtReadDoc for details.
+ *
+ * Returns the resulting document tree.
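+ *
+ * For example, a minimal sketch:
+ *
+ *     htmlDocPtr doc;
+ *
+ *     doc = htmlReadDoc(BAD_CAST "<p>hello</p>", NULL, NULL,
+ *                       HTML_PARSE_NOERROR | HTML_PARSE_NOWARNING);
+ *     if (doc != NULL)
+ *         xmlFreeDoc(doc);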
+ */ +htmlDocPtr +htmlReadDoc(const xmlChar *str, const char *url, const char *encoding, + int options) +{ + htmlParserCtxtPtr ctxt; + xmlParserInputPtr input; + htmlDocPtr doc; + + ctxt = htmlNewParserCtxt(); + if (ctxt == NULL) + return(NULL); + + htmlCtxtUseOptions(ctxt, options); + + input = xmlCtxtNewInputFromString(ctxt, url, (const char *) str, encoding, + XML_INPUT_BUF_STATIC); + + doc = htmlCtxtParseDocument(ctxt, input); + + htmlFreeParserCtxt(ctxt); + return(doc); +} + +/** + * htmlReadFile: + * @filename: a file or URL + * @encoding: the document encoding (optional) + * @options: a combination of htmlParserOptions + * + * Convenience function to parse an HTML file from the filesystem, + * the network or a global user-defined resource loader. + * + * See htmlCtxtReadFile for details. + * + * Returns the resulting document tree. + */ +htmlDocPtr +htmlReadFile(const char *filename, const char *encoding, int options) +{ + htmlParserCtxtPtr ctxt; + xmlParserInputPtr input; + htmlDocPtr doc; + + ctxt = htmlNewParserCtxt(); + if (ctxt == NULL) + return(NULL); + + htmlCtxtUseOptions(ctxt, options); + + input = xmlCtxtNewInputFromUrl(ctxt, filename, NULL, encoding, 0); + + doc = htmlCtxtParseDocument(ctxt, input); + + htmlFreeParserCtxt(ctxt); + return(doc); +} + +/** + * htmlReadMemory: + * @buffer: a pointer to a char array + * @size: the size of the array + * @url: only used for error reporting (optional) + * @encoding: the document encoding, or NULL + * @options: a combination of htmlParserOption(s) + * + * Convenience function to parse an HTML document from memory. + * The input buffer must not contain any terminating null bytes. + * + * See htmlCtxtReadMemory for details. + * + * Returns the resulting document tree + */ +htmlDocPtr +htmlReadMemory(const char *buffer, int size, const char *url, + const char *encoding, int options) +{ + htmlParserCtxtPtr ctxt; + xmlParserInputPtr input; + htmlDocPtr doc; + + if (size < 0) + return(NULL); + + ctxt = htmlNewParserCtxt(); + if (ctxt == NULL) + return(NULL); + + htmlCtxtUseOptions(ctxt, options); + + input = xmlCtxtNewInputFromMemory(ctxt, url, buffer, size, encoding, + XML_INPUT_BUF_STATIC); + + doc = htmlCtxtParseDocument(ctxt, input); + + htmlFreeParserCtxt(ctxt); + return(doc); +} + +/** + * htmlReadFd: + * @fd: an open file descriptor + * @url: only used for error reporting (optional) + * @encoding: the document encoding, or NULL + * @options: a combination of htmlParserOptions + * + * Convenience function to parse an HTML document from a + * file descriptor. + * + * NOTE that the file descriptor will not be closed when the + * context is freed or reset. + * + * See htmlCtxtReadFd for details. + * + * Returns the resulting document tree + */ +htmlDocPtr +htmlReadFd(int fd, const char *url, const char *encoding, int options) +{ + htmlParserCtxtPtr ctxt; + xmlParserInputPtr input; + htmlDocPtr doc; + + ctxt = htmlNewParserCtxt(); + if (ctxt == NULL) + return(NULL); + + htmlCtxtUseOptions(ctxt, options); + + input = xmlCtxtNewInputFromFd(ctxt, url, fd, encoding, 0); + + doc = htmlCtxtParseDocument(ctxt, input); + + htmlFreeParserCtxt(ctxt); + return(doc); +} + +/** + * htmlReadIO: + * @ioread: an I/O read function + * @ioclose: an I/O close function (optional) + * @ioctx: an I/O handler + * @url: only used for error reporting (optional) + * @encoding: the document encoding (optional) + * @options: a combination of htmlParserOption(s) + * + * Convenience function to parse an HTML document from I/O functions + * and context. 
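+ *
+ * A minimal sketch with FILE-based callbacks (myRead and myClose are
+ * hypothetical helpers, not libxml2 functions):
+ *
+ *     static int myRead(void *ctx, char *buf, int len) {
+ *         return((int) fread(buf, 1, len, (FILE *) ctx));
+ *     }
+ *     static int myClose(void *ctx) {
+ *         return(fclose((FILE *) ctx));
+ *     }
+ *
+ *     doc = htmlReadIO(myRead, myClose, fp, "doc.html", NULL, 0);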
+ *
+ * See htmlCtxtReadIO for details.
+ *
+ * Returns the resulting document tree
+ */
+htmlDocPtr
+htmlReadIO(xmlInputReadCallback ioread, xmlInputCloseCallback ioclose,
+           void *ioctx, const char *url, const char *encoding, int options)
+{
+    htmlParserCtxtPtr ctxt;
+    xmlParserInputPtr input;
+    htmlDocPtr doc;
+
+    ctxt = htmlNewParserCtxt();
+    if (ctxt == NULL)
+        return (NULL);
+
+    htmlCtxtUseOptions(ctxt, options);
+
+    input = xmlCtxtNewInputFromIO(ctxt, url, ioread, ioclose, ioctx,
+                                  encoding, 0);
+
+    doc = htmlCtxtParseDocument(ctxt, input);
+
+    htmlFreeParserCtxt(ctxt);
+    return(doc);
+}
+
+/**
+ * htmlCtxtReadDoc:
+ * @ctxt: an HTML parser context
+ * @str: a pointer to a zero terminated string
+ * @URL: only used for error reporting (optional)
+ * @encoding: the document encoding (optional)
+ * @options: a combination of htmlParserOptions
+ *
+ * Parse an HTML in-memory document and build a tree.
+ *
+ * See htmlCtxtUseOptions for details.
+ *
+ * Returns the resulting document tree
+ */
+htmlDocPtr
+htmlCtxtReadDoc(htmlParserCtxtPtr ctxt, const xmlChar *str,
+                const char *URL, const char *encoding, int options)
+{
+    xmlParserInputPtr input;
+
+    if (ctxt == NULL)
+        return (NULL);
+
+    htmlCtxtReset(ctxt);
+    htmlCtxtUseOptions(ctxt, options);
+
+    input = xmlCtxtNewInputFromString(ctxt, URL, (const char *) str,
+                                      encoding, 0);
+
+    return(htmlCtxtParseDocument(ctxt, input));
+}
+
+/**
+ * htmlCtxtReadFile:
+ * @ctxt: an HTML parser context
+ * @filename: a file or URL
+ * @encoding: the document encoding (optional)
+ * @options: a combination of htmlParserOptions
+ *
+ * Parse an HTML file from the filesystem, the network or a
+ * user-defined resource loader.
+ *
+ * See htmlCtxtUseOptions for details.
+ *
+ * Returns the resulting document tree
+ */
+htmlDocPtr
+htmlCtxtReadFile(htmlParserCtxtPtr ctxt, const char *filename,
+                 const char *encoding, int options)
+{
+    xmlParserInputPtr input;
+
+    if (ctxt == NULL)
+        return (NULL);
+
+    htmlCtxtReset(ctxt);
+    htmlCtxtUseOptions(ctxt, options);
+
+    input = xmlCtxtNewInputFromUrl(ctxt, filename, NULL, encoding, 0);
+
+    return(htmlCtxtParseDocument(ctxt, input));
+}
+
+/**
+ * htmlCtxtReadMemory:
+ * @ctxt: an HTML parser context
+ * @buffer: a pointer to a char array
+ * @size: the size of the array
+ * @URL: only used for error reporting (optional)
+ * @encoding: the document encoding (optional)
+ * @options: a combination of htmlParserOptions
+ *
+ * Parse an HTML in-memory document and build a tree. The input buffer must
+ * not contain any terminating null bytes.
+ *
+ * See htmlCtxtUseOptions for details.
+ *
+ * Returns the resulting document tree
+ */
+htmlDocPtr
+htmlCtxtReadMemory(htmlParserCtxtPtr ctxt, const char *buffer, int size,
+                   const char *URL, const char *encoding, int options)
+{
+    xmlParserInputPtr input;
+
+    if ((ctxt == NULL) || (size < 0))
+        return (NULL);
+
+    htmlCtxtReset(ctxt);
+    htmlCtxtUseOptions(ctxt, options);
+
+    input = xmlCtxtNewInputFromMemory(ctxt, URL, buffer, size, encoding,
+                                      XML_INPUT_BUF_STATIC);
+
+    return(htmlCtxtParseDocument(ctxt, input));
+}
+
+/**
+ * htmlCtxtReadFd:
+ * @ctxt: an HTML parser context
+ * @fd: an open file descriptor
+ * @URL: only used for error reporting (optional)
+ * @encoding: the document encoding (optional)
+ * @options: a combination of htmlParserOptions
+ *
+ * Parse an HTML document from a file descriptor and build a tree.
+ *
+ * See htmlCtxtUseOptions for details.
+ *
+ * NOTE that the file descriptor will not be closed when the
+ * context is freed or reset.
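+ *
+ * Since the descriptor stays open, the caller closes it after parsing
+ * (illustrative sketch; `ctxt` and `fd` are assumed to exist):
+ *
+ *     htmlDocPtr doc = htmlCtxtReadFd(ctxt, fd, NULL, NULL, 0);
+ *     close(fd);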
+ *
+ * Returns the resulting document tree
+ */
+htmlDocPtr
+htmlCtxtReadFd(htmlParserCtxtPtr ctxt, int fd,
+               const char *URL, const char *encoding, int options)
+{
+    xmlParserInputPtr input;
+
+    if (ctxt == NULL)
+        return(NULL);
+
+    htmlCtxtReset(ctxt);
+    htmlCtxtUseOptions(ctxt, options);
+
+    input = xmlCtxtNewInputFromFd(ctxt, URL, fd, encoding, 0);
+
+    return(htmlCtxtParseDocument(ctxt, input));
+}
+
+/**
+ * htmlCtxtReadIO:
+ * @ctxt: an HTML parser context
+ * @ioread: an I/O read function
+ * @ioclose: an I/O close function
+ * @ioctx: an I/O handler
+ * @URL: the base URL to use for the document
+ * @encoding: the document encoding, or NULL
+ * @options: a combination of htmlParserOption(s)
+ *
+ * Parse an HTML document from I/O functions and build a tree.
+ *
+ * See htmlCtxtUseOptions for details.
+ *
+ * Returns the resulting document tree
+ */
+htmlDocPtr
+htmlCtxtReadIO(htmlParserCtxtPtr ctxt, xmlInputReadCallback ioread,
+               xmlInputCloseCallback ioclose, void *ioctx,
+               const char *URL,
+               const char *encoding, int options)
+{
+    xmlParserInputPtr input;
+
+    if (ctxt == NULL)
+        return (NULL);
+
+    htmlCtxtReset(ctxt);
+    htmlCtxtUseOptions(ctxt, options);
+
+    input = xmlCtxtNewInputFromIO(ctxt, URL, ioread, ioclose, ioctx,
+                                  encoding, 0);
+
+    return(htmlCtxtParseDocument(ctxt, input));
+}
+
+#endif /* LIBXML_HTML_ENABLED */
diff --git a/local-test-libxml2-delta-01/afc-libxml2/HTMLtree.c b/local-test-libxml2-delta-01/afc-libxml2/HTMLtree.c
new file mode 100644
index 0000000000000000000000000000000000000000..06741c21f7da5b9bf081ed86e0fc96f058f5e5e1
--- /dev/null
+++ b/local-test-libxml2-delta-01/afc-libxml2/HTMLtree.c
@@ -0,0 +1,1131 @@
+/*
+ * HTMLtree.c : implementation of access function for an HTML tree.
+ *
+ * See Copyright for the status of this software.
+ *
+ * daniel@veillard.com
+ */
+
+
+#define IN_LIBXML
+#include "libxml.h"
+#ifdef LIBXML_HTML_ENABLED
+
+#include <string.h> /* for memset() only !
 */
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <libxml/xmlmemory.h>
+#include <libxml/HTMLparser.h>
+#include <libxml/HTMLtree.h>
+#include <libxml/entities.h>
+#include <libxml/valid.h>
+#include <libxml/xmlerror.h>
+#include <libxml/parserInternals.h>
+
+#include "private/buf.h"
+#include "private/error.h"
+#include "private/io.h"
+#include "private/save.h"
+
+/************************************************************************
+ *                                                                      *
+ *              Getting/Setting encoding meta tags                      *
+ *                                                                      *
+ ************************************************************************/
+
+/**
+ * htmlGetMetaEncoding:
+ * @doc: the document
+ *
+ * Encoding definition lookup in the Meta tags
+ *
+ * Returns the current encoding as flagged in the HTML source
+ */
+const xmlChar *
+htmlGetMetaEncoding(htmlDocPtr doc) {
+    htmlNodePtr cur;
+    const xmlChar *content;
+    const xmlChar *encoding;
+
+    if (doc == NULL)
+        return(NULL);
+    cur = doc->children;
+
+    /*
+     * Search the html
+     */
+    while (cur != NULL) {
+        if ((cur->type == XML_ELEMENT_NODE) && (cur->name != NULL)) {
+            if (xmlStrEqual(cur->name, BAD_CAST"html"))
+                break;
+            if (xmlStrEqual(cur->name, BAD_CAST"head"))
+                goto found_head;
+            if (xmlStrEqual(cur->name, BAD_CAST"meta"))
+                goto found_meta;
+        }
+        cur = cur->next;
+    }
+    if (cur == NULL)
+        return(NULL);
+    cur = cur->children;
+
+    /*
+     * Search the head
+     */
+    while (cur != NULL) {
+        if ((cur->type == XML_ELEMENT_NODE) && (cur->name != NULL)) {
+            if (xmlStrEqual(cur->name, BAD_CAST"head"))
+                break;
+            if (xmlStrEqual(cur->name, BAD_CAST"meta"))
+                goto found_meta;
+        }
+        cur = cur->next;
+    }
+    if (cur == NULL)
+        return(NULL);
+found_head:
+    cur = cur->children;
+
+    /*
+     * Search the meta elements
+     */
+found_meta:
+    while (cur != NULL) {
+        if ((cur->type == XML_ELEMENT_NODE) && (cur->name != NULL)) {
+            if (xmlStrEqual(cur->name, BAD_CAST"meta")) {
+                xmlAttrPtr attr = cur->properties;
+                int http;
+                const xmlChar *value;
+
+                content = NULL;
+                http = 0;
+                while (attr != NULL) {
+                    if ((attr->children != NULL) &&
+                        (attr->children->type == XML_TEXT_NODE) &&
+                        (attr->children->next == NULL)) {
+                        value = attr->children->content;
+                        if ((!xmlStrcasecmp(attr->name, BAD_CAST"http-equiv"))
+                            && (!xmlStrcasecmp(value, BAD_CAST"Content-Type")))
+                            http = 1;
+                        else if ((value != NULL)
+                            && (!xmlStrcasecmp(attr->name, BAD_CAST"content")))
+                            content = value;
+                        if ((http != 0) && (content != NULL))
+                            goto found_content;
+                    }
+                    attr = attr->next;
+                }
+            }
+        }
+        cur = cur->next;
+    }
+    return(NULL);
+
+found_content:
+    encoding = xmlStrstr(content, BAD_CAST"charset=");
+    if (encoding == NULL)
+        encoding = xmlStrstr(content, BAD_CAST"Charset=");
+    if (encoding == NULL)
+        encoding = xmlStrstr(content, BAD_CAST"CHARSET=");
+    if (encoding != NULL) {
+        encoding += 8;
+    } else {
+        encoding = xmlStrstr(content, BAD_CAST"charset =");
+        if (encoding == NULL)
+            encoding = xmlStrstr(content, BAD_CAST"Charset =");
+        if (encoding == NULL)
+            encoding = xmlStrstr(content, BAD_CAST"CHARSET =");
+        if (encoding != NULL)
+            encoding += 9;
+    }
+    if (encoding != NULL) {
+        while ((*encoding == ' ') || (*encoding == '\t')) encoding++;
+    }
+    return(encoding);
+}
+
+/**
+ * htmlSetMetaEncoding:
+ * @doc: the document
+ * @encoding: the encoding string
+ *
+ * Sets the current encoding in the Meta tags
+ * NOTE: this will not change the document content encoding, just
+ * the META flag associated.
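+ *
+ * Typical use before serializing (illustrative sketch; `doc` is assumed to
+ * be a parsed HTML document):
+ *
+ *     if (htmlSetMetaEncoding(doc, BAD_CAST "UTF-8") == 0)
+ *         htmlSaveFile("out.html", doc);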
+ *
+ * Returns 0 in case of success and -1 in case of error
+ */
+int
+htmlSetMetaEncoding(htmlDocPtr doc, const xmlChar *encoding) {
+    htmlNodePtr cur, meta = NULL, head = NULL;
+    const xmlChar *content = NULL;
+    char newcontent[100];
+
+    newcontent[0] = 0;
+
+    if (doc == NULL)
+        return(-1);
+
+    /* html isn't a real encoding, it's just libxml2's way to get entities */
+    if (!xmlStrcasecmp(encoding, BAD_CAST "html"))
+        return(-1);
+
+    if (encoding != NULL) {
+        snprintf(newcontent, sizeof(newcontent), "text/html; charset=%s",
+                 (char *)encoding);
+        newcontent[sizeof(newcontent) - 1] = 0;
+    }
+
+    cur = doc->children;
+
+    /*
+     * Search the html
+     */
+    while (cur != NULL) {
+        if ((cur->type == XML_ELEMENT_NODE) && (cur->name != NULL)) {
+            if (xmlStrcasecmp(cur->name, BAD_CAST"html") == 0)
+                break;
+            if (xmlStrcasecmp(cur->name, BAD_CAST"head") == 0)
+                goto found_head;
+            if (xmlStrcasecmp(cur->name, BAD_CAST"meta") == 0)
+                goto found_meta;
+        }
+        cur = cur->next;
+    }
+    if (cur == NULL)
+        return(-1);
+    cur = cur->children;
+
+    /*
+     * Search the head
+     */
+    while (cur != NULL) {
+        if ((cur->type == XML_ELEMENT_NODE) && (cur->name != NULL)) {
+            if (xmlStrcasecmp(cur->name, BAD_CAST"head") == 0)
+                break;
+            if (xmlStrcasecmp(cur->name, BAD_CAST"meta") == 0) {
+                head = cur->parent;
+                goto found_meta;
+            }
+        }
+        cur = cur->next;
+    }
+    if (cur == NULL)
+        return(-1);
+found_head:
+    head = cur;
+    if (cur->children == NULL)
+        goto create;
+    cur = cur->children;
+
+found_meta:
+    /*
+     * Search and update all the remaining meta elements carrying
+     * encoding information
+     */
+    while (cur != NULL) {
+        if ((cur->type == XML_ELEMENT_NODE) && (cur->name != NULL)) {
+            if (xmlStrcasecmp(cur->name, BAD_CAST"meta") == 0) {
+                xmlAttrPtr attr = cur->properties;
+                int http;
+                const xmlChar *value;
+
+                content = NULL;
+                http = 0;
+                while (attr != NULL) {
+                    if ((attr->children != NULL) &&
+                        (attr->children->type == XML_TEXT_NODE) &&
+                        (attr->children->next == NULL)) {
+                        value = attr->children->content;
+                        if ((!xmlStrcasecmp(attr->name, BAD_CAST"http-equiv"))
+                            && (!xmlStrcasecmp(value, BAD_CAST"Content-Type")))
+                            http = 1;
+                        else
+                        {
+                            if ((value != NULL) &&
+                                (!xmlStrcasecmp(attr->name, BAD_CAST"content")))
+                                content = value;
+                        }
+                        if ((http != 0) && (content != NULL))
+                            break;
+                    }
+                    attr = attr->next;
+                }
+                if ((http != 0) && (content != NULL)) {
+                    meta = cur;
+                    break;
+                }
+
+            }
+        }
+        cur = cur->next;
+    }
+create:
+    if (meta == NULL) {
+        if ((encoding != NULL) && (head != NULL)) {
+            /*
+             * Create a new Meta element with the right attributes
+             */
+
+            meta = xmlNewDocNode(doc, NULL, BAD_CAST"meta", NULL);
+            if (head->children == NULL)
+                xmlAddChild(head, meta);
+            else
+                xmlAddPrevSibling(head->children, meta);
+            xmlNewProp(meta, BAD_CAST"http-equiv", BAD_CAST"Content-Type");
+            xmlNewProp(meta, BAD_CAST"content", BAD_CAST newcontent);
+        }
+    } else {
+        /* remove the meta tag if NULL is passed */
+        if (encoding == NULL) {
+            xmlUnlinkNode(meta);
+            xmlFreeNode(meta);
+        }
+        /* change the document only if there is a real encoding change */
+        else if (xmlStrcasestr(content, encoding) == NULL) {
+            xmlSetProp(meta, BAD_CAST"content", BAD_CAST newcontent);
+        }
+    }
+
+
+    return(0);
+}
+
+/**
+ * booleanHTMLAttrs:
+ *
+ * These are the HTML attributes which will be output
+ * in minimized form, i.e.

elements (William Brack and me) + - XPointer failure in XInclude are now handled as resource errors + - fixed xmllint --html to use the HTML serializer on output (added + --xmlout to implement the previous behaviour of saving it using the XML + serializer) + + +2.6.1: Oct 28 2003: + - Mostly bugfixes after the big 2.6.0 changes + - Unix compilation patches: libxml.m4 (Patrick Welche), warnings cleanup + (William Brack) + - Windows compilation patches (Joachim Bauch, Stephane Bidoul, Igor + Zlatkovic) + - xmlWriter bugfix (Alfred Mickautsch) + - chvalid.[ch]: couple of fixes from Stephane Bidoul + - context reset: error state reset, push parser reset (Graham + Bennett) + - context reuse: generate errors if file is not readable + - defaulted attributes for element coming from internal entities + (Stephane Bidoul) + - Python: tab and spaces mix (William Brack) + - Error handler could crash in DTD validation in 2.6.0 + - xmlReader: do not use the document or element _private field + - testSAX.c: avoid a problem with some PIs (Massimo Morara) + - general bug fixes: mandatory encoding in text decl, serializing + Document Fragment nodes, xmlSearchNs 2.6.0 problem (Kasimier Buchcik), + XPath errors not reported, slow HTML parsing of large documents. + + +2.6.0: Oct 20 2003: + - Major revision release: should be API and ABI compatible but got a lot + of change + - Increased the library modularity, far more options can be stripped out, + a --with-minimum configuration will weight around 160KBytes + - Use per parser and per document dictionary, allocate names and small + text nodes from the dictionary + - Switch to a SAX2 like parser rewrote most of the XML parser core, + provides namespace resolution and defaulted attributes, minimize memory + allocations and copies, namespace checking and specific error handling, + immutable buffers, make predefined entities static structures, etc... + - rewrote all the error handling in the library, all errors can be + intercepted at a structured level, with precise information + available. + - New simpler and more generic XML and HTML parser APIs, allowing to + easily modify the parsing options and reuse parser context for multiple + consecutive documents. + - Similar new APIs for the xmlReader, for options and reuse, provided new + functions to access content as const strings, use them for Python + bindings + - a lot of other smaller API improvements: xmlStrPrintf (Aleksey Sanin), + Walker i.e. reader on a document tree based on Alfred Mickautsch code, + make room in nodes for line numbers, reference counting and future PSVI + extensions, generation of character ranges to be checked with faster + algorithm (William), xmlParserMaxDepth (Crutcher Dunnavant), buffer + access + - New xmlWriter API provided by Alfred Mickautsch + - Schemas: base64 support by Anthony Carrico + - Parser<->HTTP integration fix, proper processing of the Mime-Type + and charset information if available. + - Relax-NG: bug fixes including the one reported by Martijn Faassen and + zeroOrMore, better error reporting. + - Python bindings (Stéphane Bidoul), never use stdout for errors + output + - Portability: all the headers have macros for export and calling + convention definitions (Igor Zlatkovic), VMS update (Craig A. 
Berry), + Windows: threads (Jesse Pelton), Borland compiler (Eric Zurcher, Igor), + Mingw (Igor), typos (Mark Vakoc), beta version (Stephane Bidoul), + warning cleanups on AIX and MIPS compilers (William Brack), BeOS (Marcin + 'Shard' Konicki) + - Documentation fixes and README (William Brack), search fix (William), + tutorial updates (John Fleck), namespace docs (Stefan Kost) + - Bug fixes: xmlCleanupParser (Dave Beckett), threading uninitialized + mutexes, HTML doctype lowercase, SAX/IO (William), compression detection + and restore (William), attribute declaration in DTDs (William), namespace + on attribute in HTML output (William), input filename (Rob Richards), + namespace DTD validation, xmlReplaceNode (Chris Ryland), I/O callbacks + (Markus Keim), CDATA serialization (Shaun McCance), xmlReader (Peter + Derr), high codepoint charref like 􏿿, buffer access in push + mode (Justin Fletcher), TLS threads on Windows (Jesse Pelton), XPath bug + (William), xmlCleanupParser (Marc Liyanage), CDATA output (William), HTTP + error handling. + - xmllint options: --dtdvalidfpi for Tobias Reif, --sax1 for compat + testing, --nodict for building without tree dictionary, --nocdata to + replace CDATA by text, --nsclean to remove surperfluous namespace + declarations + - added xml2-config --libtool-libs option from Kevin P. Fleming + - a lot of profiling and tuning of the code, speedup patch for + xmlSearchNs() by Luca Padovani. The xmlReader should do far less + allocation and it speed should get closer to SAX. Chris Anderson worked + on speeding and cleaning up repetitive checking code. + - cleanup of "make tests" + - libxml-2.0-uninstalled.pc from Malcolm Tredinnick + - deactivated the broken docBook SGML parser code and plugged the XML + parser instead. + + +2.5.11: Sep 9 2003: +A bugfix only release: - risk of crash in Relax-NG + - risk of crash when using multithreaded programs + + +2.5.10: Aug 15 2003: +A bugfixes only release - Windows Makefiles (William Brack) + - UTF-16 support fixes (Mark Itzcovitz) + - Makefile and portability (William Brack) automake, Linux alpha, Mingw + on Windows (Mikhail Grushinskiy) + - HTML parser (Oliver Stoeneberg) + - XInclude performance problem reported by Kevin Ruscoe + - XML parser performance problem reported by Grant Goodale + - xmlSAXParseDTD() bug fix from Malcolm Tredinnick + - and a couple other cleanup + + +2.5.9: Aug 9 2003: + - bugfixes: IPv6 portability, xmlHasNsProp (Markus Keim), Windows build + (Wiliam Brake, Jesse Pelton, Igor), Schemas (Peter Sobisch), threading + (Rob Richards), hexBinary type (), UTF-16 BOM (Dodji Seketeli), + xmlReader, Relax-NG schemas compilation, namespace handling, EXSLT (Sean + Griffin), HTML parsing problem (William Brack), DTD validation for mixed + content + namespaces, HTML serialization, library initialization, + progressive HTML parser + - better interfaces for Relax-NG error handling (Joachim Bauch, ) + - adding xmlXIncludeProcessTree() for XInclud'ing in a subtree + - doc fixes and improvements (John Fleck) + - configure flag for -with-fexceptions when embedding in C++ + - couple of new UTF-8 helper functions (William Brack) + - general encoding cleanup + ISO-8859-x without iconv (Peter Jacobi) + - xmlTextReader cleanup + enum for node types (Bjorn Reese) + - general compilation/warning cleanup Solaris/HP-UX/... 
(William + Brack) + + +2.5.8: Jul 6 2003: + - bugfixes: XPath, XInclude, file/URI mapping, UTF-16 save (Mark + Itzcovitz), UTF-8 checking, URI saving, error printing (William Brack), + PI related memleak, compilation without schemas or without xpath (Joerg + Schmitz-Linneweber/Garry Pennington), xmlUnlinkNode problem with DTDs, + rpm problem on , i86_64, removed a few compilation problems from 2.5.7, + xmlIOParseDTD, and xmlSAXParseDTD (Malcolm Tredinnick) + - portability: DJGPP (MsDos) , OpenVMS (Craig A. Berry) + - William Brack fixed multithreading lock problems + - IPv6 patch for FTP and HTTP accesses (Archana Shah/Wipro) + - Windows fixes (Igor Zlatkovic, Eric Zurcher), threading (Stéphane + Bidoul) + - A few W3C Schemas Structure improvements + - W3C Schemas Datatype improvements (Charlie Bozeman) + - Python bindings for thread globals (Stéphane Bidoul), and method/class + generator + - added --nonet option to xmllint + - documentation improvements (John Fleck) + + +2.5.7: Apr 25 2003: + - Relax-NG: Compiling to regexp and streaming validation on top of the + xmlReader interface, added to xmllint --stream + - xmlReader: Expand(), Next() and DOM access glue, bug fixes + - Support for large files: RGN validated a 4.5GB instance + - Thread support is now configured in by default + - Fixes: update of the Trio code (Bjorn), WXS Date and Duration fixes + (Charles Bozeman), DTD and namespaces (Brent Hendricks), HTML push parser + and zero bytes handling, some missing Windows file path conversions, + behaviour of the parser and validator in the presence of "out of memory" + error conditions + - extended the API to be able to plug a garbage collecting memory + allocator, added xmlMallocAtomic() and modified the allocations + accordingly. + - Performances: removed excessive malloc() calls, speedup of the push and + xmlReader interfaces, removed excessive thread locking + - Documentation: man page (John Fleck), xmlReader documentation + - Python: adding binding for xmlCatalogAddLocal (Brent M Hendricks) + + +2.5.6: Apr 1 2003: + - Fixed W3C XML Schemas datatype, should be compliant now except for + binHex and base64 which are not supported yet. + - bug fixes: non-ASCII IDs, HTML output, XInclude on large docs and + XInclude entities handling, encoding detection on external subsets, XML + Schemas bugs and memory leaks, HTML parser (James Bursa) + - portability: python/trio (Albert Chin), Sun compiler warnings + - documentation: added --relaxng option to xmllint man page (John) + - improved error reporting: xml:space, start/end tag mismatches, Relax NG + errors + + +2.5.5: Mar 24 2003: + - Lot of fixes on the Relax NG implementation. More testing including + DocBook and TEI examples. + - Increased the support for W3C XML Schemas datatype + - Several bug fixes in the URI handling layer + - Bug fixes: HTML parser, xmlReader, DTD validation, XPath, encoding + conversion, line counting in the parser. + - Added support for $XMLLINT_INDENT environment variable, FTP delete + - Fixed the RPM spec file name + + +2.5.4: Feb 20 2003: + - Conformance testing and lot of fixes on Relax NG and XInclude + implementation + - Implementation of XPointer element() scheme + - Bug fixes: XML parser, XInclude entities merge, validity checking on + namespaces, + 2 serialization bugs, node info generation problems, a DTD regexp + generation problem. 
+ + - Portability: windows updates and path canonicalization (Igor) + - A few typo fixes (Kjartan Maraas) + - Python bindings generator fixes (Stephane Bidoul) + + +2.5.3: Feb 10 2003: + - RelaxNG and XML Schemas datatypes improvements, and added a first + version of RelaxNG Python bindings + - Fixes: XLink (Sean Chittenden), XInclude (Sean Chittenden), API fix for + serializing namespace nodes, encoding conversion bug, XHTML1 + serialization + - Portability fixes: Windows (Igor), AMD 64bits RPM spec file + + +2.5.2: Feb 5 2003: + - First implementation of RelaxNG, added --relaxng flag to xmllint + - Schemas support now compiled in by default. + - Bug fixes: DTD validation, namespace checking, XInclude and entities, + delegateURI in XML Catalogs, HTML parser, XML reader (Stéphane Bidoul), + XPath parser and evaluation, UTF8ToUTF8 serialization, XML reader memory + consumption, HTML parser, HTML serialization in the presence of + namespaces + - added an HTML API to check elements and attributes. + - Documentation improvement, PDF for the tutorial (John Fleck), doc + patches (Stefan Kost) + - Portability fixes: NetBSD (Julio Merino), Windows (Igor Zlatkovic) + - Added python bindings for XPointer, contextual error reporting + (Stéphane Bidoul) + - URI/file escaping problems (Stefano Zacchiroli) + + +2.5.1: Jan 8 2003: + - Fixes a memory leak and configuration/compilation problems in 2.5.0 + - documentation updates (John) + - a couple of XmlTextReader fixes + + +2.5.0: Jan 6 2003: + - New XmltextReader interface based on C# + API (with help of Stéphane Bidoul) + - Windows: more exports, including the new API (Igor) + - XInclude fallback fix + - Python: bindings for the new API, packaging (Stéphane Bidoul), + drv_libxml2.py Python xml.sax driver (Stéphane Bidoul), fixes, speedup + and iterators for Python-2.2 (Hannu Krosing) + - Tutorial fixes (john Fleck and Niraj Tolia) xmllint man update + (John) + - Fix an XML parser bug raised by Vyacheslav Pindyura + - Fix for VMS serialization (Nigel Hall) and config (Craig A. Berry) + - Entities handling fixes + - new API to optionally track node creation and deletion (Lukas + Schroeder) + - Added documentation for the XmltextReader interface and some XML guidelines + + +2.4.30: Dec 12 2002: + - 2.4.29 broke the python bindings, rereleasing + - Improvement/fixes of the XML API generator, and couple of minor code + fixes. 
+ + +2.4.29: Dec 11 2002: + - Windows fixes (Igor): Windows CE port, pthread linking, python bindings + (Stéphane Bidoul), Mingw (Magnus Henoch), and export list updates + - Fix for prev in python bindings (ERDI Gergo) + - Fix for entities handling (Marcus Clarke) + - Refactored the XML and HTML dumps to a single code path, fixed XHTML1 + dump + - Fix for URI parsing when handling URNs with fragment identifiers + - Fix for HTTP URL escaping problem + - added an TextXmlReader (C#) like API (work in progress) + - Rewrote the API in XML generation script, includes a C parser and saves + more information needed for C# bindings + + +2.4.28: Nov 22 2002: + - a couple of python binding fixes + - 2 bug fixes in the XML push parser + - potential memory leak removed (Martin Stoilov) + - fix to the configure script for Unix (Dimitri Papadopoulos) + - added encoding support for XInclude parse="text" + - autodetection of XHTML1 and specific serialization rules added + - nasty threading bug fixed (William Brack) + + +2.4.27: Nov 17 2002: + - fixes for the Python bindings + - a number of bug fixes: SGML catalogs, xmlParseBalancedChunkMemory(), + HTML parser, Schemas (Charles Bozeman), document fragment support + (Christian Glahn), xmlReconciliateNs (Brian Stafford), XPointer, + xmlFreeNode(), xmlSAXParseMemory (Peter Jones), xmlGetNodePath (Petr + Pajas), entities processing + - added grep to xmllint --shell + - VMS update patch from Craig A. Berry + - cleanup of the Windows build with support for more compilers (Igor), + better thread support on Windows + - cleanup of Unix Makefiles and spec file + - Improvements to the documentation (John Fleck) + + +2.4.26: Oct 18 2002: + - Patches for Windows CE port, improvements on Windows paths handling + - Fixes to the validation code (DTD and Schemas), xmlNodeGetPath() , + HTML serialization, Namespace compliance, and a number of small + problems + + +2.4.25: Sep 26 2002: + - A number of bug fixes: XPath, validation, Python bindings, DOM and + tree, xmlI/O, Html + - Serious rewrite of XInclude + - Made XML Schemas regexp part of the default build and APIs, small fix + and improvement of the regexp core + - Changed the validation code to reuse XML Schemas regexp APIs + - Better handling of Windows file paths, improvement of Makefiles (Igor, + Daniel Gehriger, Mark Vakoc) + - Improved the python I/O bindings, the tests, added resolver and regexp + APIs + - New logos from Marc Liyanage + - Tutorial improvements: John Fleck, Christopher Harris + - Makefile: Fixes for AMD x86_64 (Mandrake), DESTDIR (Christophe + Merlet) + - removal of all stderr/perror use for error reporting + - Better error reporting: XPath and DTD validation + - update of the trio portability layer (Bjorn Reese) + +2.4.24: Aug 22 2002 - XPath fixes (William), xf:escape-uri() (Wesley Terpstra) + - Python binding fixes: makefiles (William), generator, rpm build, x86-64 + (fcrozat) + - HTML + +
diff --git a/local-test-libxml2-delta-01/fuzz-tooling/docs/Gemfile b/local-test-libxml2-delta-01/fuzz-tooling/docs/Gemfile new file mode 100644 index 0000000000000000000000000000000000000000..d4c69e8dde6a6867a17684af6b0beacf87e66e86 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/docs/Gemfile @@ -0,0 +1,4 @@ +source "https://rubygems.org" +gem 'github-pages', group: :jekyll_plugins + +gem "webrick", "~> 1.8" diff --git a/local-test-libxml2-delta-01/fuzz-tooling/docs/README.md b/local-test-libxml2-delta-01/fuzz-tooling/docs/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3ac9b680f016b16c169f26b55878328efa31a93d --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/docs/README.md @@ -0,0 +1,19 @@ +# Readme + +Use the following instructions to make documentation changes locally. + +## Prerequisites +```bash +$ sudo apt install ruby bundler +$ bundle config set path 'vendor/bundle' +$ bundle install +``` + +## Serving locally +```bash +$ bundle exec jekyll serve +``` + +## Theme documentation +We are using the [just the docs](https://just-the-docs.github.io/just-the-docs/) +theme. diff --git a/local-test-libxml2-delta-01/fuzz-tooling/docs/_config.yml b/local-test-libxml2-delta-01/fuzz-tooling/docs/_config.yml new file mode 100644 index 0000000000000000000000000000000000000000..c2e32b70b14e23392b44f4f9f2ea01f08533c873 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/docs/_config.yml @@ -0,0 +1,40 @@ +# Welcome to Jekyll! +# +# This config file is meant for settings that affect your whole blog, values +# which you are expected to set up once and rarely edit after that. If you find +# yourself editing this file very often, consider using Jekyll's data files +# feature for the data you need to update frequently. +# +# For technical reasons, this file is *NOT* reloaded automatically when you use +# 'bundle exec jekyll serve'. If you change this file, please restart the server process. + +# Site settings +# These are used to personalize your new site. If you look in the HTML files, +# you will see them accessed via {{ site.title }}, {{ site.email }}, and so on. +# You can create any custom variable you would like, and they will be accessible +# in the templates via {{ site.myvariable }}. +title: OSS-Fuzz +description: Documentation for OSS-Fuzz +baseurl: "/oss-fuzz" # the subpath of your site, e.g. /blog +url: "" # the base hostname & protocol for your site, e.g. http://example.com + +# Build settings +markdown: kramdown +remote_theme: pmarsceill/just-the-docs +search_enabled: true + +ga_tracking: G-LRX1V3S5P + +aux_links: + "OSS-Fuzz on GitHub": + - https://github.com/google/oss-fuzz + +# Exclude from processing. 
+exclude: + - Gemfile + - Gemfile.lock + - node_modules + - vendor/bundle/ + - vendor/cache/ + - vendor/gems/ + - vendor/ruby/ diff --git a/local-test-libxml2-delta-01/fuzz-tooling/docs/favicon.ico b/local-test-libxml2-delta-01/fuzz-tooling/docs/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..46a19509f373510556c04b529d8a3423f894ff9c Binary files /dev/null and b/local-test-libxml2-delta-01/fuzz-tooling/docs/favicon.ico differ diff --git a/local-test-libxml2-delta-01/fuzz-tooling/docs/ideal_integration.md b/local-test-libxml2-delta-01/fuzz-tooling/docs/ideal_integration.md new file mode 100644 index 0000000000000000000000000000000000000000..986e7bc1c034bb591c9b6671a1cfd5f488153c73 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/docs/ideal_integration.md @@ -0,0 +1 @@ +This page has moved [here](https://google.github.io/oss-fuzz/advanced-topics/ideal-integration) diff --git a/local-test-libxml2-delta-01/fuzz-tooling/docs/reproducing.md b/local-test-libxml2-delta-01/fuzz-tooling/docs/reproducing.md new file mode 100644 index 0000000000000000000000000000000000000000..77c79dfe2062a0d12628944692b01825cd5b69d3 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/docs/reproducing.md @@ -0,0 +1 @@ +This page has moved [here](https://google.github.io/oss-fuzz/advanced-topics/reproducing) diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/.dockerignore b/local-test-libxml2-delta-01/fuzz-tooling/infra/.dockerignore new file mode 100644 index 0000000000000000000000000000000000000000..c786533425754418b7e2256ae37c7624ff9eee11 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/.dockerignore @@ -0,0 +1,9 @@ +cifuzz/test_data/* + +# Copied from .gitignore. +.vscode/ +*.pyc +build +*~ +.DS_Store +*.swp \ No newline at end of file diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/MAINTAINERS.csv b/local-test-libxml2-delta-01/fuzz-tooling/infra/MAINTAINERS.csv new file mode 100644 index 0000000000000000000000000000000000000000..803827d833b91cfb3ad80f356737dded20dfa17b --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/MAINTAINERS.csv @@ -0,0 +1,7 @@ +Name,Email,Github Username +Adam Korcz,adam@adalogics.com,AdamKorcz +David Korczynski,david@adalogics.com,DavidKorczynski +Dongge Liu,donggeliu@google.com,Alan32Liu +Holly Gong,gongh@google.com,hogo6002 +Jonathan Metzman,metzman@google.com,jonathanmetzman +Oliver Chang,ochang@google.com,oliverchang \ No newline at end of file diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/README.md b/local-test-libxml2-delta-01/fuzz-tooling/infra/README.md new file mode 100644 index 0000000000000000000000000000000000000000..eff007eeaafa8c47b2ddb18cbd6a2fa44bb2c48a --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/README.md @@ -0,0 +1,23 @@ +# infra +> OSS-Fuzz project infrastructure + +Core infrastructure: +* [`base-images`](base-images/) - docker images for building fuzz targets & corresponding jenkins + pipeline. + +Continuous Integration infrastructure: + +* [`ci`](ci/) - script to build projects in CI. 
+
+## helper.py
+> script to automate common docker operations
+
+| Command | Description |
+|---------|-------------|
+| `generate` | Generates skeleton files for a new project |
+| `build_image` | Builds a docker image for a given project |
+| `build_fuzzers` | Builds fuzz targets for a given project |
+| `run_fuzzer` | Runs a fuzz target in a docker container |
+| `coverage` | Runs fuzz target(s) in a docker container and generates a code coverage report. See [Code Coverage doc](https://google.github.io/oss-fuzz/advanced-topics/code-coverage/) |
+| `reproduce` | Runs a testcase to reproduce a crash |
+| `shell` | Starts a shell inside the docker image for a project |
diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/bisector_test.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/bisector_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..d93ac323980a19b308029e08a3a49d3ec650e749
--- /dev/null
+++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/bisector_test.py
@@ -0,0 +1,70 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Test the functionality of the bisection module:
+1) Test a known case where an error appears in a regression range.
+2) Bisect can handle incorrect inputs.
+
+IMPORTANT: This test needs to be run with root privileges.
+"""
+
+import os
+import unittest
+
+import bisector
+import build_specified_commit
+import test_repos
+
+# Necessary because __file__ changes with os.chdir
+TEST_DIR_PATH = os.path.dirname(os.path.realpath(__file__))
+
+
+@unittest.skip('Test is too long to be run with presubmit.')
+class BisectIntegrationTests(unittest.TestCase):
+  """Class to test the functionality of the bisection method."""
+
+  BISECT_TYPE = 'regressed'
+
+  def test_bisect_invalid_repo(self):
+    """Test the bisection method on a project that does not exist."""
+    test_repo = test_repos.INVALID_REPO
+    build_data = build_specified_commit.BuildData(
+        project_name=test_repo.project_name,
+        engine='libfuzzer',
+        sanitizer='address',
+        architecture='x86_64')
+    with self.assertRaises(ValueError):
+      bisector.bisect(self.BISECT_TYPE, test_repo.old_commit,
+                      test_repo.new_commit, test_repo.testcase_path,
+                      test_repo.fuzz_target, build_data)
+
+  def test_bisect(self):
+    """Test the bisect method on example projects."""
+    for test_repo in test_repos.TEST_REPOS:
+      if test_repo.new_commit:
+        build_data = build_specified_commit.BuildData(
+            project_name=test_repo.project_name,
+            engine='libfuzzer',
+            sanitizer='address',
+            architecture='x86_64')
+        result = bisector.bisect(self.BISECT_TYPE, test_repo.old_commit,
+                                 test_repo.new_commit, test_repo.testcase_path,
+                                 test_repo.fuzz_target, build_data)
+        self.assertEqual(result.commit, test_repo.intro_commit)
+
+
+if __name__ == '__main__':
+  # Change to oss-fuzz main directory so helper.py runs correctly.
+  if os.getcwd() != os.path.dirname(TEST_DIR_PATH):
+    os.chdir(os.path.dirname(TEST_DIR_PATH))
+  unittest.main()
diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/build_fuzzers.Dockerfile b/local-test-libxml2-delta-01/fuzz-tooling/infra/build_fuzzers.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..be53f720f7c00e3aea27ce03b1baaf9b08b35f25
--- /dev/null
+++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/build_fuzzers.Dockerfile
@@ -0,0 +1,31 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+################################################################################
+# Docker image to run fuzzers for CIFuzz (the run_fuzzers action on GitHub
+# actions).
+
+FROM ghcr.io/aixcc-finals/cifuzz-base
+
+# Python file to execute when the docker container starts up
+# We can't use the env var $OSS_FUZZ_ROOT here. Since it's a constant env var,
+# just expand to '/opt/oss-fuzz'.
+ENTRYPOINT ["python3", "/opt/oss-fuzz/infra/cifuzz/build_fuzzers_entrypoint.py"]
+
+WORKDIR ${OSS_FUZZ_ROOT}/infra
+
+# Update infra source code.
+ADD . ${OSS_FUZZ_ROOT}/infra
+
+RUN python3 -m pip install -r ${OSS_FUZZ_ROOT}/infra/cifuzz/requirements.txt
\ No newline at end of file
diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/build_specified_commit.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/build_specified_commit.py
new file mode 100644
index 0000000000000000000000000000000000000000..f1ad21866095ef5becb11623a11272b2dc13cce7
--- /dev/null
+++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/build_specified_commit.py
@@ -0,0 +1,410 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Module to build an image from a specific commit, branch or pull request.
+
+This module allows each of the OSS-Fuzz projects' fuzzers to be built
+from a specific point in time.
 This feature can be used for applications
+like continuous integration fuzzing and bisection to find errors.
+"""
+import argparse
+import bisect
+import datetime
+import os
+import collections
+import json
+import logging
+import re
+import shutil
+import tempfile
+
+import helper
+import repo_manager
+import retry
+import utils
+
+BuildData = collections.namedtuple(
+    'BuildData', ['project_name', 'engine', 'sanitizer', 'architecture'])
+
+_GIT_DIR_MARKER = 'gitdir: '
+_IMAGE_BUILD_TRIES = 3
+
+
+class BaseBuilderRepo:
+  """Repo of base-builder images."""
+
+  def __init__(self):
+    self.timestamps = []
+    self.digests = []
+
+  def add_digest(self, timestamp, digest):
+    """Add a digest."""
+    self.timestamps.append(timestamp)
+    self.digests.append(digest)
+
+  def find_digest(self, timestamp):
+    """Find the latest image before the given timestamp."""
+    index = bisect.bisect_right(self.timestamps, timestamp)
+    if index > 0:
+      return self.digests[index - 1]
+
+    logging.error('Failed to find suitable base-builder.')
+    return None
+
+
+def _replace_gitdir(src_dir, file_path):
+  """Replace gitdir with a relative path."""
+  with open(file_path) as handle:
+    lines = handle.readlines()
+
+  new_lines = []
+  for line in lines:
+    if line.startswith(_GIT_DIR_MARKER):
+      absolute_path = line[len(_GIT_DIR_MARKER):].strip()
+      if not os.path.isabs(absolute_path):
+        # Already relative.
+        return
+
+      current_dir = os.path.dirname(file_path)
+      # Rebase to /src rather than the host src dir.
+      base_dir = current_dir.replace(src_dir, '/src')
+      relative_path = os.path.relpath(absolute_path, base_dir)
+      logging.info('Replacing absolute submodule gitdir from %s to %s',
+                   absolute_path, relative_path)
+
+      line = _GIT_DIR_MARKER + relative_path
+
+    new_lines.append(line)
+
+  with open(file_path, 'w') as handle:
+    handle.write(''.join(new_lines))
+
+
+def _make_gitdirs_relative(src_dir):
+  """Make gitdirs relative."""
+  for root_dir, _, files in os.walk(src_dir):
+    for filename in files:
+      if filename != '.git':
+        continue
+
+      file_path = os.path.join(root_dir, filename)
+      _replace_gitdir(src_dir, file_path)
+
+
+def _replace_base_builder_digest(dockerfile_path, digest):
+  """Replace the base-builder digest in a Dockerfile."""
+  with open(dockerfile_path) as handle:
+    lines = handle.readlines()
+
+  new_lines = []
+  for line in lines:
+    if line.strip().startswith('FROM'):
+      line = 'FROM ghcr.io/aixcc-finals/base-builder@' + digest + '\n'
+
+    new_lines.append(line)
+
+  with open(dockerfile_path, 'w') as handle:
+    handle.write(''.join(new_lines))
+
+
+def copy_src_from_docker(project_name, host_dir):
+  """Copy /src from docker to the host."""
+  # Copy /src to host.
+  image_name = 'gcr.io/oss-fuzz/' + project_name
+  src_dir = os.path.join(host_dir, 'src')
+  if os.path.exists(src_dir):
+    shutil.rmtree(src_dir, ignore_errors=True)
+
+  docker_args = [
+      '-v',
+      host_dir + ':/out',
+      image_name,
+      'cp',
+      '-r',
+      '-p',
+      '/src',
+      '/out',
+  ]
+  helper.docker_run(docker_args)
+
+  # Submodules can have gitdir entries which point to absolute paths. Make them
+  # relative, as otherwise we can't do operations on the checkout on the host.
+ _make_gitdirs_relative(src_dir) + return src_dir + + +@retry.wrap(_IMAGE_BUILD_TRIES, 2) +def _build_image_with_retries(project_name): + """Build image with retries.""" + return helper.build_image_impl(helper.Project(project_name)) + + +def get_required_post_checkout_steps(dockerfile_path): + """Get required post checkout steps (best effort).""" + + checkout_pattern = re.compile(r'\s*RUN\s*(git|svn|hg)') + + # If the build.sh is copied from upstream, we need to copy it again after + # changing the revision to ensure correct building. + post_run_pattern = re.compile(r'\s*RUN\s*(.*build\.sh.*(\$SRC|/src).*)') + + with open(dockerfile_path) as handle: + lines = handle.readlines() + + subsequent_run_cmds = [] + for i, line in enumerate(lines): + if checkout_pattern.match(line): + subsequent_run_cmds = [] + continue + + match = post_run_pattern.match(line) + if match: + workdir = helper.workdir_from_lines(lines[:i]) + command = match.group(1) + subsequent_run_cmds.append((workdir, command)) + + return subsequent_run_cmds + + +# pylint: disable=too-many-locals +def build_fuzzers_from_commit(commit, + build_repo_manager, + host_src_path, + build_data, + base_builder_repo=None): + """Builds a OSS-Fuzz fuzzer at a specific commit SHA. + + Args: + commit: The commit SHA to build the fuzzers at. + build_repo_manager: The OSS-Fuzz project's repo manager to be built at. + build_data: A struct containing project build information. + base_builder_repo: A BaseBuilderRepo. + Returns: + 0 on successful build or error code on failure. + """ + oss_fuzz_repo_manager = repo_manager.RepoManager(helper.OSS_FUZZ_DIR) + num_retry = 1 + + def cleanup(): + # Re-copy /src for a clean checkout every time. + copy_src_from_docker(build_data.project_name, + os.path.dirname(host_src_path)) + build_repo_manager.fetch_all_remotes() + + projects_dir = os.path.join('projects', build_data.project_name) + dockerfile_path = os.path.join(projects_dir, 'Dockerfile') + + for i in range(num_retry + 1): + build_repo_manager.checkout_commit(commit, clean=False) + + post_checkout_steps = get_required_post_checkout_steps(dockerfile_path) + for workdir, post_checkout_step in post_checkout_steps: + logging.info('Running post-checkout step `%s` in %s.', post_checkout_step, + workdir) + helper.docker_run([ + '-w', + workdir, + '-v', + host_src_path + ':' + '/src', + 'gcr.io/oss-fuzz/' + build_data.project_name, + '/bin/bash', + '-c', + post_checkout_step, + ]) + + project = helper.Project(build_data.project_name) + result = helper.build_fuzzers_impl(project=project, + clean=True, + engine=build_data.engine, + sanitizer=build_data.sanitizer, + architecture=build_data.architecture, + env_to_add=None, + source_path=host_src_path, + mount_path='/src') + if result or i == num_retry: + break + + # Retry with an OSS-Fuzz builder container that's closer to the project + # commit date. + commit_date = build_repo_manager.commit_date(commit) + + # Find first change in the projects/ directory before the project + # commit date. + oss_fuzz_commit, _, _ = oss_fuzz_repo_manager.git([ + 'log', '--before=' + commit_date.isoformat(), '-n1', '--format=%H', + projects_dir + ], + check_result=True) + oss_fuzz_commit = oss_fuzz_commit.strip() + if not oss_fuzz_commit: + logging.info( + 'Could not find first OSS-Fuzz commit prior to upstream commit. ' + 'Falling back to oldest integration commit.') + + # Find the oldest commit. 
+ oss_fuzz_commit, _, _ = oss_fuzz_repo_manager.git( + ['log', '--reverse', '--format=%H', projects_dir], check_result=True) + + oss_fuzz_commit = oss_fuzz_commit.splitlines()[0].strip() + + if not oss_fuzz_commit: + logging.error('Failed to get oldest integration commit.') + break + + logging.info('Build failed. Retrying on earlier OSS-Fuzz commit %s.', + oss_fuzz_commit) + + # Check out projects/ dir to the commit that was found. + oss_fuzz_repo_manager.git(['checkout', oss_fuzz_commit, projects_dir], + check_result=True) + + # Also use the closest base-builder we can find. + if base_builder_repo: + base_builder_digest = base_builder_repo.find_digest(commit_date) + if not base_builder_digest: + return False + + logging.info('Using base-builder with digest %s.', base_builder_digest) + _replace_base_builder_digest(dockerfile_path, base_builder_digest) + + # Rebuild image and re-copy src dir since things in /src could have changed. + if not _build_image_with_retries(build_data.project_name): + logging.error('Failed to rebuild image.') + return False + + cleanup() + + cleanup() + return result + + +def detect_main_repo(project_name, repo_name=None, commit=None): + """Checks a docker image for the main repo of an OSS-Fuzz project. + + Note: The default is to use the repo name to detect the main repo. + + Args: + project_name: The name of the oss-fuzz project. + repo_name: The name of the main repo in an OSS-Fuzz project. + commit: A commit SHA that is associated with the main repo. + + Returns: + A tuple containing (the repo's origin, the repo's path). + """ + + if not repo_name and not commit: + logging.error( + 'Error: can not detect main repo without a repo_name or a commit.') + return None, None + if repo_name and commit: + logging.info( + 'Both repo name and commit specific. Using repo name for detection.') + + # Change to oss-fuzz main directory so helper.py runs correctly. 
+ utils.chdir_to_root() + if not _build_image_with_retries(project_name): + logging.error('Error: building %s image failed.', project_name) + return None, None + docker_image_name = 'gcr.io/oss-fuzz/' + project_name + command_to_run = [ + 'docker', 'run', '--rm', '-t', docker_image_name, 'python3', + os.path.join('/opt', 'cifuzz', 'detect_repo.py') + ] + if repo_name: + command_to_run.extend(['--repo_name', repo_name]) + else: + command_to_run.extend(['--example_commit', commit]) + out, _, _ = utils.execute(command_to_run) + match = re.search(r'\bDetected repo: ([^ ]+) ([^ ]+)', out.rstrip()) + if match and match.group(1) and match.group(2): + return match.group(1), match.group(2) + + logging.error('Failed to detect repo:\n%s', out) + return None, None + + +def load_base_builder_repo(): + """Get base-image digests.""" + gcloud_path = shutil.which('gcloud') + if not gcloud_path: + logging.warning('gcloud not found in PATH.') + return None + + result, _, _ = utils.execute([ + gcloud_path, + 'container', + 'images', + 'list-tags', + 'ghcr.io/aixcc-finals/base-builder', + '--format=json', + '--sort-by=timestamp', + ], + check_result=True) + result = json.loads(result) + + repo = BaseBuilderRepo() + for image in result: + timestamp = datetime.datetime.fromisoformat( + image['timestamp']['datetime']).astimezone(datetime.timezone.utc) + repo.add_digest(timestamp, image['digest']) + + return repo + + +def main(): + """Main function.""" + logging.getLogger().setLevel(logging.INFO) + + parser = argparse.ArgumentParser( + description='Build fuzzers at a specific commit') + parser.add_argument('--project_name', + help='The name of the project where the bug occurred.', + required=True) + parser.add_argument('--commit', + help='The newest commit SHA to be bisected.', + required=True) + parser.add_argument('--engine', + help='The default is "libfuzzer".', + default='libfuzzer') + parser.add_argument('--sanitizer', + default='address', + help='The default is "address".') + parser.add_argument('--architecture', default='x86_64') + + args = parser.parse_args() + + repo_url, repo_path = detect_main_repo(args.project_name, commit=args.commit) + + if not repo_url or not repo_path: + raise ValueError('Main git repo can not be determined.') + + with tempfile.TemporaryDirectory() as tmp_dir: + host_src_dir = copy_src_from_docker(args.project_name, tmp_dir) + build_repo_manager = repo_manager.RepoManager( + os.path.join(host_src_dir, os.path.basename(repo_path))) + base_builder_repo = load_base_builder_repo() + + build_data = BuildData(project_name=args.project_name, + engine=args.engine, + sanitizer=args.sanitizer, + architecture=args.architecture) + if not build_fuzzers_from_commit(args.commit, + build_repo_manager, + host_src_dir, + build_data, + base_builder_repo=base_builder_repo): + raise RuntimeError('Failed to build.') + + +if __name__ == '__main__': + main() diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/build_specified_commit_test.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/build_specified_commit_test.py new file mode 100644 index 0000000000000000000000000000000000000000..00f50947f711e03cdb5190284808b069eab99864 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/build_specified_commit_test.py @@ -0,0 +1,126 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Test the functionality of the build image from commit module.
+This will consist of the following functional tests:
+  1. The inference of the main repo for a specific project.
+  2. The building of a project's fuzzers from a specific commit.
+
+"""
+import os
+import tempfile
+import unittest
+
+import build_specified_commit
+import helper
+import repo_manager
+import test_repos
+
+# necessary because __file__ changes with os.chdir
+TEST_DIR_PATH = os.path.dirname(os.path.realpath(__file__))
+
+
+@unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
+                 'INTEGRATION_TESTS=1 not set')
+class BuildImageIntegrationTest(unittest.TestCase):
+  """Tests if an image can be built from different states, e.g. a commit."""
+
+  @unittest.skip('Test is failing (spuriously?).')
+  def test_build_fuzzers_from_commit(self):
+    """Tests if the fuzzers can build at a specified commit.
+
+    This is done by using a known regression range for a specific test case.
+    The old commit should show the error when its fuzzers run and the new one
+    should not.
+    """
+    with tempfile.TemporaryDirectory() as tmp_dir:
+      test_repo = test_repos.TEST_REPOS[1]
+      self.assertTrue(helper.build_image_impl(test_repo.project_name))
+      host_src_dir = build_specified_commit.copy_src_from_docker(
+          test_repo.project_name, tmp_dir)
+
+      test_repo_manager = repo_manager.clone_repo_and_get_manager(
+          test_repo.git_url, host_src_dir, test_repo.oss_repo_name)
+      build_data = build_specified_commit.BuildData(
+          sanitizer='address',
+          architecture='x86_64',
+          engine='libfuzzer',
+          project_name=test_repo.project_name)
+
+      build_specified_commit.build_fuzzers_from_commit(test_repo.old_commit,
+                                                       test_repo_manager,
+                                                       host_src_dir, build_data)
+      project = helper.Project(test_repo.project_name)
+      old_result = helper.reproduce_impl(project=project,
+                                         fuzzer_name=test_repo.fuzz_target,
+                                         valgrind=False,
+                                         env_to_add=[],
+                                         fuzzer_args=[],
+                                         testcase_path=test_repo.testcase_path)
+      build_specified_commit.build_fuzzers_from_commit(test_repo.project_name,
+                                                       test_repo_manager,
+                                                       host_src_dir, build_data)
+      new_result = helper.reproduce_impl(project=project,
+                                         fuzzer_name=test_repo.fuzz_target,
+                                         valgrind=False,
+                                         env_to_add=[],
+                                         fuzzer_args=[],
+                                         testcase_path=test_repo.testcase_path)
+      self.assertNotEqual(new_result, old_result)
+
+  def test_detect_main_repo_from_commit(self):
+    """Test the detect main repo function from build specific commit module."""
+    # TODO(metzman): Fix these tests so they don't randomly break because of
+    # changes in the outside world.
+    for example_repo in test_repos.TEST_REPOS:
+      if example_repo.new_commit:
+        # TODO(metzman): This function calls _build_image_with_retries which
+        # has a long delay (30 seconds). Figure out how to make this quicker.
+ repo_origin, repo_name = build_specified_commit.detect_main_repo( + example_repo.project_name, commit=example_repo.new_commit) + self.assertEqual(repo_origin, example_repo.git_url) + self.assertEqual(repo_name, + os.path.join('/src', example_repo.oss_repo_name)) + + repo_origin, repo_name = build_specified_commit.detect_main_repo( + test_repos.INVALID_REPO.project_name, + test_repos.INVALID_REPO.new_commit) + self.assertIsNone(repo_origin) + self.assertIsNone(repo_name) + + def test_detect_main_repo_from_name(self): + """Test the detect main repo function from build specific commit module.""" + for example_repo in test_repos.TEST_REPOS: + if example_repo.project_name == 'gonids': + # It's unclear how this test ever passed, but we can't infer the repo + # because gonids doesn't really check it out, it uses "go get". + continue + repo_origin, repo_name = build_specified_commit.detect_main_repo( + example_repo.project_name, repo_name=example_repo.git_repo_name) + self.assertEqual(repo_origin, example_repo.git_url) + self.assertEqual( + repo_name, + os.path.join(example_repo.image_location, example_repo.oss_repo_name)) + + repo_origin, repo_name = build_specified_commit.detect_main_repo( + test_repos.INVALID_REPO.project_name, + test_repos.INVALID_REPO.oss_repo_name) + self.assertIsNone(repo_origin) + self.assertIsNone(repo_name) + + +if __name__ == '__main__': + # Change to oss-fuzz main directory so helper.py runs correctly. + if os.getcwd() != os.path.dirname(TEST_DIR_PATH): + os.chdir(os.path.dirname(TEST_DIR_PATH)) + unittest.main() diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/constants.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..cd9b40d1fc3f1130c2ee3c03f45b6d759fd74d05 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/constants.py @@ -0,0 +1,49 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +################################################################################ +"""Constants for OSS-Fuzz.""" + +DEFAULT_EXTERNAL_BUILD_INTEGRATION_PATH = '.clusterfuzzlite' + +DEFAULT_LANGUAGE = 'c++' +DEFAULT_SANITIZER = 'address' +DEFAULT_ARCHITECTURE = 'x86_64' +DEFAULT_ENGINE = 'libfuzzer' +LANGUAGES = [ + 'c', + 'c++', + 'go', + 'javascript', + 'jvm', + 'python', + 'rust', + 'swift', + 'ruby', +] +LANGUAGES_WITH_COVERAGE_SUPPORT = [ + 'c', 'c++', 'go', 'jvm', 'python', 'rust', 'swift', 'javascript', 'ruby' +] +SANITIZERS = [ + 'address', + 'none', + 'memory', + 'undefined', + 'thread', + 'coverage', + 'introspector', + 'hwaddress', +] +ARCHITECTURES = ['i386', 'x86_64', 'aarch64'] +ENGINES = ['libfuzzer', 'afl', 'honggfuzz', 'centipede', 'none', 'wycheproof'] diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/helper.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..0aefe880791ec4193cedf5fb5986c94a711e9dfc --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/helper.py @@ -0,0 +1,1810 @@ +#!/usr/bin/env python +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +################################################################################ +"""Helper script for OSS-Fuzz users. 
Can do common tasks like building +projects/fuzzers, running them etc.""" + +from __future__ import print_function +from multiprocessing.dummy import Pool as ThreadPool +import argparse +import datetime +import errno +import logging +import os +import re +import shlex +import shutil +import subprocess +import sys +import tempfile + +import constants +import templates + +OSS_FUZZ_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +BUILD_DIR = os.path.join(OSS_FUZZ_DIR, 'build') + +BASE_IMAGE_TAG = ':v1.2.1' # no tag for latest + +BASE_RUNNER_IMAGE = f'ghcr.io/aixcc-finals/base-runner{BASE_IMAGE_TAG}' + +BASE_IMAGES = { + 'generic': [ + f'ghcr.io/aixcc-finals/base-image{BASE_IMAGE_TAG}', + f'ghcr.io/aixcc-finals/base-clang{BASE_IMAGE_TAG}', + f'ghcr.io/aixcc-finals/base-builder{BASE_IMAGE_TAG}', + BASE_RUNNER_IMAGE, + f'ghcr.io/aixcc-finals/base-runner-debug{BASE_IMAGE_TAG}', + ], + 'go': [f'ghcr.io/aixcc-finals/base-builder-go{BASE_IMAGE_TAG}'], + 'javascript': [f'ghcr.io/aixcc-finals/base-builder-javascript{BASE_IMAGE_TAG}'], + 'jvm': [f'ghcr.io/aixcc-finals/base-builder-jvm{BASE_IMAGE_TAG}'], + 'python': [f'ghcr.io/aixcc-finals/base-builder-python{BASE_IMAGE_TAG}'], + 'rust': [f'ghcr.io/aixcc-finals/base-builder-rust{BASE_IMAGE_TAG}'], + 'ruby': [f'ghcr.io/aixcc-finals/base-builder-ruby{BASE_IMAGE_TAG}'], + 'swift': [f'ghcr.io/aixcc-finals/base-builder-swift{BASE_IMAGE_TAG}'], +} + +VALID_PROJECT_NAME_REGEX = re.compile(r'^[a-zA-Z0-9_-]+$') +MAX_PROJECT_NAME_LENGTH = 26 + +CORPUS_URL_FORMAT = ( + 'gs://{project_name}-corpus.clusterfuzz-external.appspot.com/libFuzzer/' + '{fuzz_target}/') +CORPUS_BACKUP_URL_FORMAT = ( + 'gs://{project_name}-backup.clusterfuzz-external.appspot.com/corpus/' + 'libFuzzer/{fuzz_target}/') + +HTTPS_CORPUS_BACKUP_URL_FORMAT = ( + 'https://storage.googleapis.com/{project_name}-backup.clusterfuzz-external' + '.appspot.com/corpus/libFuzzer/{fuzz_target}/public.zip') + +LANGUAGE_REGEX = re.compile(r'[^\s]+') +PROJECT_LANGUAGE_REGEX = re.compile(r'\s*language\s*:\s*([^\s]+)') + +WORKDIR_REGEX = re.compile(r'\s*WORKDIR\s*([^\s]+)') + +# Regex to match special chars in project name. 
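+# Used by _normalized_name below: e.g. a fuzz target named 'libxml2/xml' is
+# normalized to 'libxml2-xml', keeping local and cloud storage paths valid.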
+SPECIAL_CHARS_REGEX = re.compile('[^a-zA-Z0-9_-]')
+
+LANGUAGE_TO_BASE_BUILDER_IMAGE = {
+    'c': 'base-builder',
+    'c++': 'base-builder',
+    'go': 'base-builder-go',
+    'javascript': 'base-builder-javascript',
+    'jvm': 'base-builder-jvm',
+    'python': 'base-builder-python',
+    'ruby': 'base-builder-ruby',
+    'rust': 'base-builder-rust',
+    'swift': 'base-builder-swift'
+}
+ARM_BUILDER_NAME = 'oss-fuzz-buildx-builder'
+
+CLUSTERFUZZLITE_ENGINE = 'libfuzzer'
+CLUSTERFUZZLITE_ARCHITECTURE = 'x86_64'
+CLUSTERFUZZLITE_FILESTORE_DIR = 'filestore'
+CLUSTERFUZZLITE_DOCKER_IMAGE = 'ghcr.io/aixcc-finals/cifuzz-run-fuzzers'
+
+logger = logging.getLogger(__name__)
+
+if sys.version_info[0] >= 3:
+  raw_input = input  # pylint: disable=invalid-name
+
+# pylint: disable=too-many-lines
+
+
+class Project:
+  """Class representing a project that is in OSS-Fuzz or an external project
+  (ClusterFuzzLite user)."""
+
+  def __init__(
+      self,
+      project_name_or_path,
+      is_external=False,
+      build_integration_path=constants.DEFAULT_EXTERNAL_BUILD_INTEGRATION_PATH):
+    self.is_external = is_external
+    if self.is_external:
+      self.path = os.path.abspath(project_name_or_path)
+      self.name = os.path.basename(self.path)
+      self.build_integration_path = os.path.join(self.path,
+                                                 build_integration_path)
+    else:
+      self.name = project_name_or_path
+      self.path = os.path.join(OSS_FUZZ_DIR, 'projects', self.name)
+      self.build_integration_path = self.path
+
+  @property
+  def dockerfile_path(self):
+    """Returns path to the project Dockerfile."""
+    return os.path.join(self.build_integration_path, 'Dockerfile')
+
+  @property
+  def language(self):
+    """Returns project language."""
+    project_yaml_path = os.path.join(self.build_integration_path,
+                                     'project.yaml')
+    if not os.path.exists(project_yaml_path):
+      logger.warning('No project.yaml. Assuming c++.')
+      return constants.DEFAULT_LANGUAGE
+
+    with open(project_yaml_path) as file_handle:
+      content = file_handle.read()
+      for line in content.splitlines():
+        match = PROJECT_LANGUAGE_REGEX.match(line)
+        if match:
+          return match.group(1)
+
+    logger.warning('Language not specified in project.yaml. Assuming c++.')
+    return constants.DEFAULT_LANGUAGE
+
+  @property
+  def coverage_extra_args(self):
+    """Returns project coverage extra args."""
+    project_yaml_path = os.path.join(self.build_integration_path,
+                                     'project.yaml')
+    if not os.path.exists(project_yaml_path):
+      logger.warning('project.yaml not found: %s.', project_yaml_path)
+      return ''
+
+    with open(project_yaml_path) as file_handle:
+      content = file_handle.read()
+
+    coverage_flags = ''
+    read_coverage_extra_args = False
+    # Parse the yaml file and extract the value of the coverage_extra_args key.
+    # This is naive yaml parsing and we do not handle comments at this point.
+    for line in content.splitlines():
+      if read_coverage_extra_args:
+        # Stop reading coverage args if a new yaml key is defined.
+        if len(line) > 0 and line[0] != ' ':
+          break
+        coverage_flags += line
+      if 'coverage_extra_args' in line:
+        read_coverage_extra_args = True
+        # Include the first line only if it's not a multi-line value.
+        if 'coverage_extra_args: >' not in line:
+          coverage_flags += line.replace('coverage_extra_args: ', '')
+    return coverage_flags
+
+  @property
+  def out(self):
+    """Returns the out dir for the project. Creates it if needed."""
+    return _get_out_dir(self.name)
+
+  @property
+  def work(self):
+    """Returns the work dir for the project. 
Creates it if needed."""
+    return _get_project_build_subdir(self.name, 'work')
+
+  @property
+  def corpus(self):
+    """Returns the corpus dir for the project. Creates it if needed."""
+    return _get_project_build_subdir(self.name, 'corpus')
+
+
+def main():  # pylint: disable=too-many-branches,too-many-return-statements
+  """Gets the subcommand from program arguments and runs it. Returns 0 on
+  success, 1 on error."""
+  logging.basicConfig(level=logging.INFO)
+  parser = get_parser()
+  args = parse_args(parser)
+
+  # Need to do this before chdir.
+  # TODO(https://github.com/google/oss-fuzz/issues/6758): Get rid of chdir.
+  if hasattr(args, 'testcase_path'):
+    args.testcase_path = _get_absolute_path(args.testcase_path)
+  # Note: this has to happen after parse_args above as parse_args needs to know
+  # the original CWD for external projects.
+  os.chdir(OSS_FUZZ_DIR)
+  if not os.path.exists(BUILD_DIR):
+    os.mkdir(BUILD_DIR)
+
+  # We have different default values for `sanitizer` depending on the project
+  # language. Some commands do not have a `sanitizer` argument, so `hasattr`
+  # is necessary.
+  if hasattr(args, 'sanitizer') and not args.sanitizer:
+    if args.project.language == 'javascript':
+      args.sanitizer = 'none'
+    else:
+      args.sanitizer = constants.DEFAULT_SANITIZER
+
+  if args.command == 'generate':
+    result = generate(args)
+  elif args.command == 'build_image':
+    result = build_image(args)
+  elif args.command == 'build_fuzzers':
+    result = build_fuzzers(args)
+  elif args.command == 'fuzzbench_build_fuzzers':
+    result = fuzzbench_build_fuzzers(args)
+  elif args.command == 'fuzzbench_run_fuzzer':
+    result = fuzzbench_run_fuzzer(args)
+  elif args.command == 'fuzzbench_measure':
+    result = fuzzbench_measure(args)
+  elif args.command == 'check_build':
+    result = check_build(args)
+  elif args.command == 'download_corpora':
+    result = download_corpora(args)
+  elif args.command == 'run_fuzzer':
+    result = run_fuzzer(args)
+  elif args.command == 'coverage':
+    result = coverage(args)
+  elif args.command == 'introspector':
+    result = introspector(args)
+  elif args.command == 'reproduce':
+    result = reproduce(args)
+    if args.propagate_exit_codes:
+      return result
+  elif args.command == 'shell':
+    result = shell(args)
+  elif args.command == 'pull_images':
+    result = pull_images()
+  elif args.command == 'run_clusterfuzzlite':
+    result = run_clusterfuzzlite(args)
+  else:
+    # Print help string if no arguments provided.
+    parser.print_help()
+    result = False
+  return bool_to_retcode(result)
+
+
+def bool_to_retcode(boolean):
+  """Returns 0 if |boolean| is truthy (0 is the standard return code for a
+  successful process execution) and 1 otherwise, indicating that the process
+  failed."""
+  return 0 if boolean else 1
+
+
+def parse_args(parser, args=None):
+  """Parses |args| using |parser| and returns parsed args. Also changes
+  |args.build_integration_path| to have correct default behavior."""
+  # Use default argument None for args so that in production, argparse does its
+  # normal behavior, but unittesting is easier.
+  parsed_args = parser.parse_args(args)
+  project = getattr(parsed_args, 'project', None)
+  if not project:
+    return parsed_args
+
+  # Use hacky method for extracting attributes so that ShellTest works.
+  # TODO(metzman): Fix this. 
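+  # (getattr rather than direct attribute access: not every subcommand
+  # defines --external, so a missing attribute simply defaults to False.)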
+  is_external = getattr(parsed_args, 'external', False)
+  parsed_args.project = Project(parsed_args.project, is_external)
+  return parsed_args
+
+
+def _add_external_project_args(parser):
+  parser.add_argument(
+      '--external',
+      help='Is project external?',
+      default=False,
+      action='store_true',
+  )
+
+
+def get_parser():  # pylint: disable=too-many-statements,too-many-locals
+  """Returns an argparse parser."""
+  parser = argparse.ArgumentParser('helper.py', description='oss-fuzz helpers')
+  subparsers = parser.add_subparsers(dest='command')
+
+  generate_parser = subparsers.add_parser(
+      'generate', help='Generate files for new project.')
+  generate_parser.add_argument('project')
+  generate_parser.add_argument('--language',
+                               default=constants.DEFAULT_LANGUAGE,
+                               choices=LANGUAGE_TO_BASE_BUILDER_IMAGE.keys(),
+                               help='Project language.')
+  _add_external_project_args(generate_parser)
+
+  build_image_parser = subparsers.add_parser('build_image',
+                                             help='Build an image.')
+  build_image_parser.add_argument('project')
+  build_image_parser.add_argument('--pull',
+                                  action='store_true',
+                                  help='Pull latest base image.')
+  _add_architecture_args(build_image_parser)
+  build_image_parser.add_argument('--cache',
+                                  action='store_true',
+                                  default=False,
+                                  help='Use docker cache when building image.')
+  build_image_parser.add_argument('--no-pull',
+                                  action='store_true',
+                                  help='Do not pull latest base image.')
+  build_image_parser.add_argument('--docker_image_tag',
+                                  dest='docker_image_tag',
+                                  default='latest',
+                                  help='docker image build tag '
+                                  '(default: latest)')
+  _add_external_project_args(build_image_parser)
+
+  build_fuzzers_parser = subparsers.add_parser(
+      'build_fuzzers', help='Build fuzzers for a project.')
+  _add_architecture_args(build_fuzzers_parser)
+  _add_engine_args(build_fuzzers_parser)
+  _add_sanitizer_args(build_fuzzers_parser)
+  _add_environment_args(build_fuzzers_parser)
+  _add_external_project_args(build_fuzzers_parser)
+  build_fuzzers_parser.add_argument('project')
+  build_fuzzers_parser.add_argument('source_path',
+                                    help='path of local source',
+                                    nargs='?')
+  build_fuzzers_parser.add_argument('--mount_path',
+                                    dest='mount_path',
+                                    help='path to mount local source in '
+                                    '(defaults to WORKDIR)')
+  build_fuzzers_parser.add_argument('--clean',
+                                    dest='clean',
+                                    action='store_true',
+                                    help='clean existing artifacts.')
+  build_fuzzers_parser.add_argument('--no-clean',
+                                    dest='clean',
+                                    action='store_false',
+                                    help='do not clean existing artifacts '
+                                    '(default).')
+  build_fuzzers_parser.add_argument('--docker_image_tag',
+                                    dest='docker_image_tag',
+                                    default='latest',
+                                    help='docker image build tag '
+                                    '(default: latest)')
+  build_fuzzers_parser.set_defaults(clean=False)
+
+  fuzzbench_build_fuzzers_parser = subparsers.add_parser(
+      'fuzzbench_build_fuzzers')
+  _add_architecture_args(fuzzbench_build_fuzzers_parser)
+  fuzzbench_build_fuzzers_parser.add_argument('--engine')
+  _add_sanitizer_args(fuzzbench_build_fuzzers_parser)
+  _add_environment_args(fuzzbench_build_fuzzers_parser)
+  _add_external_project_args(fuzzbench_build_fuzzers_parser)
+  fuzzbench_build_fuzzers_parser.add_argument('project')
+  check_build_parser = subparsers.add_parser(
+      'check_build', help='Checks that fuzzers execute without errors.')
+  _add_architecture_args(check_build_parser)
+  _add_engine_args(check_build_parser, choices=constants.ENGINES)
+  _add_sanitizer_args(check_build_parser, choices=constants.SANITIZERS)
+  _add_environment_args(check_build_parser)
+  check_build_parser.add_argument('project',
+                                  help='name of 
the project or path (external)')
+  check_build_parser.add_argument('fuzzer_name',
+                                  help='name of the fuzzer',
+                                  nargs='?')
+  _add_external_project_args(check_build_parser)
+
+  run_fuzzer_parser = subparsers.add_parser(
+      'run_fuzzer', help='Run a fuzzer in the emulated fuzzing environment.')
+  _add_architecture_args(run_fuzzer_parser)
+  _add_engine_args(run_fuzzer_parser)
+  _add_sanitizer_args(run_fuzzer_parser)
+  _add_environment_args(run_fuzzer_parser)
+  _add_external_project_args(run_fuzzer_parser)
+  run_fuzzer_parser.add_argument(
+      '--corpus-dir', help='directory to store corpus for the fuzz target')
+  run_fuzzer_parser.add_argument('project',
+                                 help='name of the project or path (external)')
+  run_fuzzer_parser.add_argument('fuzzer_name', help='name of the fuzzer')
+  run_fuzzer_parser.add_argument('fuzzer_args',
+                                 help='arguments to pass to the fuzzer',
+                                 nargs='*')
+
+  fuzzbench_run_fuzzer_parser = subparsers.add_parser('fuzzbench_run_fuzzer')
+  _add_architecture_args(fuzzbench_run_fuzzer_parser)
+  fuzzbench_run_fuzzer_parser.add_argument('--engine')
+  _add_sanitizer_args(fuzzbench_run_fuzzer_parser)
+  _add_environment_args(fuzzbench_run_fuzzer_parser)
+  _add_external_project_args(fuzzbench_run_fuzzer_parser)
+  fuzzbench_run_fuzzer_parser.add_argument(
+      '--corpus-dir', help='directory to store corpus for the fuzz target')
+  fuzzbench_run_fuzzer_parser.add_argument(
+      'project', help='name of the project or path (external)')
+  fuzzbench_run_fuzzer_parser.add_argument('fuzzer_name',
+                                           help='name of the fuzzer')
+  fuzzbench_run_fuzzer_parser.add_argument(
+      'fuzzer_args', help='arguments to pass to the fuzzer', nargs='*')
+
+  fuzzbench_measure_parser = subparsers.add_parser('fuzzbench_measure')
+  fuzzbench_measure_parser.add_argument(
+      'project', help='name of the project or path (external)')
+  fuzzbench_measure_parser.add_argument('engine_name',
+                                        help='name of the fuzzing engine')
+  fuzzbench_measure_parser.add_argument('fuzz_target_name',
+                                        help='name of the fuzz target')
+
+  coverage_parser = subparsers.add_parser(
+      'coverage', help='Generate code coverage report for the project.')
+  coverage_parser.add_argument('--no-corpus-download',
+                               action='store_true',
+                               help='do not download corpus backup from '
+                               'OSS-Fuzz; use corpus located in '
+                               'build/corpus/<project>/<fuzz_target>/')
+  coverage_parser.add_argument('--no-serve',
+                               action='store_true',
+                               help='do not serve a local HTTP server.')
+  coverage_parser.add_argument('--port',
+                               default='8008',
+                               help='specify port for '
+                               'a local HTTP server rendering coverage report')
+  coverage_parser.add_argument('--fuzz-target',
+                               help='specify name of a fuzz '
+                               'target to be run for generating coverage '
+                               'report')
+  coverage_parser.add_argument('--corpus-dir',
+                               help='specify location of corpus '
+                               'to be used (requires --fuzz-target argument)')
+  coverage_parser.add_argument('--public',
+                               action='store_true',
+                               help='if set, will download public '
+                               'corpus using wget')
+  coverage_parser.add_argument('project',
+                               help='name of the project or path (external)')
+  coverage_parser.add_argument('extra_args',
+                               help='additional arguments to '
+                               'pass to llvm-cov utility.',
+                               nargs='*')
+  _add_external_project_args(coverage_parser)
+  _add_architecture_args(coverage_parser)
+
+  introspector_parser = subparsers.add_parser(
+      'introspector',
+      help='Run a complete end-to-end run of '
+      'fuzz introspector. 
This involves (1) '
+      'building the fuzzers with ASAN; (2) '
+      'running all fuzzers; (3) building '
+      'fuzzers with coverage; (4) extracting '
+      'coverage; (5) building fuzzers using '
+      'introspector')
+  introspector_parser.add_argument('project', help='name of the project')
+  introspector_parser.add_argument('--seconds',
+                                   help='number of seconds to run fuzzers',
+                                   default=10)
+  introspector_parser.add_argument('source_path',
+                                   help='path of local source',
+                                   nargs='?')
+  introspector_parser.add_argument(
+      '--public-corpora',
+      help='if specified, will use public corpora for code coverage',
+      default=False,
+      action='store_true')
+  introspector_parser.add_argument(
+      '--private-corpora',
+      help='if specified, will use private corpora',
+      default=False,
+      action='store_true')
+
+  download_corpora_parser = subparsers.add_parser(
+      'download_corpora', help='Download all corpora for a project.')
+  download_corpora_parser.add_argument('--fuzz-target',
+                                       nargs='+',
+                                       help='specify name of a fuzz target')
+  download_corpora_parser.add_argument('--public',
+                                       action='store_true',
+                                       help='if set, will download public '
+                                       'corpus using wget')
+  download_corpora_parser.add_argument(
+      'project', help='name of the project or path (external)')
+
+  reproduce_parser = subparsers.add_parser('reproduce',
+                                           help='Reproduce a crash.')
+  reproduce_parser.add_argument('--valgrind',
+                                action='store_true',
+                                help='run with valgrind')
+  reproduce_parser.add_argument('--propagate_exit_codes',
+                                action='store_true',
+                                default=False,
+                                help='return underlying exit codes instead of True/False.')
+  reproduce_parser.add_argument('--not_privileged',
+                                dest='privileged',
+                                action='store_false',
+                                default=True,
+                                help='reproduce without running docker in privileged mode.')
+  reproduce_parser.add_argument('--err_result',
+                                help='exit code override for missing harness / fuzz targets '
+                                '(default err_result = 1).',
+                                type=int)
+  reproduce_parser.add_argument('--timeout',
+                                help='timeout for reproduce subprocess '
+                                '(default: None).',
+                                default=None,
+                                type=int)
+  reproduce_parser.add_argument('project',
+                                help='name of the project or path (external)')
+  reproduce_parser.add_argument('fuzzer_name', help='name of the fuzzer')
+  reproduce_parser.add_argument('testcase_path', help='path of local testcase')
+  reproduce_parser.add_argument('fuzzer_args',
+                                help='arguments to pass to the fuzzer',
+                                nargs='*')
+  _add_environment_args(reproduce_parser)
+  _add_external_project_args(reproduce_parser)
+  _add_architecture_args(reproduce_parser)
+
+  shell_parser = subparsers.add_parser(
+      'shell', help='Run /bin/bash within the builder container.')
+  shell_parser.add_argument('project',
+                            help='name of the project or path (external)')
+  shell_parser.add_argument('source_path',
+                            help='path of local source',
+                            nargs='?')
+  shell_parser.add_argument('--docker_image_tag',
+                            dest='docker_image_tag',
+                            default='latest',
+                            help='docker image build tag '
+                            '(default: latest)')
+  _add_architecture_args(shell_parser)
+  _add_engine_args(shell_parser)
+  _add_sanitizer_args(shell_parser)
+  _add_environment_args(shell_parser)
+  _add_external_project_args(shell_parser)
+
+  run_clusterfuzzlite_parser = subparsers.add_parser(
+      'run_clusterfuzzlite', help='Run ClusterFuzzLite on a project.')
+  _add_sanitizer_args(run_clusterfuzzlite_parser)
+  _add_environment_args(run_clusterfuzzlite_parser)
+  run_clusterfuzzlite_parser.add_argument('project')
+  run_clusterfuzzlite_parser.add_argument('--clean',
+                                          dest='clean',
+                                          action='store_true',
+                                          help='clean 
existing artifacts.') + run_clusterfuzzlite_parser.add_argument( + '--no-clean', + dest='clean', + action='store_false', + help='do not clean existing artifacts ' + '(default).') + run_clusterfuzzlite_parser.add_argument('--branch', + default='master', + required=True) + _add_external_project_args(run_clusterfuzzlite_parser) + run_clusterfuzzlite_parser.set_defaults(clean=False) + + subparsers.add_parser('pull_images', help='Pull base images.') + return parser + + +def is_base_image(image_name): + """Checks if the image name is a base image.""" + return os.path.exists(os.path.join('infra', 'base-images', image_name)) + + +def check_project_exists(project): + """Checks if a project exists.""" + if os.path.exists(project.path): + return True + + if project.is_external: + descriptive_project_name = project.path + else: + descriptive_project_name = project.name + + logger.error('"%s" does not exist.', descriptive_project_name) + return False + + +def _check_fuzzer_exists(project, fuzzer_name, architecture='x86_64'): + """Checks if a fuzzer exists.""" + platform = 'linux/arm64' if architecture == 'aarch64' else 'linux/amd64' + command = ['docker', 'run', '--rm', '--platform', platform] + command.extend(['-v', '%s:/out' % project.out]) + command.append(BASE_RUNNER_IMAGE) + + command.extend(['/bin/bash', '-c', 'test -f /out/%s' % fuzzer_name]) + + try: + subprocess.check_call(command) + except subprocess.CalledProcessError: + logger.error('%s does not seem to exist. Please run build_fuzzers first.', + fuzzer_name) + return False + + return True + + +def _normalized_name(name): + """Return normalized name with special chars like slash, colon, etc normalized + to hyphen(-). This is important as otherwise these chars break local and cloud + storage paths.""" + return SPECIAL_CHARS_REGEX.sub('-', name).strip('-') + + +def _get_absolute_path(path): + """Returns absolute path with user expansion.""" + return os.path.abspath(os.path.expanduser(path)) + + +def _get_command_string(command): + """Returns a shell escaped command string.""" + return ' '.join(shlex.quote(part) for part in command) + + +def _get_project_build_subdir(project, subdir_name): + """Creates the |subdir_name| subdirectory of the |project| subdirectory in + |BUILD_DIR| and returns its path.""" + directory = os.path.join(BUILD_DIR, subdir_name, project) + os.makedirs(directory, exist_ok=True) + + return directory + + +def _get_out_dir(project=''): + """Creates and returns path to /out directory for the given project (if + specified).""" + return _get_project_build_subdir(project, 'out') + + +def _add_architecture_args(parser, choices=None): + """Adds common architecture args.""" + if choices is None: + choices = constants.ARCHITECTURES + parser.add_argument('--architecture', + default=constants.DEFAULT_ARCHITECTURE, + choices=choices) + + +def _add_engine_args(parser, choices=None): + """Adds common engine args.""" + if choices is None: + choices = constants.ENGINES + parser.add_argument('--engine', + default=constants.DEFAULT_ENGINE, + choices=choices) + + +def _add_sanitizer_args(parser, choices=None): + """Adds common sanitizer args.""" + if choices is None: + choices = constants.SANITIZERS + parser.add_argument('--sanitizer', + default=None, + choices=choices, + help='the default is "address"') + + +def _add_environment_args(parser): + """Adds common environment args.""" + parser.add_argument('-e', + action='append', + help="set environment variable e.g. 
VAR=value") + + +def build_image_impl(project, cache=True, pull=False, + architecture='x86_64', + docker_image_tag='latest'): + """Builds image.""" + image_name = project.name + + if is_base_image(image_name): + image_project = 'aixcc-finals' + docker_build_dir = os.path.join(OSS_FUZZ_DIR, 'infra', 'base-images', + image_name) + dockerfile_path = os.path.join(docker_build_dir, 'Dockerfile') + image_name = 'ghcr.io/%s/%s%s' % (image_project, image_name, BASE_IMAGE_TAG) + else: + if not check_project_exists(project): + return False + dockerfile_path = project.dockerfile_path + docker_build_dir = project.path + image_project = 'aixcc-afc' + image_name = '%s/%s:%s' % (image_project, image_name, docker_image_tag) + + if pull and not pull_images(project.language): + return False + + build_args = [] + if architecture == 'aarch64': + build_args += [ + 'buildx', + 'build', + '--platform', + 'linux/arm64', + '--progress', + 'plain', + '--load', + ] + if not cache: + build_args.append('--no-cache') + + build_args += ['-t', image_name, '--file', dockerfile_path] + build_args.append(docker_build_dir) + + if architecture == 'aarch64': + command = ['docker'] + build_args + subprocess.check_call(command) + return True + return docker_build(build_args) + + +def _env_to_docker_args(env_list): + """Turns envirnoment variable list into docker arguments.""" + return sum([['-e', v] for v in env_list], []) + + +def workdir_from_lines(lines, default='/src'): + """Gets the WORKDIR from the given lines.""" + for line in reversed(lines): # reversed to get last WORKDIR. + match = re.match(WORKDIR_REGEX, line) + if match: + workdir = match.group(1) + workdir = workdir.replace('$SRC', '/src') + + if not os.path.isabs(workdir): + workdir = os.path.join('/src', workdir) + + return os.path.normpath(workdir) + + return default + + +def _workdir_from_dockerfile(project): + """Parses WORKDIR from the Dockerfile for the given project.""" + with open(project.dockerfile_path) as file_handle: + lines = file_handle.readlines() + + return workdir_from_lines(lines, default=os.path.join('/src', project.name)) + + +def prepare_aarch64_emulation(): + """Run some necessary commands to use buildx to build AArch64 targets using + QEMU emulation on an x86_64 host.""" + subprocess.check_call( + ['docker', 'buildx', 'create', '--name', ARM_BUILDER_NAME]) + subprocess.check_call(['docker', 'buildx', 'use', ARM_BUILDER_NAME]) + + +def docker_run(run_args, print_output=True, architecture='x86_64', propagate_exit_codes=False, privileged=True, timeout=None): + """Calls `docker run`.""" + platform = 'linux/arm64' if architecture == 'aarch64' else 'linux/amd64' + + if privileged: + command = [ + 'docker', 'run', '--privileged', '--shm-size=2g', '--platform', platform + ] + else: + command = [ + 'docker', 'run', '--shm-size=2g', '--platform', platform + ] + + if os.getenv('OSS_FUZZ_SAVE_CONTAINERS_NAME'): + command.append('--name') + command.append(os.getenv('OSS_FUZZ_SAVE_CONTAINERS_NAME')) + else: + command.append('--rm') + + # Support environments with a TTY. 
+ if sys.stdin.isatty(): + command.append('-i') + + command.extend(run_args) + + logger.info('Running: %s.', _get_command_string(command)) + stdout = None + if not print_output: + stdout = open(os.devnull, 'w') + + exit_code = 0 + + try: + subprocess.check_call(command, stdout=stdout, stderr=subprocess.STDOUT, + timeout=timeout) + except subprocess.CalledProcessError as e: + print(f'subprocess command returned a non-zero exit status: {e.returncode}') + exit_code = e.returncode + except subprocess.TimeoutExpired: + print(f'subprocess command timed out: {timeout=}') + exit_code = 124 + + return exit_code if propagate_exit_codes else exit_code == 0 + + +def docker_build(build_args): + """Calls `docker build`.""" + command = ['docker', 'build'] + command.extend(build_args) + logger.info('Running: %s.', _get_command_string(command)) + + try: + subprocess.check_call(command) + except subprocess.CalledProcessError: + logger.error('Docker build failed.') + return False + + return True + + +def docker_pull(image): + """Call `docker pull`.""" + command = ['docker', 'pull', image] + logger.info('Running: %s', _get_command_string(command)) + + try: + subprocess.check_call(command) + except subprocess.CalledProcessError: + logger.error('Docker pull failed.') + return False + + return True + + +def build_image(args): + """Builds docker image.""" + if args.pull and args.no_pull: + logger.error('Incompatible arguments --pull and --no-pull.') + return False + + if args.pull: + pull = True + elif args.no_pull: + pull = False + else: + y_or_n = raw_input('Pull latest base images (compiler/runtime)? (y/N): ') + pull = y_or_n.lower() == 'y' + + if pull: + logger.info('Pulling latest base images...') + else: + logger.info('Using cached base images...') + + # If build_image is called explicitly, don't use cache. + if build_image_impl(args.project, + cache=args.cache, + pull=pull, + architecture=args.architecture, + docker_image_tag=args.docker_image_tag): + return True + + return False + + +def build_fuzzers_impl( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches + project, + clean, + engine, + sanitizer, + architecture, + env_to_add, + source_path, + mount_path=None, + child_dir='', + build_project_image=True, + docker_image_tag='latest'): + """Builds fuzzers.""" + if build_project_image and not build_image_impl(project, + architecture=architecture, + docker_image_tag=docker_image_tag): + return False + + docker_image = f'aixcc-afc/{project.name}:{docker_image_tag}' + + project_out = os.path.join(project.out, child_dir) + if clean: + logger.info('Cleaning existing build artifacts.') + + # Clean old and possibly conflicting artifacts in project's out directory. 
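+    # (The removal runs inside a container because /out and /work artifacts
+    # are typically created as root during builds and may not be deletable
+    # by the host user directly.)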
+ docker_run([ + '-v', f'{project_out}:/out', '-t', f'{docker_image}', + '/bin/bash', '-c', 'rm -rf /out/*' + ], + architecture=architecture) + + docker_run([ + '-v', + '%s:/work' % project.work, '-t', + f'{docker_image}', '/bin/bash', '-c', 'rm -rf /work/*' + ], + architecture=architecture) + + else: + logger.info('Keeping existing build artifacts as-is (if any).') + env = [ + 'FUZZING_ENGINE=' + engine, + 'SANITIZER=' + sanitizer, + 'ARCHITECTURE=' + architecture, + 'PROJECT_NAME=' + project.name, + 'HELPER=True', + ] + + _add_oss_fuzz_ci_if_needed(env) + + if project.language: + env.append('FUZZING_LANGUAGE=' + project.language) + + if env_to_add: + env += env_to_add + + command = _env_to_docker_args(env) + if source_path: + workdir = _workdir_from_dockerfile(project) + stateless_path = mount_path if mount_path else workdir + + if stateless_path == '/src': + logger.error('Cannot mount local source targeting "/src".') + return False + + command += [ + '-v', + '%s:%s:ro' % (_get_absolute_path(source_path), '/local-source-mount'), + ] + + command += [ + '-v', f'{project_out}:/out', '-v', f'{project.work}:/work', + f'{docker_image}' + ] + + if sys.stdin.isatty(): + command.insert(-1, '-t') + + if source_path: + default_cmd = 'compile' + command += [ + '/bin/bash', + '-c', + f'pushd $SRC && rm -rf {stateless_path} && cp -r /local-source-mount {stateless_path} && popd && {default_cmd}' + ] + + result = docker_run(command, architecture=architecture) + if not result: + logger.error('Building fuzzers failed.') + return False + + return True + + +def run_clusterfuzzlite(args): + """Runs ClusterFuzzLite on a local repo.""" + if not os.path.exists(CLUSTERFUZZLITE_FILESTORE_DIR): + os.mkdir(CLUSTERFUZZLITE_FILESTORE_DIR) + + try: + with tempfile.TemporaryDirectory() as workspace: + + if args.external: + project_src_path = os.path.join(workspace, args.project.name) + shutil.copytree(args.project.path, project_src_path) + + build_command = [ + '--tag', 'ghcr.io/aixcc-finals/cifuzz-run-fuzzers', '--file', + 'infra/run_fuzzers.Dockerfile', 'infra' + ] + if not docker_build(build_command): + return False + filestore_path = os.path.abspath(CLUSTERFUZZLITE_FILESTORE_DIR) + docker_run_command = [] + if args.external: + docker_run_command += [ + '-e', + f'PROJECT_SRC_PATH={project_src_path}', + ] + else: + docker_run_command += [ + '-e', + f'OSS_FUZZ_PROJECT_NAME={args.project.name}', + ] + docker_run_command += [ + '-v', + f'{filestore_path}:{filestore_path}', + '-v', + f'{workspace}:{workspace}', + '-e', + f'FILESTORE_ROOT_DIR={filestore_path}', + '-e', + f'WORKSPACE={workspace}', + '-e', + f'REPOSITORY={args.project.name}', + '-e', + 'CFL_PLATFORM=standalone', + '--entrypoint', + '', + '-v', + '/var/run/docker.sock:/var/run/docker.sock', + CLUSTERFUZZLITE_DOCKER_IMAGE, + 'python3', + '/opt/oss-fuzz/infra/cifuzz/cifuzz_combined_entrypoint.py', + ] + return docker_run(docker_run_command) + + except PermissionError as error: + logger.error('PermissionError: %s.', error) + # Tempfile can't delete the workspace because of a permissions issue. This + # is because docker creates files in the workspace that are owned by root + # but this process is probably being run as another user. Use a docker image + # to delete the temp directory (workspace) so that we have permission. 
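+    # ('--entrypoint' is cleared so the image's default entrypoint does not
+    # intercept the rm command below.)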
+    docker_run([
+        '-v', f'{workspace}:{workspace}', '--entrypoint', '',
+        CLUSTERFUZZLITE_DOCKER_IMAGE, 'rm', '-rf',
+        os.path.join(workspace, '*')
+    ])
+    return False
+
+
+def build_fuzzers(args):
+  """Builds fuzzers."""
+  if args.engine == 'centipede' and args.sanitizer != 'none':
+    # Centipede always requires separate binaries for sanitizers:
+    # An unsanitized binary, which Centipede requires for fuzzing.
+    # A sanitized binary, placed in the child directory.
+    sanitized_binary_directories = (
+        ('none', ''),
+        (args.sanitizer, f'__centipede_{args.sanitizer}'),
+    )
+  else:
+    # Generally, a fuzzer only needs one sanitized binary in the default dir.
+    sanitized_binary_directories = ((args.sanitizer, ''),)
+  return all(
+      build_fuzzers_impl(args.project,
+                         args.clean,
+                         args.engine,
+                         sanitizer,
+                         args.architecture,
+                         args.e,
+                         args.source_path,
+                         mount_path=args.mount_path,
+                         child_dir=child_dir,
+                         docker_image_tag=args.docker_image_tag)
+      for sanitizer, child_dir in sanitized_binary_directories)
+
+
+def fuzzbench_build_fuzzers(args):
+  """Builds fuzz targets with an arbitrary fuzzer from FuzzBench."""
+  with tempfile.TemporaryDirectory() as tmp_dir:
+    tmp_dir = os.path.abspath(tmp_dir)
+    fuzzbench_path = os.path.join(tmp_dir, 'fuzzbench')
+    subprocess.run([
+        'git', 'clone', 'https://github.com/google/fuzzbench', '--depth', '1',
+        fuzzbench_path
+    ],
+                   check=True)
+    env = [
+        f'FUZZBENCH_PATH={fuzzbench_path}', 'OSS_FUZZ_ON_DEMAND=1',
+        f'PROJECT={args.project.name}'
+    ]
+    tag = f'aixcc-afc/{args.project.name}'
+    subprocess.run([
+        'docker', 'tag', 'ghcr.io/aixcc-finals/base-builder-fuzzbench',
+        f'ghcr.io/aixcc-finals/base-builder{BASE_IMAGE_TAG}'
+    ],
+                   check=True)
+    build_image_impl(args.project)
+    assert docker_build([
+        '--tag', tag, '--build-arg', f'parent_image={tag}', '--file',
+        os.path.join(fuzzbench_path, 'fuzzers', args.engine,
+                     'builder.Dockerfile'),
+        os.path.join(fuzzbench_path, 'fuzzers', args.engine)
+    ])
+
+    return build_fuzzers_impl(args.project,
+                              False,
+                              args.engine,
+                              args.sanitizer,
+                              args.architecture,
+                              env,
+                              source_path=fuzzbench_path,
+                              mount_path=fuzzbench_path,
+                              build_project_image=False)
+
+
+def _add_oss_fuzz_ci_if_needed(env):
+  """Adds value of |OSS_FUZZ_CI| environment variable to |env| if it is set."""
+  oss_fuzz_ci = os.getenv('OSS_FUZZ_CI')
+  if oss_fuzz_ci:
+    env.append('OSS_FUZZ_CI=' + oss_fuzz_ci)
+
+
+def check_build(args):
+  """Checks that fuzzers in the container execute without errors."""
+  if not check_project_exists(args.project):
+    return False
+
+  if (args.fuzzer_name and not _check_fuzzer_exists(
+      args.project, args.fuzzer_name, args.architecture)):
+    return False
+
+  env = [
+      'FUZZING_ENGINE=' + args.engine,
+      'SANITIZER=' + args.sanitizer,
+      'ARCHITECTURE=' + args.architecture,
+      'FUZZING_LANGUAGE=' + args.project.language,
+      'HELPER=True',
+  ]
+  _add_oss_fuzz_ci_if_needed(env)
+  if args.e:
+    env += args.e
+
+  run_args = _env_to_docker_args(env) + [
+      '-v', f'{args.project.out}:/out', '-t', BASE_RUNNER_IMAGE
+  ]
+
+  if args.fuzzer_name:
+    run_args += ['test_one.py', args.fuzzer_name]
+  else:
+    run_args.append('test_all.py')
+
+  result = docker_run(run_args, architecture=args.architecture)
+  if result:
+    logger.info('Check build passed.')
+  else:
+    logger.error('Check build failed.')
+
+  return result
+
+
+def _get_fuzz_targets(project):
+  """Returns names of fuzz targets built in the project's /out directory."""
+  fuzz_targets = []
+  for name in os.listdir(project.out):
+    if name.startswith('afl-'):
+      continue
+    if name == 
'centipede': + continue + if name.startswith('jazzer_'): + continue + if name == 'llvm-symbolizer': + continue + + path = os.path.join(project.out, name) + # Python and JVM fuzz targets are only executable for the root user, so + # we can't use os.access. + if os.path.isfile(path) and (os.stat(path).st_mode & 0o111): + fuzz_targets.append(name) + + return fuzz_targets + + +def _get_latest_corpus(project, fuzz_target, base_corpus_dir): + """Downloads the latest corpus for the given fuzz target.""" + corpus_dir = os.path.join(base_corpus_dir, fuzz_target) + os.makedirs(corpus_dir, exist_ok=True) + + if not fuzz_target.startswith(project.name + '_'): + fuzz_target = '%s_%s' % (project.name, fuzz_target) + + # Normalise fuzz target name. + fuzz_target = _normalized_name(fuzz_target) + + corpus_backup_url = CORPUS_BACKUP_URL_FORMAT.format(project_name=project.name, + fuzz_target=fuzz_target) + command = ['gsutil', 'ls', corpus_backup_url] + + # Don't capture stderr. We want it to print in real time, in case gsutil is + # asking for two-factor authentication. + corpus_listing = subprocess.Popen(command, stdout=subprocess.PIPE) + output, _ = corpus_listing.communicate() + + # Some fuzz targets (e.g. new ones) may not have corpus yet, just skip those. + if corpus_listing.returncode: + logger.warning('Corpus for %s not found:\n', fuzz_target) + return + + if output: + latest_backup_url = output.splitlines()[-1] + archive_path = corpus_dir + '.zip' + command = ['gsutil', '-q', 'cp', latest_backup_url, archive_path] + subprocess.check_call(command) + + command = ['unzip', '-q', '-o', archive_path, '-d', corpus_dir] + subprocess.check_call(command) + os.remove(archive_path) + else: + # Sync the working corpus copy if a minimized backup is not available. + corpus_url = CORPUS_URL_FORMAT.format(project_name=project.name, + fuzz_target=fuzz_target) + command = ['gsutil', '-m', '-q', 'rsync', '-R', corpus_url, corpus_dir] + subprocess.check_call(command) + + +def _get_latest_public_corpus(args, fuzzer): + """Downloads the public corpus""" + target_corpus_dir = "build/corpus/%s" % args.project.name + if not os.path.isdir(target_corpus_dir): + os.makedirs(target_corpus_dir) + + target_zip = os.path.join(target_corpus_dir, fuzzer + ".zip") + + project_qualified_fuzz_target_name = fuzzer + qualified_name_prefix = args.project.name + '_' + if not fuzzer.startswith(qualified_name_prefix): + project_qualified_fuzz_target_name = qualified_name_prefix + fuzzer + + download_url = HTTPS_CORPUS_BACKUP_URL_FORMAT.format( + project_name=args.project.name, + fuzz_target=project_qualified_fuzz_target_name) + + cmd = ['wget', download_url, '-O', target_zip] + try: + with open(os.devnull, 'w') as stdout: + subprocess.check_call(cmd, stdout=stdout) + except OSError: + logger.error('Failed to download corpus') + + target_fuzzer_dir = os.path.join(target_corpus_dir, fuzzer) + if not os.path.isdir(target_fuzzer_dir): + os.mkdir(target_fuzzer_dir) + + target_corpus_dir = os.path.join(target_corpus_dir, fuzzer) + try: + with open(os.devnull, 'w') as stdout: + subprocess.check_call( + ['unzip', '-q', '-o', target_zip, '-d', target_fuzzer_dir], + stdout=stdout) + except OSError: + logger.error('Failed to unzip corpus') + + # Remove the downloaded zip + os.remove(target_zip) + return True + + +def download_corpora(args): + """Downloads most recent corpora from GCS for the given project.""" + if not check_project_exists(args.project): + return False + + if args.public: + logger.info("Downloading public corpus") + try: + with 
open(os.devnull, 'w') as stdout: + subprocess.check_call(['wget', '--version'], stdout=stdout) + except OSError: + logger.error('wget not found') + return False + else: + try: + with open(os.devnull, 'w') as stdout: + subprocess.check_call(['gsutil', '--version'], stdout=stdout) + except OSError: + logger.error('gsutil not found. Please install it from ' + 'https://cloud.google.com/storage/docs/gsutil_install') + return False + + if args.fuzz_target: + fuzz_targets = args.fuzz_target + else: + fuzz_targets = _get_fuzz_targets(args.project) + + if not fuzz_targets: + logger.error( + 'Fuzz targets not found. Please build project first ' + '(python3 infra/helper.py build_fuzzers %s) so that download_corpora ' + 'can automatically identify targets.', args.project.name) + return False + + corpus_dir = args.project.corpus + + def _download_for_single_target(fuzz_target): + try: + if args.public: + _get_latest_public_corpus(args, fuzz_target) + else: + _get_latest_corpus(args.project, fuzz_target, corpus_dir) + return True + except Exception as error: # pylint:disable=broad-except + logger.error('Corpus download for %s failed: %s.', fuzz_target, + str(error)) + return False + + logger.info('Downloading corpora for %s project to %s.', args.project.name, + corpus_dir) + thread_pool = ThreadPool() + return all(thread_pool.map(_download_for_single_target, fuzz_targets)) + + +def coverage(args): # pylint: disable=too-many-branches + """Generates code coverage using clang source based code coverage.""" + if args.corpus_dir and not args.fuzz_target: + logger.error( + '--corpus-dir requires specifying a particular fuzz target using ' + '--fuzz-target') + return False + + if not check_project_exists(args.project): + return False + + if args.project.language not in constants.LANGUAGES_WITH_COVERAGE_SUPPORT: + logger.error( + 'Project is written in %s, coverage for it is not supported yet.', + args.project.language) + return False + + if (not args.no_corpus_download and not args.corpus_dir and + not args.project.is_external): + if not download_corpora(args): + return False + + extra_cov_args = ( + f'{args.project.coverage_extra_args.strip()} {" ".join(args.extra_args)}') + env = [ + 'FUZZING_ENGINE=libfuzzer', + 'HELPER=True', + 'FUZZING_LANGUAGE=%s' % args.project.language, + 'PROJECT=%s' % args.project.name, + 'SANITIZER=coverage', + 'COVERAGE_EXTRA_ARGS=%s' % extra_cov_args, + 'ARCHITECTURE=' + args.architecture, + ] + + if not args.no_serve: + env.append(f'HTTP_PORT={args.port}') + + run_args = _env_to_docker_args(env) + + if args.port: + run_args.extend([ + '-p', + '%s:%s' % (args.port, args.port), + ]) + + if args.corpus_dir: + if not os.path.exists(args.corpus_dir): + logger.error('The path provided in --corpus-dir argument does not ' + 'exist.') + return False + corpus_dir = os.path.realpath(args.corpus_dir) + run_args.extend(['-v', '%s:/corpus/%s' % (corpus_dir, args.fuzz_target)]) + else: + run_args.extend(['-v', '%s:/corpus' % args.project.corpus]) + + run_args.extend([ + '-v', + '%s:/out' % args.project.out, + '-t', + BASE_RUNNER_IMAGE, + ]) + + run_args.append('coverage') + if args.fuzz_target: + run_args.append(args.fuzz_target) + + result = docker_run(run_args, architecture=args.architecture) + if result: + logger.info('Successfully generated clang code coverage report.') + else: + logger.error('Failed to generate clang code coverage report.') + + return result + + +def _introspector_prepare_corpus(args): + """Helper function for introspector runs to generate corpora.""" + parser = 
get_parser() + # Generate corpus, either by downloading or running fuzzers. + if args.private_corpora or args.public_corpora: + corpora_command = ['download_corpora'] + if args.public_corpora: + corpora_command.append('--public') + corpora_command.append(args.project.name) + if not download_corpora(parse_args(parser, corpora_command)): + logger.error('Failed to download corpora') + return False + else: + fuzzer_targets = _get_fuzz_targets(args.project) + for fuzzer_name in fuzzer_targets: + # Make a corpus directory. + fuzzer_corpus_dir = args.project.corpus + f'/{fuzzer_name}' + if not os.path.isdir(fuzzer_corpus_dir): + os.makedirs(fuzzer_corpus_dir) + run_fuzzer_command = [ + 'run_fuzzer', '--sanitizer', 'address', '--corpus-dir', + fuzzer_corpus_dir, args.project.name, fuzzer_name + ] + + parsed_args = parse_args(parser, run_fuzzer_command) + parsed_args.fuzzer_args = [ + f'-max_total_time={args.seconds}', '-detect_leaks=0' + ] + # Continue even if run command fails, because we do not have 100% + # accuracy in fuzz target detection, i.e. we might try to run something + # that is not a target. + run_fuzzer(parsed_args) + return True + + +def introspector(args): + """Runs a complete end-to-end run of introspector.""" + parser = get_parser() + + args_to_append = [] + if args.source_path: + args_to_append.append(_get_absolute_path(args.source_path)) + + # Build fuzzers with ASAN. + build_fuzzers_command = [ + 'build_fuzzers', '--sanitizer=address', args.project.name + ] + args_to_append + if not build_fuzzers(parse_args(parser, build_fuzzers_command)): + logger.error('Failed to build project with ASAN') + return False + + if not _introspector_prepare_corpus(args): + return False + + # Build code coverage. + build_fuzzers_command = [ + 'build_fuzzers', '--sanitizer=coverage', args.project.name + ] + args_to_append + if not build_fuzzers(parse_args(parser, build_fuzzers_command)): + logger.error('Failed to build project with coverage instrumentation') + return False + + # Collect coverage. + coverage_command = [ + 'coverage', '--no-corpus-download', '--port', '', args.project.name + ] + if not coverage(parse_args(parser, coverage_command)): + logger.error('Failed to extract coverage') + return False + + # Build introspector. + build_fuzzers_command = [ + 'build_fuzzers', '--sanitizer=introspector', args.project.name + ] + args_to_append + if not build_fuzzers(parse_args(parser, build_fuzzers_command)): + logger.error('Failed to build project with introspector') + return False + + introspector_dst = os.path.join(args.project.out, + "introspector-report/inspector") + shutil.rmtree(introspector_dst, ignore_errors=True) + shutil.copytree(os.path.join(args.project.out, "inspector"), introspector_dst) + + # Copy the coverage reports into the introspector report. + dst_cov_report = os.path.join(introspector_dst, "covreport") + shutil.copytree(os.path.join(args.project.out, "report"), dst_cov_report) + + # Copy per-target coverage reports + src_target_cov_report = os.path.join(args.project.out, "report_target") + for target_cov_dir in os.listdir(src_target_cov_report): + dst_target_cov_report = os.path.join(dst_cov_report, target_cov_dir) + shutil.copytree(os.path.join(src_target_cov_report, target_cov_dir), + dst_target_cov_report) + + logger.info('Introspector run complete. 
Report in %s', introspector_dst)
+  logger.info(
+      'To browse the report, run: `python3 -m http.server 8008 --directory %s` '
+      'and navigate to localhost:8008/fuzz_report.html in your browser',
+      introspector_dst)
+  return True
+
+
+def run_fuzzer(args):
+  """Runs a fuzzer in the container."""
+  if not check_project_exists(args.project):
+    return False
+
+  if not _check_fuzzer_exists(args.project, args.fuzzer_name,
+                              args.architecture):
+    return False
+
+  env = [
+      'FUZZING_ENGINE=' + args.engine,
+      'SANITIZER=' + args.sanitizer,
+      'RUN_FUZZER_MODE=interactive',
+      'HELPER=True',
+  ]
+
+  if args.e:
+    env += args.e
+
+  run_args = _env_to_docker_args(env)
+
+  if args.corpus_dir:
+    if not os.path.exists(args.corpus_dir):
+      logger.error('The path provided in --corpus-dir argument does not exist')
+      return False
+    corpus_dir = os.path.realpath(args.corpus_dir)
+    run_args.extend([
+        '-v',
+        '{corpus_dir}:/tmp/{fuzzer}_corpus'.format(corpus_dir=corpus_dir,
+                                                   fuzzer=args.fuzzer_name)
+    ])
+
+  run_args.extend([
+      '-v',
+      '%s:/out' % args.project.out,
+      '-t',
+      BASE_RUNNER_IMAGE,
+      'run_fuzzer',
+      args.fuzzer_name,
+  ] + args.fuzzer_args)
+
+  return docker_run(run_args, architecture=args.architecture)
+
+
+def fuzzbench_run_fuzzer(args):
+  """Runs a fuzz target built by fuzzbench in the container."""
+  if not check_project_exists(args.project):
+    return False
+
+  env = [
+      'FUZZING_ENGINE=' + args.engine,
+      'SANITIZER=' + args.sanitizer,
+      'RUN_FUZZER_MODE=interactive',
+      'HELPER=True',
+      f'FUZZ_TARGET={args.fuzzer_name}',
+      f'BENCHMARK={args.project.name}',
+      'TRIAL_ID=1',
+      'EXPERIMENT_TYPE=bug',
+  ]
+
+  if args.e:
+    env += args.e
+
+  run_args = _env_to_docker_args(env)
+
+  if args.corpus_dir:
+    if not os.path.exists(args.corpus_dir):
+      logger.error('The path provided in --corpus-dir argument does not exist')
+      return False
+    corpus_dir = os.path.realpath(args.corpus_dir)
+    run_args.extend([
+        '-v',
+        '{corpus_dir}:/tmp/{fuzzer}_corpus'.format(corpus_dir=corpus_dir,
+                                                   fuzzer=args.fuzzer_name)
+    ])
+
+  with tempfile.TemporaryDirectory() as tmp_dir:
+    tmp_dir = os.path.abspath(tmp_dir)
+    fuzzbench_path = os.path.join(tmp_dir, 'fuzzbench')
+    subprocess.run([
+        'git', 'clone', 'https://github.com/google/fuzzbench', '--depth', '1',
+        fuzzbench_path
+    ],
+                   check=True)
+    run_args.extend([
+        '-v',
+        f'{args.project.out}:/out',
+        '-v',
+        f'{fuzzbench_path}:{fuzzbench_path}',
+        '-e',
+        f'FUZZBENCH_PATH={fuzzbench_path}',
+        f'aixcc-afc/{args.project.name}',
+        'fuzzbench_run_fuzzer',
+        args.fuzzer_name,
+    ] + args.fuzzer_args)
+
+    return docker_run(run_args, architecture=args.architecture)
+
+
+def fuzzbench_measure(args):
+  """Measures results from fuzzing with fuzzbench."""
+  if not check_project_exists(args.project):
+    return False
+
+  with tempfile.TemporaryDirectory() as tmp_dir:
+    tmp_dir = os.path.abspath(tmp_dir)
+    fuzzbench_path = os.path.join(tmp_dir, 'fuzzbench')
+    subprocess.run([
+        'git', 'clone', 'https://github.com/google/fuzzbench', '--depth', '1',
+        fuzzbench_path
+    ],
+                   check=True)
+    run_args = [
+        '-v', f'{args.project.out}:/out', '-v',
+        f'{fuzzbench_path}:{fuzzbench_path}', '-e',
+        f'FUZZBENCH_PATH={fuzzbench_path}', '-e', 'EXPERIMENT_TYPE=bug', '-e',
+        f'FUZZ_TARGET={args.fuzz_target_name}', '-e',
+        f'FUZZER={args.engine_name}', '-e', f'BENCHMARK={args.project.name}',
+        f'aixcc-afc/{args.project.name}', 'fuzzbench_measure'
+    ]
+
+    return docker_run(run_args, architecture='x86_64')
+
+
+def reproduce(args):
+  """Reproduces a specific test case from a specific project."""
+  return 
reproduce_impl(args.project, args.fuzzer_name, args.valgrind, args.e,
+                        args.fuzzer_args, args.testcase_path, args.architecture,
+                        args.propagate_exit_codes, args.err_result,
+                        privileged=args.privileged, timeout=args.timeout)
+
+
+def reproduce_impl(  # pylint: disable=too-many-arguments
+    project,
+    fuzzer_name,
+    valgrind,
+    env_to_add,
+    fuzzer_args,
+    testcase_path,
+    architecture='x86_64',
+    propagate_exit_codes=False,
+    err_result=1,
+    run_function=docker_run,
+    privileged=True,
+    timeout=None):
+  """Reproduces a testcase in the container."""
+
+  if not check_project_exists(project):
+    return err_result if propagate_exit_codes else False
+
+  if not _check_fuzzer_exists(project, fuzzer_name, architecture):
+    return err_result if propagate_exit_codes else False
+
+  debugger = ''
+  env = ['HELPER=True', 'ARCHITECTURE=' + architecture]
+  image_name = 'base-runner'
+
+  if valgrind:
+    debugger = 'valgrind --tool=memcheck --track-origins=yes --leak-check=full'
+
+  if debugger:
+    image_name = 'base-runner-debug'
+    env += ['DEBUGGER=' + debugger]
+
+  if env_to_add:
+    env += env_to_add
+
+  run_args = _env_to_docker_args(env) + [
+      '-v',
+      '%s:/out' % project.out,
+      '-v',
+      '%s:/testcase' % _get_absolute_path(testcase_path),
+      '-t',
+      'ghcr.io/aixcc-finals/%s%s' % (image_name, BASE_IMAGE_TAG),
+      'reproduce',
+      fuzzer_name,
+      '-runs=100',
+  ] + fuzzer_args
+
+  return run_function(run_args,
+                      architecture=architecture,
+                      propagate_exit_codes=propagate_exit_codes,
+                      privileged=privileged,
+                      timeout=timeout)
+
+
+def _validate_project_name(project_name):
+  """Validates |project_name| is a valid OSS-Fuzz project name."""
+  if len(project_name) > MAX_PROJECT_NAME_LENGTH:
+    logger.error(
+        'Project name needs to be less than or equal to %d characters.',
+        MAX_PROJECT_NAME_LENGTH)
+    return False
+
+  if not VALID_PROJECT_NAME_REGEX.match(project_name):
+    logger.error('Invalid project name: %s.', project_name)
+    return False
+
+  return True
+
+
+def _validate_language(language):
+  """Validates |language| is a plausible project language string."""
+  if not LANGUAGE_REGEX.match(language):
+    logger.error('Invalid project language %s.', language)
+    return False
+
+  return True
+
+
+def _create_build_integration_directory(directory):
+  """Returns True on successful creation of a build integration directory.
+  Suitable for OSS-Fuzz and external projects."""
+  try:
+    os.makedirs(directory)
+  except OSError as error:
+    if error.errno != errno.EEXIST:
+      raise
+    logger.error('%s already exists.', directory)
+    return False
+  return True
+
+
+def _template_project_file(filename, template, template_args, directory):
+  """Templates |template| using |template_args| and writes the result to
+  |directory|/|filename|. Sets the file to executable if |filename| is
+  build.sh."""
+  file_path = os.path.join(directory, filename)
+  with open(file_path, 'w') as file_handle:
+    file_handle.write(template % template_args)
+
+  if filename == 'build.sh':
+    os.chmod(file_path, 0o755)
+
+
+def generate(args):
+  """Generates empty project files."""
+  return _generate_impl(args.project, args.language)
+
+
+def _get_current_datetime():
+  """Returns the current datetime. Needed for mocking."""
+  return datetime.datetime.now()
+
+
+def _base_builder_from_language(language):
+  """Returns the base builder for the specified language."""
+  return LANGUAGE_TO_BASE_BUILDER_IMAGE[language]
+
+
+def _generate_impl(project, language):
+  """Implementation of generate(). Useful for testing."""
+  if project.is_external:
+    # External project.
+    project_templates = templates.EXTERNAL_TEMPLATES
+  else:
+    # Internal project. 
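+    # (OSS-Fuzz project names are validated because they feed into docker
+    # image tags and storage paths; see VALID_PROJECT_NAME_REGEX above.)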
+ if not _validate_project_name(project.name): + return False + project_templates = templates.TEMPLATES + + if not _validate_language(language): + return False + + directory = project.build_integration_path + if not _create_build_integration_directory(directory): + return False + + logger.info('Writing new files to: %s.', directory) + + template_args = { + 'project_name': project.name, + 'base_builder': _base_builder_from_language(language), + 'language': language, + 'year': _get_current_datetime().year + } + for filename, template in project_templates.items(): + _template_project_file(filename, template, template_args, directory) + return True + + +def shell(args): + """Runs a shell within a docker image.""" + if not build_image_impl(args.project): + return False + + env = [ + 'FUZZING_ENGINE=' + args.engine, + 'SANITIZER=' + args.sanitizer, + 'ARCHITECTURE=' + args.architecture, + 'HELPER=True', + ] + + if args.project.name != 'base-runner-debug': + env.append('FUZZING_LANGUAGE=' + args.project.language) + + if args.e: + env += args.e + + if is_base_image(args.project.name): + image_project = 'aixcc-finals' + project_full = 'ghcr.io/%s/%s%s' % (image_project, args.project.name, BASE_IMAGE_TAG) + out_dir = _get_out_dir() + else: + image_project = 'aixcc-afc' + project_full = '%s/%s:%s' % (image_project, args.project.name, args.docker_image_tag) + out_dir = args.project.out + + run_args = _env_to_docker_args(env) + if args.source_path: + workdir = _workdir_from_dockerfile(args.project) + run_args.extend([ + '-v', + '%s:%s' % (_get_absolute_path(args.source_path), workdir), + ]) + + + run_args.extend([ + '-v', + '%s:/out' % out_dir, '-v', + '%s:/work' % args.project.work, '-t', + '%s' % (project_full), '/bin/bash' + ]) + + docker_run(run_args, architecture=args.architecture) + return True + + +def pull_images(language=None): + """Pulls base images used to build projects in language lang (or all if lang + is None).""" + for base_image_lang, base_images in BASE_IMAGES.items(): + if (language is None or base_image_lang == 'generic' or + base_image_lang == language): + for base_image in base_images: + if not docker_pull(base_image): + return False + + return True + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/helper_test.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/helper_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ba3172103998f11ba3113ba25ff4faf9b3775539 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/helper_test.py @@ -0,0 +1,239 @@ +# Copyright 2021 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tests for helper.py""" + +import datetime +import os +import tempfile +import unittest +from unittest import mock + +from pyfakefs import fake_filesystem_unittest + +import constants +import helper +import templates + +# pylint: disable=no-self-use,protected-access + + +class ShellTest(unittest.TestCase): + """Tests 'shell' command.""" + + @mock.patch('helper.docker_run') + @mock.patch('helper.build_image_impl') + def test_base_runner_debug(self, _, __): + """Tests that shell base-runner-debug works as intended.""" + image_name = 'base-runner-debug' + unparsed_args = ['shell', image_name] + parser = helper.get_parser() + args = helper.parse_args(parser, unparsed_args) + args.sanitizer = 'address' + result = helper.shell(args) + self.assertTrue(result) + + +class BuildImageImplTest(unittest.TestCase): + """Tests for build_image_impl.""" + + @mock.patch('helper.docker_build') + def test_no_cache(self, mock_docker_build): + """Tests that cache=False is handled properly.""" + image_name = 'base-image' + helper.build_image_impl(helper.Project(image_name), cache=False) + self.assertIn('--no-cache', mock_docker_build.call_args_list[0][0][0]) + + @mock.patch('helper.docker_build') + @mock.patch('helper.pull_images') + def test_pull(self, mock_pull_images, _): + """Tests that pull=True is handled properly.""" + image_name = 'base-image' + project = helper.Project(image_name, is_external=True) + self.assertTrue(helper.build_image_impl(project, pull=True)) + mock_pull_images.assert_called_with('c++') + + @mock.patch('helper.docker_build') + def test_base_image(self, mock_docker_build): + """Tests that build_image_impl works as intended with a base-image.""" + image_name = 'base-image' + self.assertTrue(helper.build_image_impl(helper.Project(image_name))) + build_dir = os.path.join(helper.OSS_FUZZ_DIR, + 'infra/base-images/base-image') + mock_docker_build.assert_called_with([ + '-t', 'ghcr.io/aixcc-finals/base-image', '--file', + os.path.join(build_dir, 'Dockerfile'), build_dir + ]) + + @mock.patch('helper.docker_build') + def test_oss_fuzz_project(self, mock_docker_build): + """Tests that build_image_impl works as intended with an OSS-Fuzz + project.""" + project_name = 'example' + self.assertTrue(helper.build_image_impl(helper.Project(project_name))) + build_dir = os.path.join(helper.OSS_FUZZ_DIR, 'projects', project_name) + mock_docker_build.assert_called_with([ + '-t', 'gcr.io/oss-fuzz/example', '--file', + os.path.join(build_dir, 'Dockerfile'), build_dir + ]) + + @mock.patch('helper.docker_build') + def test_external_project(self, mock_docker_build): + """Tests that build_image_impl works as intended with a non-OSS-Fuzz + project.""" + with tempfile.TemporaryDirectory() as temp_dir: + project_src_path = os.path.join(temp_dir, 'example') + os.mkdir(project_src_path) + build_integration_path = 'build-integration' + project = helper.Project(project_src_path, + is_external=True, + build_integration_path=build_integration_path) + self.assertTrue(helper.build_image_impl(project)) + mock_docker_build.assert_called_with([ + '-t', 'gcr.io/oss-fuzz/example', '--file', + os.path.join(project_src_path, build_integration_path, 'Dockerfile'), + project_src_path + ]) + + +class GenerateImplTest(fake_filesystem_unittest.TestCase): + """Tests for _generate_impl.""" + PROJECT_NAME = 'newfakeproject' + PROJECT_LANGUAGE = 'python' + + def setUp(self): + self.maxDiff = None # pylint: disable=invalid-name + self.setUpPyfakefs() + self.fs.add_real_directory(helper.OSS_FUZZ_DIR) + + def _verify_templated_files(self, 
template_dict, directory, language): + template_args = { + 'project_name': self.PROJECT_NAME, + 'year': 2021, + 'base_builder': helper._base_builder_from_language(language), + 'language': language, + } + for filename, template in template_dict.items(): + file_path = os.path.join(directory, filename) + with open(file_path, 'r') as file_handle: + contents = file_handle.read() + self.assertEqual(contents, template % template_args) + + @mock.patch('helper._get_current_datetime', + return_value=datetime.datetime(year=2021, month=1, day=1)) + def test_generate_oss_fuzz_project(self, _): + """Tests that the correct files are generated for an OSS-Fuzz project.""" + helper._generate_impl(helper.Project(self.PROJECT_NAME), + self.PROJECT_LANGUAGE) + self._verify_templated_files( + templates.TEMPLATES, + os.path.join(helper.OSS_FUZZ_DIR, 'projects', self.PROJECT_NAME), + self.PROJECT_LANGUAGE) + + def test_generate_external_project(self): + """Tests that the correct files are generated for a non-OSS-Fuzz project.""" + build_integration_path = '/newfakeproject/build-integration' + helper._generate_impl( + helper.Project('/newfakeproject/', + is_external=True, + build_integration_path=build_integration_path), + self.PROJECT_LANGUAGE) + self._verify_templated_files(templates.EXTERNAL_TEMPLATES, + build_integration_path, self.PROJECT_LANGUAGE) + + @mock.patch('helper._get_current_datetime', + return_value=datetime.datetime(year=2021, month=1, day=1)) + def test_generate_swift_project(self, _): + """Tests that the swift project uses the correct base image.""" + helper._generate_impl(helper.Project(self.PROJECT_NAME), 'swift') + self._verify_templated_files( + templates.TEMPLATES, + os.path.join(helper.OSS_FUZZ_DIR, 'projects', self.PROJECT_NAME), + 'swift') + + +class ProjectTest(fake_filesystem_unittest.TestCase): + """Tests for Project class.""" + + def setUp(self): + self.project_name = 'project' + self.internal_project = helper.Project(self.project_name) + self.external_project_path = os.path.join('/path', 'to', self.project_name) + self.external_project = helper.Project(self.external_project_path, + is_external=True) + self.setUpPyfakefs() + + def test_init_external_project(self): + """Tests __init__ method for external projects.""" + self.assertEqual(self.external_project.name, self.project_name) + self.assertEqual(self.external_project.path, self.external_project_path) + self.assertEqual( + self.external_project.build_integration_path, + os.path.join(self.external_project_path, + constants.DEFAULT_EXTERNAL_BUILD_INTEGRATION_PATH)) + + def test_init_internal_project(self): + """Tests __init__ method for internal projects.""" + self.assertEqual(self.internal_project.name, self.project_name) + path = os.path.join(helper.OSS_FUZZ_DIR, 'projects', self.project_name) + self.assertEqual(self.internal_project.path, path) + self.assertEqual(self.internal_project.build_integration_path, path) + + def test_dockerfile_path_internal_project(self): + """Tests that dockerfile_path works as intended.""" + self.assertEqual( + self.internal_project.dockerfile_path, + os.path.join(helper.OSS_FUZZ_DIR, 'projects', self.project_name, + 'Dockerfile')) + + def test_dockerfile_path_external_project(self): + """Tests that dockerfile_path works as intended.""" + self.assertEqual( + self.external_project.dockerfile_path, + os.path.join(self.external_project_path, + constants.DEFAULT_EXTERNAL_BUILD_INTEGRATION_PATH, + 'Dockerfile')) + + def test_out(self): + """Tests that out works as intended.""" + out_dir = 
self.internal_project.out + self.assertEqual( + out_dir, + os.path.join(helper.OSS_FUZZ_DIR, 'build', 'out', self.project_name)) + self.assertTrue(os.path.exists(out_dir)) + + def test_work(self): + """Tests that work works as intended.""" + work_dir = self.internal_project.work + self.assertEqual( + work_dir, + os.path.join(helper.OSS_FUZZ_DIR, 'build', 'work', self.project_name)) + self.assertTrue(os.path.exists(work_dir)) + + def test_corpus(self): + """Tests that corpus works as intended.""" + corpus_dir = self.internal_project.corpus + self.assertEqual( + corpus_dir, + os.path.join(helper.OSS_FUZZ_DIR, 'build', 'corpus', self.project_name)) + self.assertTrue(os.path.exists(corpus_dir)) + + def test_language_internal_project(self): + """Tests that language works as intended for an internal project.""" + project_yaml_path = os.path.join(self.internal_project.path, 'project.yaml') + self.fs.create_file(project_yaml_path, contents='language: python') + self.assertEqual(self.internal_project.language, 'python') + + def test_language_external_project(self): + """Tests that language works as intended for an external project.""" + self.assertEqual(self.external_project.language, 'c++') diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/pr_helper.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/pr_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..4d93b24a5d2f05d85570ec2d10fad5480596c2e2 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/pr_helper.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +################################################################################ +"""Adds comments for PR to provide more information for approvers.""" +import base64 +import json +import os +import subprocess + +import requests +import yaml + +OWNER = 'google' +REPO = 'oss-fuzz' +GITHUB_URL = 'https://github.com/' +GITHUB_NONREF_URL = f'https://www.github.com/{OWNER}/{REPO}' # Github URL that doesn't send emails on linked issues. +API_URL = 'https://api.github.com' +BASE_URL = f'{API_URL}/repos/{OWNER}/{REPO}' +BRANCH = 'master' +CRITICALITY_SCORE_PATH = '/home/runner/go/bin/criticality_score' +COMMITS_LIMIT = 50 # Only process the most recent 50 commits. 
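Before the GithubHandler logic below, it helps to see how these module constants compose into the REST endpoints the handler queries; a short illustration (the PR number is hypothetical, normally it comes from the PRNUMBER env var):

    OWNER = 'google'
    REPO = 'oss-fuzz'
    API_URL = 'https://api.github.com'
    BASE_URL = f'{API_URL}/repos/{OWNER}/{REPO}'

    pr_number = 1234  # hypothetical value for illustration
    print(f'{BASE_URL}/pulls/{pr_number}/files')
    # https://api.github.com/repos/google/oss-fuzz/pulls/1234/files
    print(f'{BASE_URL}/commits?path=projects/libxml2')
    # https://api.github.com/repos/google/oss-fuzz/commits?path=projects/libxml2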
+
+
+def get_criticality_score(repo_url):
+  """Gets the criticality score of the project."""
+  # Criticality score does not support repo URLs that end with '.git'.
+  if repo_url.endswith('.git'):
+    repo_url = repo_url[:-4]
+  report = subprocess.run([
+      CRITICALITY_SCORE_PATH, '--format', 'json',
+      '-gcp-project-id=clusterfuzz-external', '-depsdev-disable', repo_url
+  ],
+                          capture_output=True,
+                          text=True)
+
+  try:
+    report_dict = json.loads(report.stdout)
+  except:
+    print(f'Criticality score failed with stdout: {report.stdout}')
+    print(f'Criticality score failed with stderr: {report.stderr}')
+    return 'N/A'
+  return report_dict.get('default_score', 'N/A')
+
+
+def is_known_contributor(content, email):
+  """Checks if the author is in the contact list."""
+  return (email == content.get('primary_contact') or
+          email in content.get('vendor_ccs', []) or
+          email in content.get('auto_ccs', []))
+
+
+def save_env(message, is_ready_for_merge, is_internal=False):
+  """Saves the outputs as environment variables."""
+  with open(os.environ['GITHUB_ENV'], 'a') as github_env:
+    github_env.write(f'MESSAGE={message}\n')
+    github_env.write(f'IS_READY_FOR_MERGE={is_ready_for_merge}\n')
+    github_env.write(f'IS_INTERNAL={is_internal}')
+
+
+def main():
+  """Verifies if a PR is ready to merge."""
+  github = GithubHandler()
+
+  # Bypass PRs from internal members.
+  if github.is_author_internal_member():
+    save_env(None, None, True)
+    return
+
+  message = ''
+  is_ready_for_merge = True
+  pr_author = github.get_pr_author()
+  # Gets the paths of all modified projects.
+  projects_path = github.get_projects_path()
+  verified, email = github.get_author_email()
+
+  for project_path in projects_path:
+    project_url = f'{GITHUB_URL}{OWNER}/{REPO}/tree/{BRANCH}/{project_path}'
+    content_dict = github.get_project_yaml(project_path)
+
+    # Gets information for the newly integrated project.
+    if not content_dict:
+      is_ready_for_merge = False
+      new_project = github.get_integrated_project_info()
+      repo_url = new_project.get('main_repo')
+      if repo_url is None:
+        message += (f'{pr_author} is integrating a new project, '
+                    'but the `main_repo` is missing. '
+                    'The criticality score cannot be computed.<br/>')
+      else:
+        message += (f'{pr_author} is integrating a new project:<br/>'
+                    f'- Main repo: {repo_url}<br/>'
+                    f'- Criticality score: '
+                    f'{get_criticality_score(repo_url)}<br/>')
+      continue
+
+    # Checks if the author is in the contact list.
+    if email:
+      if is_known_contributor(content_dict, email):
+        # Checks if the email is verified.
+        verified_marker = ' (verified)' if verified else ''
+        message += (
+            f'{pr_author}{verified_marker} is either the primary contact or '
+            f'is in the CCs list of [{project_path}]({project_url}).<br/>')
+        if verified:
+          continue
+
+    # Checks the previous commits.
+    commit_sha = github.has_author_modified_project(project_path)
+    if commit_sha is None:
+      history_message = ''
+      contributors = github.get_past_contributors(project_path)
+      if contributors:
+        history_message = 'The past contributors are: '
+        history_message += ', '.join(contributors)
+      message += (
+          f'{pr_author} is a new contributor to '
+          f'[{project_path}]({project_url}). The PR must be approved by known '
+          f'contributors before it can be merged. {history_message}<br/>')
+      is_ready_for_merge = False
+      continue
+
+    # If the previous commit is not associated with a pull request.
+    pr_message = (f'{pr_author} has previously contributed to '
+                  f'[{project_path}]({project_url}). The previous commit was '
+                  f'{GITHUB_NONREF_URL}/commit/{commit_sha}<br/>')
+
+    previous_pr_number = github.get_pull_request_number(commit_sha)
+    if previous_pr_number is not None:
+      pr_message = (f'{pr_author} has previously contributed to '
+                    f'[{project_path}]({project_url}). '
+                    f'The previous PR was [#{previous_pr_number}]'
+                    f'({GITHUB_NONREF_URL}/pull/{previous_pr_number})<br/>')
+    message += pr_message
+
+  save_env(message, is_ready_for_merge, False)
+
+
+class GithubHandler:
+  """GitHub requests handler."""
+
+  def __init__(self):
+    self._pr_author = os.environ['PRAUTHOR']
+    self._token = os.environ['GITHUBTOKEN']
+    self._pr_number = os.environ['PRNUMBER']
+    self._headers = {
+        'Authorization': f'Bearer {self._token}',
+        'X-GitHub-Api-Version': '2022-11-28'
+    }
+    self._maintainers = set()
+    os.environ['GITHUB_AUTH_TOKEN'] = self._token
+
+  def get_pr_author(self):
+    """Gets the PR author's user name."""
+    return self._pr_author
+
+  def get_projects_path(self):
+    """Gets the paths of all projects modified by the PR."""
+    response = requests.get(f'{BASE_URL}/pulls/{self._pr_number}/files',
+                            headers=self._headers)
+    if not response.ok:
+      return []
+
+    projects_path = set()
+    for file in response.json():
+      file_path = file['filename']
+      dir_path = file_path.split(os.sep)
+      if len(dir_path) > 1 and dir_path[0] == 'projects':
+        projects_path.add(os.sep.join(dir_path[0:2]))
+    return list(projects_path)
+
+  def get_author_email(self):
+    """Retrieves the author's email address for a pull request,
+    including non-public emails."""
+    user_response = requests.get(f'{API_URL}/users/{self._pr_author}')
+    if user_response.ok:
+      email = user_response.json()['email']
+      if email:
+        return True, email
+
+    commits_response = requests.get(
+        f'{BASE_URL}/pulls/{self._pr_number}/commits', headers=self._headers)
+    if not commits_response.ok:
+      return False, None
+    email = commits_response.json()[0]['commit']['author']['email']
+    verified = commits_response.json()[0]['commit']['verification']['verified']
+    return verified, email
+
+  def get_project_yaml(self, project_path):
+    """Gets the project yaml file."""
+    contents_url = f'{BASE_URL}/contents/{project_path}/project.yaml'
+    return self.get_yaml_file_content(contents_url)
+
+  def get_yaml_file_content(self, contents_url):
+    """Gets yaml file content."""
+    response = requests.get(contents_url, headers=self._headers)
+    if not response.ok:
+      return {}
+    content = base64.b64decode(response.json()['content']).decode('UTF-8')
+    return yaml.safe_load(content)
+
+  def get_integrated_project_info(self):
+    """Gets the newly integrated project's yaml content."""
+    response = requests.get(f'{BASE_URL}/pulls/{self._pr_number}/files',
+                            headers=self._headers)
+
+    for file in response.json():
+      file_path = file['filename']
+      if 'project.yaml' in file_path:
+        return self.get_yaml_file_content(file['contents_url'])
+
+    return {}
+
+  def get_pull_request_number(self, commit):
+    """Gets the pull request number."""
+    pr_response = requests.get(f'{BASE_URL}/commits/{commit}/pulls',
+                               headers=self._headers)
+    if not pr_response.ok:
+      return None
+    return pr_response.json()[0]['number']
+
+  def get_past_contributors(self, project_path):
+    """Returns a list of past contributors of a certain project."""
+    commits_response = requests.get(f'{BASE_URL}/commits?path={project_path}',
+                                    headers=self._headers)
+
+    if not commits_response.ok:
+      return []
+    commits = commits_response.json()
+    contributors: dict[str, bool] = {}
+    for i, commit in enumerate(commits):
+      if i >= COMMITS_LIMIT:
+        break
+
+      if not commit['author'] or not commit['commit']:
+        continue
+
+      login = commit['author']['login']
+      verified = commit['commit']['verification']['verified']
+      if login in self._maintainers:
+        continue
+      if login not in contributors:
+        contributors[login] = verified
+      if verified:
+        # Override previous verification bit.
+ contributors[login] = True + + all_contributors = [] + for login, verified in contributors.items(): + login_verify = login if verified else f'{login} (unverified)' + all_contributors.append(login_verify) + + return all_contributors + + def get_maintainers(self): + """Get a list of internal members.""" + if self._maintainers: + return self._maintainers + + response = requests.get(f'{BASE_URL}/contents/infra/MAINTAINERS.csv', + headers=self._headers) + if not response.ok: + return self._maintainers + + maintainers_file = base64.b64decode( + response.json()['content']).decode('UTF-8') + for line in maintainers_file.split(os.linesep): + self._maintainers.add(line.split(',')[2]) + return self._maintainers + + def is_author_internal_member(self): + """Returns if the author is an internal member.""" + return self._pr_author in self.get_maintainers() + + def has_author_modified_project(self, project_path): + """Checks if the author has modified this project before.""" + commits_response = requests.get( + f'{BASE_URL}/commits?path={project_path}&author={self._pr_author}', + headers=self._headers) + + if not commits_response.ok or not commits_response.json(): + return None + + commit = commits_response.json()[0] + return commit['sha'] + + +if __name__ == '__main__': + main() diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/pytest.ini b/local-test-libxml2-delta-01/fuzz-tooling/infra/pytest.ini new file mode 100644 index 0000000000000000000000000000000000000000..2a10272e26592f4f8bdea82c1675c767b4488dec --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/pytest.ini @@ -0,0 +1,3 @@ +[pytest] +python_files = *_test.py +log_cli = true \ No newline at end of file diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/repo_manager_test.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/repo_manager_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7bfa64d0f41f40ab3024e58ec45ccd3ae81dbfac --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/repo_manager_test.py @@ -0,0 +1,201 @@ +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Test the functionality of the RepoManager class.""" + +import contextlib +import os +import tempfile +import unittest +from unittest import mock + +import repo_manager +import utils + +# pylint: disable=protected-access + +OSS_FUZZ_REPO_URL = 'https://github.com/google/oss-fuzz' + + +@contextlib.contextmanager +def get_oss_fuzz_repo(): + """Clones a temporary copy of the OSS-Fuzz repo. 
Returns the path to the
+  repo."""
+  repo_name = 'oss-fuzz'
+  with tempfile.TemporaryDirectory() as tmp_dir:
+    repo_manager._clone(OSS_FUZZ_REPO_URL, tmp_dir, repo_name)
+    yield os.path.join(tmp_dir, repo_name)
+
+
+class CloneTest(unittest.TestCase):
+  """Tests the _clone function."""
+
+  @unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
+                   'INTEGRATION_TESTS=1 not set')
+  def test_clone_valid_repo_integration(self):
+    """Integration test that tests the correct location of the git repo."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      git_path = os.path.join(oss_fuzz_repo, '.git')
+      self.assertTrue(os.path.isdir(git_path))
+
+  def test_clone_invalid_repo(self):
+    """Tests that cloning an invalid repo will fail."""
+    with tempfile.TemporaryDirectory() as tmp_dir:
+      with self.assertRaises(RuntimeError):
+        repo_manager._clone('https://github.com/oss-fuzz-not-real.git', tmp_dir,
+                            'oss-fuzz')
+
+  @mock.patch('utils.execute')
+  def test_clone_with_username(self, mock_execute):  # pylint: disable=no-self-use
+    """Test clone with username."""
+    repo_manager._clone('https://github.com/fake/repo.git',
+                        '/',
+                        'name',
+                        username='user',
+                        password='password')
+    mock_execute.assert_called_once_with([
+        'git', 'clone', 'https://user:password@github.com/fake/repo.git', 'name'
+    ],
+                                         location='/',
+                                         check_result=True,
+                                         log_command=False)
+
+
+@unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
+                 'INTEGRATION_TESTS=1 not set')
+class RepoManagerCheckoutTest(unittest.TestCase):
+  """Tests the checkout functionality of RepoManager."""
+
+  def test_checkout_valid_commit(self):
+    """Tests that the git checkout command works."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      commit_to_test = '04ea24ee15bbe46a19e5da6c5f022a2ffdfbdb3b'
+      repo_man.checkout_commit(commit_to_test)
+      self.assertEqual(commit_to_test, repo_man.get_current_commit())
+
+  def test_checkout_invalid_commit(self):
+    """Tests that checking out an invalid commit fails."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      with self.assertRaises(ValueError):
+        repo_man.checkout_commit(' ')
+      with self.assertRaises(ValueError):
+        repo_man.checkout_commit('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+      with self.assertRaises(ValueError):
+        repo_man.checkout_commit('not-a-valid-commit')
+
+
+@unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
+                 'INTEGRATION_TESTS=1 not set')
+class RepoManagerGetCommitListTest(unittest.TestCase):
+  """Tests the get_commit_list method of RepoManager."""
+
+  def test_get_valid_commit_list(self):
+    """Tests that an accurate commit list can be retrieved from the repo
+    manager."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      old_commit = '04ea24ee15bbe46a19e5da6c5f022a2ffdfbdb3b'
+      new_commit = 'fa662173bfeb3ba08d2e84cefc363be11e6c8463'
+      commit_list = [
+          'fa662173bfeb3ba08d2e84cefc363be11e6c8463',
+          '17035317a44fa89d22fe6846d868d4bf57def78b',
+          '97dee00a3c4ce95071c3e061592f5fd577dea886',
+          '04ea24ee15bbe46a19e5da6c5f022a2ffdfbdb3b'
+      ]
+      result_list = repo_man.get_commit_list(new_commit, old_commit)
+      self.assertListEqual(commit_list, result_list)
+
+  def test_get_invalid_commit_list(self):
+    """Tests that the proper errors are thrown when invalid commits are
+    passed to get_commit_list."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      old_commit = '04ea24ee15bbe46a19e5da6c5f022a2ffdfbdb3b'
+      new_commit = 'fa662173bfeb3ba08d2e84cefc363be11e6c8463'
+      with self.assertRaises(ValueError):
+        repo_man.get_commit_list('fakecommit', new_commit)
+      with self.assertRaises(ValueError):
+        repo_man.get_commit_list(new_commit, 'fakecommit')
+      with self.assertRaises(RuntimeError):
+        repo_man.get_commit_list(old_commit, new_commit)  # pylint: disable=arguments-out-of-order
+
+
+@unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
+                 'INTEGRATION_TESTS=1 not set')
+class GitDiffTest(unittest.TestCase):
+  """Tests get_git_diff."""
+
+  def test_diff_exists(self):
+    """Tests that a real diff is returned when a valid repo manager exists."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      with mock.patch.object(utils,
+                             'execute',
+                             return_value=('test.py\ndiff.py', None, 0)):
+        diff = repo_man.get_git_diff()
+        self.assertCountEqual(diff, ['test.py', 'diff.py'])
+
+  def test_diff_empty(self):
+    """Tests that None is returned when the mocked diff output is empty."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      with mock.patch.object(utils, 'execute', return_value=('', None, 0)):
+        diff = repo_man.get_git_diff()
+        self.assertIsNone(diff)
+
+  def test_error_on_command(self):
+    """Tests that None is returned when the command errors out."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      with mock.patch.object(utils,
+                             'execute',
+                             return_value=('', 'Test error.', 1)):
+        diff = repo_man.get_git_diff()
+        self.assertIsNone(diff)
+
+  def test_diff_no_change(self):
+    """Tests that None is returned when there is no difference between repos."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      diff = repo_man.get_git_diff()
+      self.assertIsNone(diff)
+
+
+@unittest.skipIf(not os.getenv('INTEGRATION_TESTS'),
+                 'INTEGRATION_TESTS=1 not set')
+class CheckoutPrIntegrationTest(unittest.TestCase):
+  """Runs integration tests on the checkout_pr method of RepoManager."""
+
+  def test_pull_request_exists(self):
+    """Tests that a diff is returned when a valid PR is checked out."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      repo_man.checkout_pr('refs/pull/3415/merge')
+      diff = repo_man.get_git_diff()
+      self.assertCountEqual(diff, ['README.md'])
+
+  def test_checkout_invalid_pull_request(self):
+    """Tests that checking out an invalid pull request fails."""
+    with get_oss_fuzz_repo() as oss_fuzz_repo:
+      repo_man = repo_manager.RepoManager(oss_fuzz_repo)
+      with self.assertRaises(RuntimeError):
+        repo_man.checkout_pr(' ')
+      with self.assertRaises(RuntimeError):
+        repo_man.checkout_pr('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
+      with self.assertRaises(RuntimeError):
+        repo_man.checkout_pr('not/a/valid/pr')
+
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/retry.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/retry.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f6d54b8d00d081b672a7cbb9f998b5b92c624ce
--- /dev/null
+++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/retry.py
@@ -0,0 +1,106 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Retry decorator. Copied from ClusterFuzz source.""" + +import functools +import inspect +import logging +import sys +import time + +# pylint: disable=too-many-arguments,broad-except + + +def sleep(seconds): + """Invoke time.sleep. This is to avoid the flakiness of time.sleep. See: + crbug.com/770375""" + time.sleep(seconds) + + +def get_delay(num_try, delay, backoff): + """Compute backoff delay.""" + return delay * (backoff**(num_try - 1)) + + +def wrap(retries, + delay, + backoff=2, + exception_type=Exception, + retry_on_false=False): + """Retry decorator for a function.""" + + assert delay > 0 + assert backoff >= 1 + assert retries >= 0 + + def decorator(func): + """Decorator for the given function.""" + tries = retries + 1 + is_generator = inspect.isgeneratorfunction(func) + function_with_type = func.__qualname__ + if is_generator: + function_with_type += ' (generator)' + + def handle_retry(num_try, exception=None): + """Handle retry.""" + if (exception is None or + isinstance(exception, exception_type)) and num_try < tries: + logging.info('Retrying on %s failed with %s. Retrying again.', + function_with_type, + sys.exc_info()[1]) + sleep(get_delay(num_try, delay, backoff)) + return True + + logging.error('Retrying on %s failed with %s. Raise.', function_with_type, + sys.exc_info()[1]) + return False + + @functools.wraps(func) + def _wrapper(*args, **kwargs): + """Regular function wrapper.""" + for num_try in range(1, tries + 1): + try: + result = func(*args, **kwargs) + if retry_on_false and not result: + if not handle_retry(num_try): + return result + + continue + return result + except Exception as error: + if not handle_retry(num_try, exception=error): + raise + + @functools.wraps(func) + def _generator_wrapper(*args, **kwargs): + """Generator function wrapper.""" + # This argument is not applicable for generator functions. + assert not retry_on_false + already_yielded_element_count = 0 + for num_try in range(1, tries + 1): + try: + for index, result in enumerate(func(*args, **kwargs)): + if index >= already_yielded_element_count: + yield result + already_yielded_element_count += 1 + break + except Exception as error: + if not handle_retry(num_try, exception=error): + raise + + if is_generator: + return _generator_wrapper + return _wrapper + + return decorator diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/run_fuzzers.Dockerfile b/local-test-libxml2-delta-01/fuzz-tooling/infra/run_fuzzers.Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..81accee186d02b6871e3da841e5e73740ff0c8d4 --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/run_fuzzers.Dockerfile @@ -0,0 +1,31 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+################################################################################
+# Docker image for running fuzzers on CIFuzz (the run_fuzzers action on GitHub
+# actions).
+
+FROM ghcr.io/aixcc-finals/cifuzz-base
+
+# Python file to execute when the docker container starts up.
+# We can't use the env var $OSS_FUZZ_ROOT here. Since it's a constant env var,
+# just expand to '/opt/oss-fuzz'.
+ENTRYPOINT ["python3", "/opt/oss-fuzz/infra/cifuzz/run_fuzzers_entrypoint.py"]

+WORKDIR ${OSS_FUZZ_ROOT}/infra
+
+# Copy infra source code.
+ADD . ${OSS_FUZZ_ROOT}/infra
+
+RUN python3 -m pip install -r ${OSS_FUZZ_ROOT}/infra/cifuzz/requirements.txt
diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/test b/local-test-libxml2-delta-01/fuzz-tooling/infra/test
new file mode 100644
index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc
--- /dev/null
+++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/test
@@ -0,0 +1 @@
+
diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/test_repos.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/test_repos.py
new file mode 100644
index 0000000000000000000000000000000000000000..389876864b59e7ec46f76f8311c323c22f763e74
--- /dev/null
+++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/test_repos.py
@@ -0,0 +1,84 @@
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""This module contains a list of test repositories used in unit/integration
+tests.
+
+Note: If you notice tests failing for unexpected reasons, make sure the data
+in the test repos are correct. This is because the test repos are dynamic and
+may change.
+
+Note: This should be removed when a better method of testing is established.
+"""
+
+import collections
+import os
+
+ExampleRepo = collections.namedtuple('ExampleRepo', [
+    'project_name', 'oss_repo_name', 'git_repo_name', 'image_location',
+    'git_url', 'new_commit', 'old_commit', 'intro_commit', 'fuzz_target',
+    'testcase_path'
+])
+
+TEST_DIR_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                             'testcases')
+
+# WARNING: Tests are dependent upon the following repos existing and the
+# specified commits existing.
+# TODO(metzman): Fix this problem.
+# TODO(metzman): The testcases got deleted here because the test that used them
+# was skipped. Probably worth deleting the test.
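The ExampleRepo namedtuple above is what the integration tests iterate over; a small usage sketch against the TEST_REPOS list defined just below (the gonids entry deliberately leaves its commit fields empty, so consumers should guard for that):

    for repo in TEST_REPOS:
        if not repo.new_commit:  # e.g. the gonids entry has no pinned commits
            continue
        print(f'{repo.project_name}: {repo.old_commit[:9]} -> {repo.new_commit[:9]}')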
+TEST_REPOS = [ + ExampleRepo(project_name='curl', + oss_repo_name='curl', + git_repo_name='curl', + image_location='/src', + git_url='https://github.com/curl/curl.git', + old_commit='df26f5f9c36e19cd503c0e462e9f72ad37b84c82', + new_commit='dda418266c99ceab368d723facb52069cbb9c8d5', + intro_commit='df26f5f9c36e19cd503c0e462e9f72ad37b84c82', + fuzz_target='curl_fuzzer_ftp', + testcase_path=os.path.join(TEST_DIR_PATH, 'curl_test_data')), + ExampleRepo(project_name='libarchive', + oss_repo_name='libarchive', + git_repo_name='libarchive', + image_location='/src', + git_url='https://github.com/libarchive/libarchive.git', + old_commit='5bd2a9b6658a3a6efa20bb9ad75bd39a44d71da6', + new_commit='458e49358f17ec58d65ab1c45cf299baaf3c98d1', + intro_commit='840266712006de5e737f8052db920dfea2be4260', + fuzz_target='libarchive_fuzzer', + testcase_path=os.path.join(TEST_DIR_PATH, + 'libarchive_test_data')), + ExampleRepo(project_name='gonids', + oss_repo_name='gonids', + git_repo_name='gonids', + image_location='/root/go/src/github.com/google/', + git_url='https://github.com/google/gonids', + old_commit='', + new_commit='', + intro_commit='', + fuzz_target='', + testcase_path='') +] + +INVALID_REPO = ExampleRepo(project_name='notaproj', + oss_repo_name='notarepo', + git_repo_name='notarepo', + git_url='invalid.git', + image_location='/src', + old_commit='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + new_commit='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + intro_commit='aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa', + fuzz_target='NONEFUZZER', + testcase_path='not/a/path') diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/utils.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..661a773e74b4ee4518bf489989ed05ac866b712a --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/utils.py @@ -0,0 +1,205 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Utilities for OSS-Fuzz infrastructure.""" + +import logging +import os +import posixpath +import re +import shlex +import stat +import subprocess +import sys + +import helper + +ALLOWED_FUZZ_TARGET_EXTENSIONS = ['', '.exe'] +FUZZ_TARGET_SEARCH_STRING = 'LLVMFuzzerTestOneInput' +VALID_TARGET_NAME_REGEX = re.compile(r'^[a-zA-Z0-9_-]+$') +BLOCKLISTED_TARGET_NAME_REGEX = re.compile(r'^(jazzer_driver.*)$') + +# Location of google cloud storage for latest OSS-Fuzz builds. +GCS_BASE_URL = 'https://storage.googleapis.com/' + + +def chdir_to_root(): + """Changes cwd to OSS-Fuzz root directory.""" + # Change to oss-fuzz main directory so helper.py runs correctly. 
+  if os.getcwd() != helper.OSS_FUZZ_DIR:
+    os.chdir(helper.OSS_FUZZ_DIR)
+
+
+def command_to_string(command):
+  """Returns the stringified version of |command|, a list representing a
+  binary to run and the arguments to pass to it, or a string representing a
+  binary to run."""
+  if isinstance(command, str):
+    return command
+  return shlex.join(command)
+
+
+def execute(command,
+            env=None,
+            location=None,
+            check_result=False,
+            log_command=True):
+  """Runs a shell command in the specified directory location.
+
+  Args:
+    command: The command as a list to be run.
+    env: (optional) an environment to pass to Popen to run the command in.
+    location (optional): The directory to run command in.
+    check_result (optional): Should an exception be thrown on failure.
+
+  Returns:
+    stdout, stderr, returncode.
+
+  Raises:
+    RuntimeError: running a command resulted in an error.
+  """
+
+  if not location:
+    location = os.getcwd()
+  process = subprocess.Popen(command,
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE,
+                             cwd=location,
+                             env=env)
+  out, err = process.communicate()
+  out = out.decode('utf-8', errors='ignore')
+  err = err.decode('utf-8', errors='ignore')
+
+  if log_command:
+    command_str = command_to_string(command)
+    display_err = err
+  else:
+    command_str = 'redacted'
+    display_err = 'redacted'
+
+  if err:
+    logging.debug('Stderr of command "%s" is: %s.', command_str, display_err)
+  if check_result and process.returncode:
+    raise RuntimeError('Executing command "{0}" failed with error: {1}.'.format(
+        command_str, display_err))
+  return out, err, process.returncode
+
+
+def get_fuzz_targets(path):
+  """Gets fuzz targets in a directory.
+
+  Args:
+    path: A path to search for fuzz targets in.
+
+  Returns:
+    A list of paths to fuzzers, or an empty list if none are found.
+  """
+  if not os.path.exists(path):
+    return []
+  fuzz_target_paths = []
+  for root, _, fuzzers in os.walk(path):
+    for fuzzer in fuzzers:
+      file_path = os.path.join(root, fuzzer)
+      if is_fuzz_target_local(file_path):
+        fuzz_target_paths.append(file_path)
+
+  return fuzz_target_paths
+
+
+def get_container_name():
+  """Gets the name of the current docker container you are in.
+
+  Returns:
+    Container name or None if not in a container.
+  """
+  result = subprocess.run(  # pylint: disable=subprocess-run-check
+      ['systemd-detect-virt', '-c'],
+      stdout=subprocess.PIPE).stdout
+  if b'docker' not in result:
+    return None
+  with open('/etc/hostname') as file_handle:
+    return file_handle.read().strip()
+
+
+def is_executable(file_path):
+  """Returns True if |file_path| is an executable."""
+  return os.path.exists(file_path) and os.access(file_path, os.X_OK)
+
+
+def is_fuzz_target_local(file_path):
+  """Returns whether |file_path| is a fuzz target binary (local path).
+  Copied from clusterfuzz src/python/bot/fuzzers/utils.py
+  with slight modifications.
+  """
+  # pylint: disable=too-many-return-statements
+  filename, file_extension = os.path.splitext(os.path.basename(file_path))
+  if not VALID_TARGET_NAME_REGEX.match(filename):
+    # Check that the fuzz target has a valid name (without any special chars).
+    return False
+
+  if BLOCKLISTED_TARGET_NAME_REGEX.match(filename):
+    # Check that the fuzz target doesn't have an explicitly disallowed name
+    # (e.g. binaries used for jazzer-based targets).
+    return False
+
+  if file_extension not in ALLOWED_FUZZ_TARGET_EXTENSIONS:
+    # Ignore files with disallowed extensions (to prevent opening e.g. .zips).
+ return False + + if not is_executable(file_path): + return False + + if filename.endswith('_fuzzer'): + return True + + if os.path.exists(file_path) and not stat.S_ISREG(os.stat(file_path).st_mode): + return False + + with open(file_path, 'rb') as file_handle: + return file_handle.read().find(FUZZ_TARGET_SEARCH_STRING.encode()) != -1 + + +def binary_print(string): + """Prints string. Can print a binary string.""" + if isinstance(string, bytes): + string += b'\n' + else: + string += '\n' + sys.stdout.buffer.write(string) + sys.stdout.flush() + + +def url_join(*url_parts): + """Joins URLs together using the POSIX join method. + + Args: + url_parts: Sections of a URL to be joined. + + Returns: + Joined URL. + """ + return posixpath.join(*url_parts) + + +def gs_url_to_https(url): + """Converts |url| from a GCS URL (beginning with 'gs://') to an HTTPS one.""" + return url_join(GCS_BASE_URL, remove_prefix(url, 'gs://')) + + +def remove_prefix(string, prefix): + """Returns |string| without the leading substring |prefix|.""" + # Match behavior of removeprefix from python3.9: + # https://www.python.org/dev/peps/pep-0616/ + if string.startswith(prefix): + return string[len(prefix):] + + return string diff --git a/local-test-libxml2-delta-01/fuzz-tooling/infra/utils_test.py b/local-test-libxml2-delta-01/fuzz-tooling/infra/utils_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9b7fbc90323af958c2d9fdcbe5d7889b84f0db2b --- /dev/null +++ b/local-test-libxml2-delta-01/fuzz-tooling/infra/utils_test.py @@ -0,0 +1,151 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
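A worked example of the URL helpers from the utils.py hunk above; the outputs follow directly from GCS_BASE_URL and the posixpath-based join (assumes the infra directory is on sys.path so the module imports as utils):

    import utils

    print(utils.remove_prefix('gs://bucket/corpus.zip', 'gs://'))
    # bucket/corpus.zip
    print(utils.gs_url_to_https('gs://bucket/corpus.zip'))
    # https://storage.googleapis.com/bucket/corpus.zip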
+"""Tests the functionality of the utils module's functions""" + +import os +import tempfile +import unittest +from unittest import mock + +import utils +import helper + +EXAMPLE_PROJECT = 'example' + +TEST_OUT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), + 'cifuzz', 'test_data', 'build-out') + + +class IsFuzzTargetLocalTest(unittest.TestCase): + """Tests the is_fuzz_target_local function.""" + + def test_invalid_filepath(self): + """Tests the function with an invalid file path.""" + is_local = utils.is_fuzz_target_local('not/a/real/file') + self.assertFalse(is_local) + is_local = utils.is_fuzz_target_local('') + self.assertFalse(is_local) + is_local = utils.is_fuzz_target_local(' ') + self.assertFalse(is_local) + + def test_valid_filepath(self): + """Checks is_fuzz_target_local function with a valid filepath.""" + + is_local = utils.is_fuzz_target_local( + os.path.join(TEST_OUT_DIR, 'example_crash_fuzzer')) + self.assertTrue(is_local) + is_local = utils.is_fuzz_target_local(TEST_OUT_DIR) + self.assertFalse(is_local) + + +class GetFuzzTargetsTest(unittest.TestCase): + """Tests the get_fuzz_targets function.""" + + def test_valid_filepath(self): + """Tests that fuzz targets can be retrieved once the fuzzers are built.""" + fuzz_targets = utils.get_fuzz_targets(TEST_OUT_DIR) + crash_fuzzer_path = os.path.join(TEST_OUT_DIR, 'example_crash_fuzzer') + nocrash_fuzzer_path = os.path.join(TEST_OUT_DIR, 'example_nocrash_fuzzer') + self.assertCountEqual(fuzz_targets, + [crash_fuzzer_path, nocrash_fuzzer_path]) + + # Testing on a arbitrary directory with no fuzz targets in it. + fuzz_targets = utils.get_fuzz_targets( + os.path.join(helper.OSS_FUZZ_DIR, 'infra', 'travis')) + self.assertFalse(fuzz_targets) + + def test_invalid_filepath(self): + """Tests what get_fuzz_targets return when invalid filepath is used.""" + fuzz_targets = utils.get_fuzz_targets('not/a/valid/file/path') + self.assertFalse(fuzz_targets) + + +class ExecuteTest(unittest.TestCase): + """Tests the execute function.""" + + def test_valid_command(self): + """Tests that execute can produce valid output.""" + with tempfile.TemporaryDirectory() as tmp_dir: + out, err, err_code = utils.execute(['ls', '.'], + location=tmp_dir, + check_result=False) + self.assertEqual(err_code, 0) + self.assertEqual(err, '') + self.assertEqual(out, '') + out, err, err_code = utils.execute(['mkdir', 'tmp'], + location=tmp_dir, + check_result=False) + self.assertEqual(err_code, 0) + self.assertEqual(err, '') + self.assertEqual(out, '') + out, err, err_code = utils.execute(['ls', '.'], + location=tmp_dir, + check_result=False) + self.assertEqual(err_code, 0) + self.assertEqual(err, '') + self.assertEqual(out, 'tmp\n') + + def test_error_command(self): + """Tests that execute can correctly surface errors.""" + with tempfile.TemporaryDirectory() as tmp_dir: + out, err, err_code = utils.execute(['ls', 'notarealdir'], + location=tmp_dir, + check_result=False) + self.assertEqual(err_code, 2) + self.assertIsNotNone(err) + self.assertEqual(out, '') + with self.assertRaises(RuntimeError): + out, err, err_code = utils.execute(['ls', 'notarealdir'], + location=tmp_dir, + check_result=True) + + +class BinaryPrintTest(unittest.TestCase): + """Tests for utils.binary_print.""" + + @unittest.skip('Causes spurious failures because of side-effects.') + def test_string(self): # pylint: disable=no-self-use + """Tests that utils.binary_print can print a regular string.""" + # Should execute without raising any exceptions. 
+    with mock.patch('sys.stdout.buffer.write') as mock_write:
+      utils.binary_print('hello')
+      mock_write.assert_called_with('hello\n')
+
+  @unittest.skip('Causes spurious failures because of side-effects.')
+  def test_binary_string(self):  # pylint: disable=no-self-use
+    """Tests that utils.binary_print can print a binary string."""
+    # Should execute without raising any exceptions.
+    with mock.patch('sys.stdout.buffer.write') as mock_write:
+      utils.binary_print(b'hello')
+      mock_write.assert_called_with(b'hello\n')
+
+
+class CommandToStringTest(unittest.TestCase):
+  """Tests for command_to_string."""
+
+  def test_string(self):
+    """Tests that command_to_string returns the argument passed to it when it
+    is passed a string."""
+    command = 'command'
+    self.assertEqual(utils.command_to_string(command), command)
+
+  def test_list(self):
+    """Tests that command_to_string returns the correct string when it is
+    passed a list."""
+    command = ['command', 'arg1', 'arg2']
+    self.assertEqual(utils.command_to_string(command), 'command arg1 arg2')
+
+
+if __name__ == '__main__':
+  unittest.main()
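Given the pytest.ini added earlier (python_files = *_test.py), these suites can be driven through unittest, as the __main__ blocks do, or through pytest. A minimal sketch, assuming it runs from the infra directory with the network-heavy integration tests left disabled:

    import os
    import sys
    import pytest

    # INTEGRATION_TESTS unset => the @unittest.skipIf-guarded repo_manager
    # suites are skipped automatically.
    os.environ.pop('INTEGRATION_TESTS', None)
    sys.exit(pytest.main(['-q', 'utils_test.py', 'repo_manager_test.py']))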